/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLY__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm
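
/*
 * Note: mov_q expands to a movz/movk sequence, so it can materialise the
 * full INIT_SCTLR_EL2_MMU_OFF constant (the RES1 bits with the MMU and
 * caches off, defined alongside the other SCTLR constants in
 * <asm/sysreg.h>). The isb is what makes the new SCTLR_EL2 value take
 * effect for subsequent instructions.
 */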

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured at a later stage of the boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows a kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm
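
/*
 * With HCR_EL2.E2H == 0, CNTHCTL_EL2[1:0] are EL1PCEN (bit 1) and
 * EL1PCTEN (bit 0), so the #3 written above un-traps EL1/EL0 access to
 * both the physical timer and the physical counter.
 */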

.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #1
	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, lt			// all PMU counters from EL1
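
	// The sbfx above sign-extends the PMUVer field, so both 0b0000
	// (no PMU) and 0b1111 (IMPLEMENTATION DEFINED PMU) end up below
	// 1 and take the b.lt. The ubfx extracts PMCR_EL0.N (bits
	// [15:11]), which lands in MDCR_EL2.HPMN below and makes every
	// implemented counter visible to EL1.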

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
		      1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.
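
	// Setting all MDCR_EL2.E2PB bits (0b11) gives the EL1&0
	// translation regime ownership of the profiling buffer; 0b00
	// would keep it owned by EL2, which only makes sense once the
	// kernel itself runs there (i.e. VHE).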

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_PROG
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
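
/*
 * The read-back of ICC_SRE_EL2 above is deliberate: if the system
 * register interface is disabled (e.g. held off by EL3), the SRE bit
 * can be RAZ/WI and the write silently fails. Touching ICH_HCR_EL2
 * without SRE set would then trap, so the macro bails out instead.
 */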

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
	mrs	x1, hcr_el2
	and	x1, x1, #HCR_E2H
	cbz	x1, .Lcptr_nvhe_\@
	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
	b	.Lset_cptr_\@
.Lcptr_nvhe_\@: // nVHE case
	mov	x0, #0x33ff
.Lset_cptr_\@:
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.endm
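
/*
 * With HCR_EL2.E2H == 1, CPTR_EL2 takes the CPACR_EL1 format, where
 * FPEN is a positive enable. Without E2H, #0x33ff is the legacy
 * layout's RES1 pattern plus CPTR_EL2.TZ, i.e. FP/SIMD untrapped but
 * SVE still trapped until finalise_el2_state clears TZ.
 */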

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	x1, #3
	b.lt	.Lset_debug_fgt_\@
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #(1 << 62)

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x0

	mov	x0, xzr
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
	cbz	x1, .Lset_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, xzr

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr
.Lskip_fgt_\@:
.endm
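
/*
 * The nXXX bits in the HFGxTR registers are active-low: 0 means "trap",
 * so the orrs above are what disable the SME-related traps. Leaving x0
 * at zero when SME is absent keeps those (UNDEFing) registers trapped,
 * which is the safe default.
 */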

.macro __init_el2_nvhe_prepare_eret
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0
.endm

/*
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_timers
	__init_el2_debug
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
.endm
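
/*
 * Typical caller: the early boot path (init_kernel_el in
 * arch/arm64/kernel/head.S) runs this with the MMU off, before deciding
 * whether to drop to EL1 or stay at EL2 for VHE.
 */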

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expects tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cbz	\tmp1, \fail

	adr_l	\tmp1, \idreg\()_override
	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
	ubfx	\tmp2, \tmp2, #\fld, #\width
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cmp	\tmp1, xzr
	and	\tmp2, \tmp2, \tmp1
	csinv	\tmp2, \tmp2, xzr, ne
	cbnz	\tmp2, \pass
	b	\fail
.endm
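
// To unpack the ending: \tmp1 holds the override mask for the field,
// \tmp2 the override value. A non-zero mask (cmp sets NE) makes csinv
// keep "value & mask"; an absent override substitutes all-ones, so a
// feature present in HW still branches to \pass.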

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
	mrs	\tmp1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm

#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
	ldr_l	\tmp, \idreg\()_el1_sys_val
	ubfx	\tmp, \tmp, #\fld, #\width
	cbz	\tmp, \fail

	b	\pass
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif /* __KVM_NVHE_HYPERVISOR__ */
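
/*
 * The nVHE hypervisor variants above deliberately take (and ignore) the
 * same argument count: at EL2 only the snapshotted \idreg\()_el1_sys_val
 * is available, so the decision is made on feature presence alone
 * rather than on the host's cpufeature override state.
 */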

.macro finalise_el2_state
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	mrs	x0, cptr_el2			// Disable SVE traps
	mrs	x1, hcr_el2
	and	x1, x1, #HCR_E2H
	cbz	x1, .Lcptr_nvhe_\@

	// VHE case
	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
	b	.Lset_cptr_\@

.Lcptr_nvhe_\@: // nVHE case
	bic	x0, x0, #CPTR_EL2_TZ
.Lset_cptr_\@:
	msr	cptr_el2, x0
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve_\@:
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	// ZT0 available?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.
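
	// Both the ZCR_EL2 and SMCR_EL2 writes use the all-ones LEN
	// idiom: request the architectural maximum vector length and let
	// the hardware constrain it to what is actually implemented.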

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal

	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
	ubfx	x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x1, .Lskip_sme_\@

	mrs_s	x1, SYS_HCRX_EL2
	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
	msr_s	SYS_HCRX_EL2, x1
.Lskip_sme_\@:
.endm
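
/*
 * Unlike init_el2_state, finalise_el2_state is intended to run once the
 * cpufeature override state is known, so that the SVE/SME setup above
 * honours command-line overrides (e.g. arm64.nosve) instead of trusting
 * the raw ID registers.
 */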

#endif /* __ARM_KVM_INIT_H__ */