/*
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"

#include "exec/page-vary.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#include "hw/boards.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/hw_accel.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->regs[15] = value & ~1;
    env->thumb = value & 1;
}
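/*
 * Example: arm_cpu_set_pc(cs, 0x8001) resumes execution at 0x8000 in Thumb
 * state, while 0x8000 selects A32 state; the low bit of the value encodes
 * the target instruction set, mirroring the BX interworking convention.
 */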
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * It's OK to look at env for the current mode here, because it's
     * never possible for an AArch64 TB to chain to an AArch32 TB.
     */
    env->regs[15] = tb->pc;
}
#endif /* CONFIG_TCG */
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
         | CPU_INTERRUPT_EXITTB);
}
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}
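/*
 * Typical usage (sketch only; "my_el_change_hook" is a hypothetical callback,
 * not something defined in this file):
 *
 *     static void my_el_change_hook(ARMCPU *cpu, void *opaque)
 *     {
 *         // react to the exception level change
 *     }
 *
 *     arm_register_el_change_hook(cpu, my_el_change_hook, NULL);
 *
 * Hooks on the pre_el_change list run before the exception level changes;
 * hooks on the el_change list run after it.
 */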
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}
static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}
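/*
 * Concretely, if two cpregs were defined as views of the same state field
 * but with different resetvalue settings, the second reset pass above would
 * see the field change between the two reads and trip the assertion.
 */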
static void arm_cpu_reset(DeviceState *dev)
{
    CPUState *s = CPU(dev);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(dev);

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }
    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys. */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
        /* and to the SVE instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
        /* with reasonable vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->vfp.zcr_el[1] =
                aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1);
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);

        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
        /*
         * Disable access to SCXTNUM_EL0 from CSV2_1p2.
         * This is not yet exposed from the Linux kernel in any way.
         */
        env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset. */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }
#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }
    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);
    arm_rebuild_hflags(env);
}
#ifndef CONFIG_USER_ONLY

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cs->env_ptr;
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;
    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;
    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when virtualized. */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when virtualized. */
            return false;
        }
        return !(env->daif & PSTATE_I);
    case EXCP_VSERR:
        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
            /* VSErrs are only taken when virtualized. */
            return false;
        }
        return !(env->daif & PSTATE_A);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /*
             * 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
                unmasked = true;
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VSERR) {
        excp_idx = EXCP_VSERR;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            /* Taking a virtual abort clears HCR_EL2.VSE */
            env->cp15.hcr_el2 &= ~HCR_VSE;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}

#endif /* !CONFIG_USER_ONLY */
void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}
void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}
void arm_cpu_update_vserr(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = env->cp15.hcr_el2 & HCR_VSE;

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (!arm_feature(env, ARM_FEATURE_EL2) &&
        (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
        /*
         * The GIC might tell us about VIRQ and VFIQ state, but if we don't
         * have EL2 support we don't care. (Unless the guest is doing something
         * silly this will only be calls saying "level is still 0".)
         */
        return;
    }

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif /* !CONFIG_USER_ONLY */
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;

        if (env->thumb) {
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}
873 #ifdef TARGET_AARCH64
875 static void aarch64_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
877 ARMCPU
*cpu
= ARM_CPU(cs
);
878 CPUARMState
*env
= &cpu
->env
;
879 uint32_t psr
= pstate_read(env
);
881 int el
= arm_current_el(env
);
882 const char *ns_status
;
884 qemu_fprintf(f
, " PC=%016" PRIx64
" ", env
->pc
);
885 for (i
= 0; i
< 32; i
++) {
887 qemu_fprintf(f
, " SP=%016" PRIx64
"\n", env
->xregs
[i
]);
889 qemu_fprintf(f
, "X%02d=%016" PRIx64
"%s", i
, env
->xregs
[i
],
890 (i
+ 2) % 3 ? " " : "\n");
894 if (arm_feature(env
, ARM_FEATURE_EL3
) && el
!= 3) {
895 ns_status
= env
->cp15
.scr_el3
& SCR_NS
? "NS " : "S ";
899 qemu_fprintf(f
, "PSTATE=%08x %c%c%c%c %sEL%d%c",
901 psr
& PSTATE_N
? 'N' : '-',
902 psr
& PSTATE_Z
? 'Z' : '-',
903 psr
& PSTATE_C
? 'C' : '-',
904 psr
& PSTATE_V
? 'V' : '-',
907 psr
& PSTATE_SP
? 'h' : 't');
909 if (cpu_isar_feature(aa64_bti
, cpu
)) {
910 qemu_fprintf(f
, " BTYPE=%d", (psr
& PSTATE_BTYPE
) >> 10);
912 if (!(flags
& CPU_DUMP_FPU
)) {
913 qemu_fprintf(f
, "\n");
916 if (fp_exception_el(env
, el
) != 0) {
917 qemu_fprintf(f
, " FPU disabled\n");
920 qemu_fprintf(f
, " FPCR=%08x FPSR=%08x\n",
921 vfp_get_fpcr(env
), vfp_get_fpsr(env
));
923 if (cpu_isar_feature(aa64_sve
, cpu
) && sve_exception_el(env
, el
) == 0) {
924 int j
, zcr_len
= sve_zcr_len_for_el(env
, el
);
926 for (i
= 0; i
<= FFR_PRED_NUM
; i
++) {
928 if (i
== FFR_PRED_NUM
) {
929 qemu_fprintf(f
, "FFR=");
930 /* It's last, so end the line. */
933 qemu_fprintf(f
, "P%02d=", i
);
946 /* More than one quadword per predicate. */
951 for (j
= zcr_len
/ 4; j
>= 0; j
--) {
953 if (j
* 4 + 4 <= zcr_len
+ 1) {
956 digits
= (zcr_len
% 4 + 1) * 4;
958 qemu_fprintf(f
, "%0*" PRIx64
"%s", digits
,
959 env
->vfp
.pregs
[i
].p
[j
],
960 j
? ":" : eol
? "\n" : " ");
964 for (i
= 0; i
< 32; i
++) {
966 qemu_fprintf(f
, "Z%02d=%016" PRIx64
":%016" PRIx64
"%s",
967 i
, env
->vfp
.zregs
[i
].d
[1],
968 env
->vfp
.zregs
[i
].d
[0], i
& 1 ? "\n" : " ");
969 } else if (zcr_len
== 1) {
970 qemu_fprintf(f
, "Z%02d=%016" PRIx64
":%016" PRIx64
971 ":%016" PRIx64
":%016" PRIx64
"\n",
972 i
, env
->vfp
.zregs
[i
].d
[3], env
->vfp
.zregs
[i
].d
[2],
973 env
->vfp
.zregs
[i
].d
[1], env
->vfp
.zregs
[i
].d
[0]);
975 for (j
= zcr_len
; j
>= 0; j
--) {
976 bool odd
= (zcr_len
- j
) % 2 != 0;
978 qemu_fprintf(f
, "Z%02d[%x-%x]=", i
, j
, j
- 1);
981 qemu_fprintf(f
, " [%x-%x]=", j
, j
- 1);
983 qemu_fprintf(f
, " [%x]=", j
);
986 qemu_fprintf(f
, "%016" PRIx64
":%016" PRIx64
"%s",
987 env
->vfp
.zregs
[i
].d
[j
* 2 + 1],
988 env
->vfp
.zregs
[i
].d
[j
* 2],
989 odd
|| j
== 0 ? "\n" : ":");
994 for (i
= 0; i
< 32; i
++) {
995 uint64_t *q
= aa64_vfp_qreg(env
, i
);
996 qemu_fprintf(f
, "Q%02d=%016" PRIx64
":%016" PRIx64
"%s",
997 i
, q
[1], q
[0], (i
& 1 ? "\n" : " "));
1004 static inline void aarch64_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
1006 g_assert_not_reached();
1011 static void arm_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
1013 ARMCPU
*cpu
= ARM_CPU(cs
);
1014 CPUARMState
*env
= &cpu
->env
;
1018 aarch64_cpu_dump_state(cs
, f
, flags
);
1022 for (i
= 0; i
< 16; i
++) {
1023 qemu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
1025 qemu_fprintf(f
, "\n");
1027 qemu_fprintf(f
, " ");
1031 if (arm_feature(env
, ARM_FEATURE_M
)) {
1032 uint32_t xpsr
= xpsr_read(env
);
1034 const char *ns_status
= "";
1036 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1037 ns_status
= env
->v7m
.secure
? "S " : "NS ";
1040 if (xpsr
& XPSR_EXCP
) {
1043 if (env
->v7m
.control
[env
->v7m
.secure
] & R_V7M_CONTROL_NPRIV_MASK
) {
1044 mode
= "unpriv-thread";
1046 mode
= "priv-thread";
1050 qemu_fprintf(f
, "XPSR=%08x %c%c%c%c %c %s%s\n",
1052 xpsr
& XPSR_N
? 'N' : '-',
1053 xpsr
& XPSR_Z
? 'Z' : '-',
1054 xpsr
& XPSR_C
? 'C' : '-',
1055 xpsr
& XPSR_V
? 'V' : '-',
1056 xpsr
& XPSR_T
? 'T' : 'A',
1060 uint32_t psr
= cpsr_read(env
);
1061 const char *ns_status
= "";
1063 if (arm_feature(env
, ARM_FEATURE_EL3
) &&
1064 (psr
& CPSR_M
) != ARM_CPU_MODE_MON
) {
1065 ns_status
= env
->cp15
.scr_el3
& SCR_NS
? "NS " : "S ";
1068 qemu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%s%d\n",
1070 psr
& CPSR_N
? 'N' : '-',
1071 psr
& CPSR_Z
? 'Z' : '-',
1072 psr
& CPSR_C
? 'C' : '-',
1073 psr
& CPSR_V
? 'V' : '-',
1074 psr
& CPSR_T
? 'T' : 'A',
1076 aarch32_mode_name(psr
), (psr
& 0x10) ? 32 : 26);
1079 if (flags
& CPU_DUMP_FPU
) {
1081 if (cpu_isar_feature(aa32_simd_r32
, cpu
)) {
1083 } else if (cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1086 for (i
= 0; i
< numvfpregs
; i
++) {
1087 uint64_t v
= *aa32_vfp_dreg(env
, i
);
1088 qemu_fprintf(f
, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64
"\n",
1090 i
* 2 + 1, (uint32_t)(v
>> 32),
1093 qemu_fprintf(f
, "FPSCR: %08x\n", vfp_get_fpscr(env
));
1094 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1095 qemu_fprintf(f
, "VPR: %08x\n", env
->v7m
.vpr
);
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
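/*
 * Worked example: with the 8-CPU default cluster size, cpu index 10 maps to
 * Aff1 = 1 and Aff0 = 2, i.e. an affinity value of (1 << ARM_AFF1_SHIFT) | 2
 * (0x102 with the usual Aff1 field at bits [15:8]).
 */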
static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu_set_cpustate_pointers(cpu);
    cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                         NULL, g_free);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit vectors, when sve is supported.
     * See documentation for /proc/sys/abi/sve_default_vector_length, and
     * our corresponding sve-default-vector-length cpu property.
     */
    cpu->sve_default_vq = 4;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}
1161 static Property arm_cpu_gt_cntfrq_property
=
1162 DEFINE_PROP_UINT64("cntfrq", ARMCPU
, gt_cntfrq_hz
,
1163 NANOSECONDS_PER_SECOND
/ GTIMER_SCALE
);
1165 static Property arm_cpu_reset_cbar_property
=
1166 DEFINE_PROP_UINT64("reset-cbar", ARMCPU
, reset_cbar
, 0);
1168 static Property arm_cpu_reset_hivecs_property
=
1169 DEFINE_PROP_BOOL("reset-hivecs", ARMCPU
, reset_hivecs
, false);
1171 #ifndef CONFIG_USER_ONLY
1172 static Property arm_cpu_has_el2_property
=
1173 DEFINE_PROP_BOOL("has_el2", ARMCPU
, has_el2
, true);
1175 static Property arm_cpu_has_el3_property
=
1176 DEFINE_PROP_BOOL("has_el3", ARMCPU
, has_el3
, true);
1179 static Property arm_cpu_cfgend_property
=
1180 DEFINE_PROP_BOOL("cfgend", ARMCPU
, cfgend
, false);
1182 static Property arm_cpu_has_vfp_property
=
1183 DEFINE_PROP_BOOL("vfp", ARMCPU
, has_vfp
, true);
1185 static Property arm_cpu_has_neon_property
=
1186 DEFINE_PROP_BOOL("neon", ARMCPU
, has_neon
, true);
1188 static Property arm_cpu_has_dsp_property
=
1189 DEFINE_PROP_BOOL("dsp", ARMCPU
, has_dsp
, true);
1191 static Property arm_cpu_has_mpu_property
=
1192 DEFINE_PROP_BOOL("has-mpu", ARMCPU
, has_mpu
, true);
1194 /* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
1195 * because the CPU initfn will have already set cpu->pmsav7_dregion to
1196 * the right value for that particular CPU type, and we don't want
1197 * to override that with an incorrect constant value.
1199 static Property arm_cpu_pmsav7_dregion_property
=
1200 DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU
,
1202 qdev_prop_uint32
, uint32_t);
1204 static bool arm_get_pmu(Object
*obj
, Error
**errp
)
1206 ARMCPU
*cpu
= ARM_CPU(obj
);
1208 return cpu
->has_pmu
;
1211 static void arm_set_pmu(Object
*obj
, bool value
, Error
**errp
)
1213 ARMCPU
*cpu
= ARM_CPU(obj
);
1216 if (kvm_enabled() && !kvm_arm_pmu_supported()) {
1217 error_setg(errp
, "'pmu' feature not supported by KVM on this host");
1220 set_feature(&cpu
->env
, ARM_FEATURE_PMU
);
1222 unset_feature(&cpu
->env
, ARM_FEATURE_PMU
);
1224 cpu
->has_pmu
= value
;
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
    /*
     * The exact approach to calculating guest ticks is:
     *
     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
     *              NANOSECONDS_PER_SECOND);
     *
     * We don't do that. Rather we intentionally use integer division
     * truncation below and in the caller for the conversion of host monotonic
     * time to guest ticks to provide the exact inverse for the semantics of
     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
     * it loses precision when representing frequencies where
     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq) > 0` holds. Failing to
     * provide an exact inverse leads to scheduling timers with negative
     * periods, which in turn leads to sticky behaviour in the guest.
     *
     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
     * cannot become zero.
     */
    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
        NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}
1251 void arm_cpu_post_init(Object
*obj
)
1253 ARMCPU
*cpu
= ARM_CPU(obj
);
1255 /* M profile implies PMSA. We have to do this here rather than
1256 * in realize with the other feature-implication checks because
1257 * we look at the PMSA bit to see if we should add some properties.
1259 if (arm_feature(&cpu
->env
, ARM_FEATURE_M
)) {
1260 set_feature(&cpu
->env
, ARM_FEATURE_PMSA
);
1263 if (arm_feature(&cpu
->env
, ARM_FEATURE_CBAR
) ||
1264 arm_feature(&cpu
->env
, ARM_FEATURE_CBAR_RO
)) {
1265 qdev_property_add_static(DEVICE(obj
), &arm_cpu_reset_cbar_property
);
1268 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M
)) {
1269 qdev_property_add_static(DEVICE(obj
), &arm_cpu_reset_hivecs_property
);
1272 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)) {
1273 object_property_add_uint64_ptr(obj
, "rvbar",
1275 OBJ_PROP_FLAG_READWRITE
);
1278 #ifndef CONFIG_USER_ONLY
1279 if (arm_feature(&cpu
->env
, ARM_FEATURE_EL3
)) {
1280 /* Add the has_el3 state CPU property only if EL3 is allowed. This will
1281 * prevent "has_el3" from existing on CPUs which cannot support EL3.
1283 qdev_property_add_static(DEVICE(obj
), &arm_cpu_has_el3_property
);
1285 object_property_add_link(obj
, "secure-memory",
1287 (Object
**)&cpu
->secure_memory
,
1288 qdev_prop_allow_set_link_before_realize
,
1289 OBJ_PROP_LINK_STRONG
);
1292 if (arm_feature(&cpu
->env
, ARM_FEATURE_EL2
)) {
1293 qdev_property_add_static(DEVICE(obj
), &arm_cpu_has_el2_property
);
1297 if (arm_feature(&cpu
->env
, ARM_FEATURE_PMU
)) {
1298 cpu
->has_pmu
= true;
1299 object_property_add_bool(obj
, "pmu", arm_get_pmu
, arm_set_pmu
);
1303 * Allow user to turn off VFP and Neon support, but only for TCG --
1304 * KVM does not currently allow us to lie to the guest about its
1305 * ID/feature registers, so the guest always sees what the host has.
1307 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)
1308 ? cpu_isar_feature(aa64_fp_simd
, cpu
)
1309 : cpu_isar_feature(aa32_vfp
, cpu
)) {
1310 cpu
->has_vfp
= true;
1311 if (!kvm_enabled()) {
1312 qdev_property_add_static(DEVICE(obj
), &arm_cpu_has_vfp_property
);
1316 if (arm_feature(&cpu
->env
, ARM_FEATURE_NEON
)) {
1317 cpu
->has_neon
= true;
1318 if (!kvm_enabled()) {
1319 qdev_property_add_static(DEVICE(obj
), &arm_cpu_has_neon_property
);
1323 if (arm_feature(&cpu
->env
, ARM_FEATURE_M
) &&
1324 arm_feature(&cpu
->env
, ARM_FEATURE_THUMB_DSP
)) {
1325 qdev_property_add_static(DEVICE(obj
), &arm_cpu_has_dsp_property
);
1328 if (arm_feature(&cpu
->env
, ARM_FEATURE_PMSA
)) {
1329 qdev_property_add_static(DEVICE(obj
), &arm_cpu_has_mpu_property
);
1330 if (arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1331 qdev_property_add_static(DEVICE(obj
),
1332 &arm_cpu_pmsav7_dregion_property
);
1336 if (arm_feature(&cpu
->env
, ARM_FEATURE_M_SECURITY
)) {
1337 object_property_add_link(obj
, "idau", TYPE_IDAU_INTERFACE
, &cpu
->idau
,
1338 qdev_prop_allow_set_link_before_realize
,
1339 OBJ_PROP_LINK_STRONG
);
1341 * M profile: initial value of the Secure VTOR. We can't just use
1342 * a simple DEFINE_PROP_UINT32 for this because we want to permit
1343 * the property to be set after realize.
1345 object_property_add_uint32_ptr(obj
, "init-svtor",
1347 OBJ_PROP_FLAG_READWRITE
);
1349 if (arm_feature(&cpu
->env
, ARM_FEATURE_M
)) {
1351 * Initial value of the NS VTOR (for cores without the Security
1352 * extension, this is the only VTOR)
1354 object_property_add_uint32_ptr(obj
, "init-nsvtor",
1356 OBJ_PROP_FLAG_READWRITE
);
1359 /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
1360 object_property_add_uint32_ptr(obj
, "psci-conduit",
1362 OBJ_PROP_FLAG_READWRITE
);
1364 qdev_property_add_static(DEVICE(obj
), &arm_cpu_cfgend_property
);
1366 if (arm_feature(&cpu
->env
, ARM_FEATURE_GENERIC_TIMER
)) {
1367 qdev_property_add_static(DEVICE(cpu
), &arm_cpu_gt_cntfrq_property
);
1370 if (kvm_enabled()) {
1371 kvm_arm_add_vcpu_properties(obj
);
1374 #ifndef CONFIG_USER_ONLY
1375 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
) &&
1376 cpu_isar_feature(aa64_mte
, cpu
)) {
1377 object_property_add_link(obj
, "tag-memory",
1379 (Object
**)&cpu
->tag_memory
,
1380 qdev_prop_allow_set_link_before_realize
,
1381 OBJ_PROP_LINK_STRONG
);
1383 if (arm_feature(&cpu
->env
, ARM_FEATURE_EL3
)) {
1384 object_property_add_link(obj
, "secure-tag-memory",
1386 (Object
**)&cpu
->secure_tag_memory
,
1387 qdev_prop_allow_set_link_before_realize
,
1388 OBJ_PROP_LINK_STRONG
);
1394 static void arm_cpu_finalizefn(Object
*obj
)
1396 ARMCPU
*cpu
= ARM_CPU(obj
);
1397 ARMELChangeHook
*hook
, *next
;
1399 g_hash_table_destroy(cpu
->cp_regs
);
1401 QLIST_FOREACH_SAFE(hook
, &cpu
->pre_el_change_hooks
, node
, next
) {
1402 QLIST_REMOVE(hook
, node
);
1405 QLIST_FOREACH_SAFE(hook
, &cpu
->el_change_hooks
, node
, next
) {
1406 QLIST_REMOVE(hook
, node
);
1409 #ifndef CONFIG_USER_ONLY
1410 if (cpu
->pmu_timer
) {
1411 timer_free(cpu
->pmu_timer
);
1416 void arm_cpu_finalize_features(ARMCPU
*cpu
, Error
**errp
)
1418 Error
*local_err
= NULL
;
1420 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)) {
1421 arm_cpu_sve_finalize(cpu
, &local_err
);
1422 if (local_err
!= NULL
) {
1423 error_propagate(errp
, local_err
);
1427 arm_cpu_pauth_finalize(cpu
, &local_err
);
1428 if (local_err
!= NULL
) {
1429 error_propagate(errp
, local_err
);
1433 arm_cpu_lpa2_finalize(cpu
, &local_err
);
1434 if (local_err
!= NULL
) {
1435 error_propagate(errp
, local_err
);
1440 if (kvm_enabled()) {
1441 kvm_arm_steal_time_finalize(cpu
, &local_err
);
1442 if (local_err
!= NULL
) {
1443 error_propagate(errp
, local_err
);
1449 static void arm_cpu_realizefn(DeviceState
*dev
, Error
**errp
)
1451 CPUState
*cs
= CPU(dev
);
1452 ARMCPU
*cpu
= ARM_CPU(dev
);
1453 ARMCPUClass
*acc
= ARM_CPU_GET_CLASS(dev
);
1454 CPUARMState
*env
= &cpu
->env
;
1456 Error
*local_err
= NULL
;
1457 bool no_aa32
= false;
1459 /* If we needed to query the host kernel for the CPU features
1460 * then it's possible that might have failed in the initfn, but
1461 * this is the first point where we can report it.
1463 if (cpu
->host_cpu_probe_failed
) {
1464 if (!kvm_enabled() && !hvf_enabled()) {
1465 error_setg(errp
, "The 'host' CPU type can only be used with KVM or HVF");
1467 error_setg(errp
, "Failed to retrieve host CPU features");
1472 #ifndef CONFIG_USER_ONLY
1473 /* The NVIC and M-profile CPU are two halves of a single piece of
1474 * hardware; trying to use one without the other is a command line
1475 * error and will result in segfaults if not caught here.
1477 if (arm_feature(env
, ARM_FEATURE_M
)) {
1479 error_setg(errp
, "This board cannot be used with Cortex-M CPUs");
1484 error_setg(errp
, "This board can only be used with Cortex-M CPUs");
1489 if (kvm_enabled()) {
1491 * Catch all the cases which might cause us to create more than one
1492 * address space for the CPU (otherwise we will assert() later in
1493 * cpu_address_space_init()).
1495 if (arm_feature(env
, ARM_FEATURE_M
)) {
1497 "Cannot enable KVM when using an M-profile guest CPU");
1502 "Cannot enable KVM when guest CPU has EL3 enabled");
1505 if (cpu
->tag_memory
) {
1507 "Cannot enable KVM when guest CPUs has MTE enabled");
1515 if (arm_feature(env
, ARM_FEATURE_GENERIC_TIMER
)) {
1516 if (!cpu
->gt_cntfrq_hz
) {
1517 error_setg(errp
, "Invalid CNTFRQ: %"PRId64
"Hz",
1521 scale
= gt_cntfrq_period_ns(cpu
);
1523 scale
= GTIMER_SCALE
;
1526 cpu
->gt_timer
[GTIMER_PHYS
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1527 arm_gt_ptimer_cb
, cpu
);
1528 cpu
->gt_timer
[GTIMER_VIRT
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1529 arm_gt_vtimer_cb
, cpu
);
1530 cpu
->gt_timer
[GTIMER_HYP
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1531 arm_gt_htimer_cb
, cpu
);
1532 cpu
->gt_timer
[GTIMER_SEC
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1533 arm_gt_stimer_cb
, cpu
);
1534 cpu
->gt_timer
[GTIMER_HYPVIRT
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1535 arm_gt_hvtimer_cb
, cpu
);
1539 cpu_exec_realizefn(cs
, &local_err
);
1540 if (local_err
!= NULL
) {
1541 error_propagate(errp
, local_err
);
1545 arm_cpu_finalize_features(cpu
, &local_err
);
1546 if (local_err
!= NULL
) {
1547 error_propagate(errp
, local_err
);
1551 if (arm_feature(env
, ARM_FEATURE_AARCH64
) &&
1552 cpu
->has_vfp
!= cpu
->has_neon
) {
1554 * This is an architectural requirement for AArch64; AArch32 is
1555 * more flexible and permits VFP-no-Neon and Neon-no-VFP.
1558 "AArch64 CPUs must have both VFP and Neon or neither");
1562 if (!cpu
->has_vfp
) {
1566 t
= cpu
->isar
.id_aa64isar1
;
1567 t
= FIELD_DP64(t
, ID_AA64ISAR1
, JSCVT
, 0);
1568 cpu
->isar
.id_aa64isar1
= t
;
1570 t
= cpu
->isar
.id_aa64pfr0
;
1571 t
= FIELD_DP64(t
, ID_AA64PFR0
, FP
, 0xf);
1572 cpu
->isar
.id_aa64pfr0
= t
;
1574 u
= cpu
->isar
.id_isar6
;
1575 u
= FIELD_DP32(u
, ID_ISAR6
, JSCVT
, 0);
1576 u
= FIELD_DP32(u
, ID_ISAR6
, BF16
, 0);
1577 cpu
->isar
.id_isar6
= u
;
1579 u
= cpu
->isar
.mvfr0
;
1580 u
= FIELD_DP32(u
, MVFR0
, FPSP
, 0);
1581 u
= FIELD_DP32(u
, MVFR0
, FPDP
, 0);
1582 u
= FIELD_DP32(u
, MVFR0
, FPDIVIDE
, 0);
1583 u
= FIELD_DP32(u
, MVFR0
, FPSQRT
, 0);
1584 u
= FIELD_DP32(u
, MVFR0
, FPROUND
, 0);
1585 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1586 u
= FIELD_DP32(u
, MVFR0
, FPTRAP
, 0);
1587 u
= FIELD_DP32(u
, MVFR0
, FPSHVEC
, 0);
1589 cpu
->isar
.mvfr0
= u
;
1591 u
= cpu
->isar
.mvfr1
;
1592 u
= FIELD_DP32(u
, MVFR1
, FPFTZ
, 0);
1593 u
= FIELD_DP32(u
, MVFR1
, FPDNAN
, 0);
1594 u
= FIELD_DP32(u
, MVFR1
, FPHP
, 0);
1595 if (arm_feature(env
, ARM_FEATURE_M
)) {
1596 u
= FIELD_DP32(u
, MVFR1
, FP16
, 0);
1598 cpu
->isar
.mvfr1
= u
;
1600 u
= cpu
->isar
.mvfr2
;
1601 u
= FIELD_DP32(u
, MVFR2
, FPMISC
, 0);
1602 cpu
->isar
.mvfr2
= u
;
1605 if (!cpu
->has_neon
) {
1609 unset_feature(env
, ARM_FEATURE_NEON
);
1611 t
= cpu
->isar
.id_aa64isar0
;
1612 t
= FIELD_DP64(t
, ID_AA64ISAR0
, AES
, 0);
1613 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SHA1
, 0);
1614 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SHA2
, 0);
1615 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SHA3
, 0);
1616 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SM3
, 0);
1617 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SM4
, 0);
1618 t
= FIELD_DP64(t
, ID_AA64ISAR0
, DP
, 0);
1619 cpu
->isar
.id_aa64isar0
= t
;
1621 t
= cpu
->isar
.id_aa64isar1
;
1622 t
= FIELD_DP64(t
, ID_AA64ISAR1
, FCMA
, 0);
1623 t
= FIELD_DP64(t
, ID_AA64ISAR1
, BF16
, 0);
1624 t
= FIELD_DP64(t
, ID_AA64ISAR1
, I8MM
, 0);
1625 cpu
->isar
.id_aa64isar1
= t
;
1627 t
= cpu
->isar
.id_aa64pfr0
;
1628 t
= FIELD_DP64(t
, ID_AA64PFR0
, ADVSIMD
, 0xf);
1629 cpu
->isar
.id_aa64pfr0
= t
;
1631 u
= cpu
->isar
.id_isar5
;
1632 u
= FIELD_DP32(u
, ID_ISAR5
, AES
, 0);
1633 u
= FIELD_DP32(u
, ID_ISAR5
, SHA1
, 0);
1634 u
= FIELD_DP32(u
, ID_ISAR5
, SHA2
, 0);
1635 u
= FIELD_DP32(u
, ID_ISAR5
, RDM
, 0);
1636 u
= FIELD_DP32(u
, ID_ISAR5
, VCMA
, 0);
1637 cpu
->isar
.id_isar5
= u
;
1639 u
= cpu
->isar
.id_isar6
;
1640 u
= FIELD_DP32(u
, ID_ISAR6
, DP
, 0);
1641 u
= FIELD_DP32(u
, ID_ISAR6
, FHM
, 0);
1642 u
= FIELD_DP32(u
, ID_ISAR6
, BF16
, 0);
1643 u
= FIELD_DP32(u
, ID_ISAR6
, I8MM
, 0);
1644 cpu
->isar
.id_isar6
= u
;
1646 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1647 u
= cpu
->isar
.mvfr1
;
1648 u
= FIELD_DP32(u
, MVFR1
, SIMDLS
, 0);
1649 u
= FIELD_DP32(u
, MVFR1
, SIMDINT
, 0);
1650 u
= FIELD_DP32(u
, MVFR1
, SIMDSP
, 0);
1651 u
= FIELD_DP32(u
, MVFR1
, SIMDHP
, 0);
1652 cpu
->isar
.mvfr1
= u
;
1654 u
= cpu
->isar
.mvfr2
;
1655 u
= FIELD_DP32(u
, MVFR2
, SIMDMISC
, 0);
1656 cpu
->isar
.mvfr2
= u
;
1660 if (!cpu
->has_neon
&& !cpu
->has_vfp
) {
1664 t
= cpu
->isar
.id_aa64isar0
;
1665 t
= FIELD_DP64(t
, ID_AA64ISAR0
, FHM
, 0);
1666 cpu
->isar
.id_aa64isar0
= t
;
1668 t
= cpu
->isar
.id_aa64isar1
;
1669 t
= FIELD_DP64(t
, ID_AA64ISAR1
, FRINTTS
, 0);
1670 cpu
->isar
.id_aa64isar1
= t
;
1672 u
= cpu
->isar
.mvfr0
;
1673 u
= FIELD_DP32(u
, MVFR0
, SIMDREG
, 0);
1674 cpu
->isar
.mvfr0
= u
;
1676 /* Despite the name, this field covers both VFP and Neon */
1677 u
= cpu
->isar
.mvfr1
;
1678 u
= FIELD_DP32(u
, MVFR1
, SIMDFMAC
, 0);
1679 cpu
->isar
.mvfr1
= u
;
1682 if (arm_feature(env
, ARM_FEATURE_M
) && !cpu
->has_dsp
) {
1685 unset_feature(env
, ARM_FEATURE_THUMB_DSP
);
1687 u
= cpu
->isar
.id_isar1
;
1688 u
= FIELD_DP32(u
, ID_ISAR1
, EXTEND
, 1);
1689 cpu
->isar
.id_isar1
= u
;
1691 u
= cpu
->isar
.id_isar2
;
1692 u
= FIELD_DP32(u
, ID_ISAR2
, MULTU
, 1);
1693 u
= FIELD_DP32(u
, ID_ISAR2
, MULTS
, 1);
1694 cpu
->isar
.id_isar2
= u
;
1696 u
= cpu
->isar
.id_isar3
;
1697 u
= FIELD_DP32(u
, ID_ISAR3
, SIMD
, 1);
1698 u
= FIELD_DP32(u
, ID_ISAR3
, SATURATE
, 0);
1699 cpu
->isar
.id_isar3
= u
;
1702 /* Some features automatically imply others: */
1703 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1704 if (arm_feature(env
, ARM_FEATURE_M
)) {
1705 set_feature(env
, ARM_FEATURE_V7
);
1707 set_feature(env
, ARM_FEATURE_V7VE
);
1712 * There exist AArch64 cpus without AArch32 support. When KVM
1713 * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
1714 * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
1715 * As a general principle, we also do not make ID register
1716 * consistency checks anywhere unless using TCG, because only
1717 * for TCG would a consistency-check failure be a QEMU bug.
1719 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)) {
1720 no_aa32
= !cpu_isar_feature(aa64_aa32
, cpu
);
1723 if (arm_feature(env
, ARM_FEATURE_V7VE
)) {
1724 /* v7 Virtualization Extensions. In real hardware this implies
1725 * EL2 and also the presence of the Security Extensions.
1726 * For QEMU, for backwards-compatibility we implement some
1727 * CPUs or CPU configs which have no actual EL2 or EL3 but do
1728 * include the various other features that V7VE implies.
1729 * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
1730 * Security Extensions is ARM_FEATURE_EL3.
1732 assert(!tcg_enabled() || no_aa32
||
1733 cpu_isar_feature(aa32_arm_div
, cpu
));
1734 set_feature(env
, ARM_FEATURE_LPAE
);
1735 set_feature(env
, ARM_FEATURE_V7
);
1737 if (arm_feature(env
, ARM_FEATURE_V7
)) {
1738 set_feature(env
, ARM_FEATURE_VAPA
);
1739 set_feature(env
, ARM_FEATURE_THUMB2
);
1740 set_feature(env
, ARM_FEATURE_MPIDR
);
1741 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1742 set_feature(env
, ARM_FEATURE_V6K
);
1744 set_feature(env
, ARM_FEATURE_V6
);
1747 /* Always define VBAR for V7 CPUs even if it doesn't exist in
1748 * non-EL3 configs. This is needed by some legacy boards.
1750 set_feature(env
, ARM_FEATURE_VBAR
);
1752 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
1753 set_feature(env
, ARM_FEATURE_V6
);
1754 set_feature(env
, ARM_FEATURE_MVFR
);
1756 if (arm_feature(env
, ARM_FEATURE_V6
)) {
1757 set_feature(env
, ARM_FEATURE_V5
);
1758 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1759 assert(!tcg_enabled() || no_aa32
||
1760 cpu_isar_feature(aa32_jazelle
, cpu
));
1761 set_feature(env
, ARM_FEATURE_AUXCR
);
1764 if (arm_feature(env
, ARM_FEATURE_V5
)) {
1765 set_feature(env
, ARM_FEATURE_V4T
);
1767 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
1768 set_feature(env
, ARM_FEATURE_V7MP
);
1770 if (arm_feature(env
, ARM_FEATURE_CBAR_RO
)) {
1771 set_feature(env
, ARM_FEATURE_CBAR
);
1773 if (arm_feature(env
, ARM_FEATURE_THUMB2
) &&
1774 !arm_feature(env
, ARM_FEATURE_M
)) {
1775 set_feature(env
, ARM_FEATURE_THUMB_DSP
);
1779 * We rely on no XScale CPU having VFP so we can use the same bits in the
1780 * TB flags field for VECSTRIDE and XSCALE_CPAR.
1782 assert(arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
) ||
1783 !cpu_isar_feature(aa32_vfp_simd
, cpu
) ||
1784 !arm_feature(env
, ARM_FEATURE_XSCALE
));
1786 if (arm_feature(env
, ARM_FEATURE_V7
) &&
1787 !arm_feature(env
, ARM_FEATURE_M
) &&
1788 !arm_feature(env
, ARM_FEATURE_PMSA
)) {
1789 /* v7VMSA drops support for the old ARMv5 tiny pages, so we
1794 /* For CPUs which might have tiny 1K pages, or which have an
1795 * MPU and might have small region sizes, stick with 1K pages.
1799 if (!set_preferred_target_page_bits(pagebits
)) {
1800 /* This can only ever happen for hotplugging a CPU, or if
1801 * the board code incorrectly creates a CPU which it has
1802 * promised via minimum_page_size that it will not.
1804 error_setg(errp
, "This CPU requires a smaller page size than the "
1809 /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
1810 * We don't support setting cluster ID ([16..23]) (known as Aff2
1811 * in later ARM ARM versions), or any of the higher affinity level fields,
1812 * so these bits always RAZ.
1814 if (cpu
->mp_affinity
== ARM64_AFFINITY_INVALID
) {
1815 cpu
->mp_affinity
= arm_cpu_mp_affinity(cs
->cpu_index
,
1816 ARM_DEFAULT_CPUS_PER_CLUSTER
);
1819 if (cpu
->reset_hivecs
) {
1820 cpu
->reset_sctlr
|= (1 << 13);
1824 if (arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1825 cpu
->reset_sctlr
|= SCTLR_EE
;
1827 cpu
->reset_sctlr
|= SCTLR_B
;
1831 if (!arm_feature(env
, ARM_FEATURE_M
) && !cpu
->has_el3
) {
1832 /* If the has_el3 CPU property is disabled then we need to disable the
1835 unset_feature(env
, ARM_FEATURE_EL3
);
1838 * Disable the security extension feature bits in the processor
1839 * feature registers as well.
1841 cpu
->isar
.id_pfr1
= FIELD_DP32(cpu
->isar
.id_pfr1
, ID_PFR1
, SECURITY
, 0);
1842 cpu
->isar
.id_dfr0
= FIELD_DP32(cpu
->isar
.id_dfr0
, ID_DFR0
, COPSDBG
, 0);
1843 cpu
->isar
.id_aa64pfr0
= FIELD_DP64(cpu
->isar
.id_aa64pfr0
,
1844 ID_AA64PFR0
, EL3
, 0);
1847 if (!cpu
->has_el2
) {
1848 unset_feature(env
, ARM_FEATURE_EL2
);
1851 if (!cpu
->has_pmu
) {
1852 unset_feature(env
, ARM_FEATURE_PMU
);
1854 if (arm_feature(env
, ARM_FEATURE_PMU
)) {
1857 if (!kvm_enabled()) {
1858 arm_register_pre_el_change_hook(cpu
, &pmu_pre_el_change
, 0);
1859 arm_register_el_change_hook(cpu
, &pmu_post_el_change
, 0);
1862 #ifndef CONFIG_USER_ONLY
1863 cpu
->pmu_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, arm_pmu_timer_cb
,
1867 cpu
->isar
.id_aa64dfr0
=
1868 FIELD_DP64(cpu
->isar
.id_aa64dfr0
, ID_AA64DFR0
, PMUVER
, 0);
1869 cpu
->isar
.id_dfr0
= FIELD_DP32(cpu
->isar
.id_dfr0
, ID_DFR0
, PERFMON
, 0);
1874 if (!arm_feature(env
, ARM_FEATURE_EL2
)) {
1876 * Disable the hypervisor feature bits in the processor feature
1877 * registers if we don't have EL2.
1879 cpu
->isar
.id_aa64pfr0
= FIELD_DP64(cpu
->isar
.id_aa64pfr0
,
1880 ID_AA64PFR0
, EL2
, 0);
1881 cpu
->isar
.id_pfr1
= FIELD_DP32(cpu
->isar
.id_pfr1
,
1882 ID_PFR1
, VIRTUALIZATION
, 0);
1885 #ifndef CONFIG_USER_ONLY
1886 if (cpu
->tag_memory
== NULL
&& cpu_isar_feature(aa64_mte
, cpu
)) {
1888 * Disable the MTE feature bits if we do not have tag-memory
1889 * provided by the machine.
1891 cpu
->isar
.id_aa64pfr1
=
1892 FIELD_DP64(cpu
->isar
.id_aa64pfr1
, ID_AA64PFR1
, MTE
, 0);
1896 /* MPU can be configured out of a PMSA CPU either by setting has-mpu
1897 * to false or by setting pmsav7-dregion to 0.
1899 if (!cpu
->has_mpu
) {
1900 cpu
->pmsav7_dregion
= 0;
1902 if (cpu
->pmsav7_dregion
== 0) {
1903 cpu
->has_mpu
= false;
1906 if (arm_feature(env
, ARM_FEATURE_PMSA
) &&
1907 arm_feature(env
, ARM_FEATURE_V7
)) {
1908 uint32_t nr
= cpu
->pmsav7_dregion
;
1911 error_setg(errp
, "PMSAv7 MPU #regions invalid %" PRIu32
, nr
);
1916 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1918 env
->pmsav8
.rbar
[M_REG_NS
] = g_new0(uint32_t, nr
);
1919 env
->pmsav8
.rlar
[M_REG_NS
] = g_new0(uint32_t, nr
);
1920 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1921 env
->pmsav8
.rbar
[M_REG_S
] = g_new0(uint32_t, nr
);
1922 env
->pmsav8
.rlar
[M_REG_S
] = g_new0(uint32_t, nr
);
1925 env
->pmsav7
.drbar
= g_new0(uint32_t, nr
);
1926 env
->pmsav7
.drsr
= g_new0(uint32_t, nr
);
1927 env
->pmsav7
.dracr
= g_new0(uint32_t, nr
);
1932 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1933 uint32_t nr
= cpu
->sau_sregion
;
1936 error_setg(errp
, "v8M SAU #regions invalid %" PRIu32
, nr
);
1941 env
->sau
.rbar
= g_new0(uint32_t, nr
);
1942 env
->sau
.rlar
= g_new0(uint32_t, nr
);
1946 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
1947 set_feature(env
, ARM_FEATURE_VBAR
);
1950 register_cp_regs_for_features(cpu
);
1951 arm_cpu_register_gdb_regs_for_features(cpu
);
1953 init_cpreg_list(cpu
);
1955 #ifndef CONFIG_USER_ONLY
1956 MachineState
*ms
= MACHINE(qdev_get_machine());
1957 unsigned int smp_cpus
= ms
->smp
.cpus
;
1958 bool has_secure
= cpu
->has_el3
|| arm_feature(env
, ARM_FEATURE_M_SECURITY
);
1961 * We must set cs->num_ases to the final value before
1962 * the first call to cpu_address_space_init.
1964 if (cpu
->tag_memory
!= NULL
) {
1965 cs
->num_ases
= 3 + has_secure
;
1967 cs
->num_ases
= 1 + has_secure
;
1971 if (!cpu
->secure_memory
) {
1972 cpu
->secure_memory
= cs
->memory
;
1974 cpu_address_space_init(cs
, ARMASIdx_S
, "cpu-secure-memory",
1975 cpu
->secure_memory
);
1978 if (cpu
->tag_memory
!= NULL
) {
1979 cpu_address_space_init(cs
, ARMASIdx_TagNS
, "cpu-tag-memory",
1982 cpu_address_space_init(cs
, ARMASIdx_TagS
, "cpu-tag-memory",
1983 cpu
->secure_tag_memory
);
1987 cpu_address_space_init(cs
, ARMASIdx_NS
, "cpu-memory", cs
->memory
);
1989 /* No core_count specified, default to smp_cpus. */
1990 if (cpu
->core_count
== -1) {
1991 cpu
->core_count
= smp_cpus
;
1995 if (tcg_enabled()) {
1996 int dcz_blocklen
= 4 << cpu
->dcz_blocksize
;
1999 * We only support DCZ blocklen that fits on one page.
2001 * Architectually this is always true. However TARGET_PAGE_SIZE
2002 * is variable and, for compatibility with -machine virt-2.7,
2003 * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
2004 * But even then, while the largest architectural DCZ blocklen
2005 * is 2KiB, no cpu actually uses such a large blocklen.
2007 assert(dcz_blocklen
<= TARGET_PAGE_SIZE
);
2010 * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
2011 * both nibbles of each byte storing tag data may be written at once.
2012 * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
2014 if (cpu_isar_feature(aa64_mte
, cpu
)) {
2015 assert(dcz_blocklen
>= 2 * TAG_GRANULE
);
2022 acc
->parent_realize(dev
, errp
);
2025 static ObjectClass
*arm_cpu_class_by_name(const char *cpu_model
)
2030 const char *cpunamestr
;
2032 cpuname
= g_strsplit(cpu_model
, ",", 1);
2033 cpunamestr
= cpuname
[0];
2034 #ifdef CONFIG_USER_ONLY
2035 /* For backwards compatibility usermode emulation allows "-cpu any",
2036 * which has the same semantics as "-cpu max".
2038 if (!strcmp(cpunamestr
, "any")) {
2042 typename
= g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr
);
2043 oc
= object_class_by_name(typename
);
2044 g_strfreev(cpuname
);
2046 if (!oc
|| !object_class_dynamic_cast(oc
, TYPE_ARM_CPU
) ||
2047 object_class_is_abstract(oc
)) {
2053 static Property arm_cpu_properties
[] = {
2054 DEFINE_PROP_UINT64("midr", ARMCPU
, midr
, 0),
2055 DEFINE_PROP_UINT64("mp-affinity", ARMCPU
,
2056 mp_affinity
, ARM64_AFFINITY_INVALID
),
2057 DEFINE_PROP_INT32("node-id", ARMCPU
, node_id
, CPU_UNSET_NUMA_NODE_ID
),
2058 DEFINE_PROP_INT32("core-count", ARMCPU
, core_count
, -1),
2059 DEFINE_PROP_END_OF_LIST()
2062 static gchar
*arm_gdb_arch_name(CPUState
*cs
)
2064 ARMCPU
*cpu
= ARM_CPU(cs
);
2065 CPUARMState
*env
= &cpu
->env
;
2067 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
2068 return g_strdup("iwmmxt");
2070 return g_strdup("arm");
2073 #ifndef CONFIG_USER_ONLY
2074 #include "hw/core/sysemu-cpu-ops.h"
2076 static const struct SysemuCPUOps arm_sysemu_ops
= {
2077 .get_phys_page_attrs_debug
= arm_cpu_get_phys_page_attrs_debug
,
2078 .asidx_from_attrs
= arm_asidx_from_attrs
,
2079 .write_elf32_note
= arm_cpu_write_elf32_note
,
2080 .write_elf64_note
= arm_cpu_write_elf64_note
,
2081 .virtio_is_big_endian
= arm_cpu_virtio_is_big_endian
,
2082 .legacy_vmsd
= &vmstate_arm_cpu
,
2087 static const struct TCGCPUOps arm_tcg_ops
= {
2088 .initialize
= arm_translate_init
,
2089 .synchronize_from_tb
= arm_cpu_synchronize_from_tb
,
2090 .debug_excp_handler
= arm_debug_excp_handler
,
2092 #ifdef CONFIG_USER_ONLY
2093 .record_sigsegv
= arm_cpu_record_sigsegv
,
2094 .record_sigbus
= arm_cpu_record_sigbus
,
2096 .tlb_fill
= arm_cpu_tlb_fill
,
2097 .cpu_exec_interrupt
= arm_cpu_exec_interrupt
,
2098 .do_interrupt
= arm_cpu_do_interrupt
,
2099 .do_transaction_failed
= arm_cpu_do_transaction_failed
,
2100 .do_unaligned_access
= arm_cpu_do_unaligned_access
,
2101 .adjust_watchpoint_address
= arm_adjust_watchpoint_address
,
2102 .debug_check_watchpoint
= arm_debug_check_watchpoint
,
2103 .debug_check_breakpoint
= arm_debug_check_breakpoint
,
2104 #endif /* !CONFIG_USER_ONLY */
2106 #endif /* CONFIG_TCG */
2108 static void arm_cpu_class_init(ObjectClass
*oc
, void *data
)
2110 ARMCPUClass
*acc
= ARM_CPU_CLASS(oc
);
2111 CPUClass
*cc
= CPU_CLASS(acc
);
2112 DeviceClass
*dc
= DEVICE_CLASS(oc
);
2114 device_class_set_parent_realize(dc
, arm_cpu_realizefn
,
2115 &acc
->parent_realize
);
2117 device_class_set_props(dc
, arm_cpu_properties
);
2118 device_class_set_parent_reset(dc
, arm_cpu_reset
, &acc
->parent_reset
);
2120 cc
->class_by_name
= arm_cpu_class_by_name
;
2121 cc
->has_work
= arm_cpu_has_work
;
2122 cc
->dump_state
= arm_cpu_dump_state
;
2123 cc
->set_pc
= arm_cpu_set_pc
;
2124 cc
->gdb_read_register
= arm_cpu_gdb_read_register
;
2125 cc
->gdb_write_register
= arm_cpu_gdb_write_register
;
2126 #ifndef CONFIG_USER_ONLY
2127 cc
->sysemu_ops
= &arm_sysemu_ops
;
2129 cc
->gdb_num_core_regs
= 26;
2130 cc
->gdb_core_xml_file
= "arm-core.xml";
2131 cc
->gdb_arch_name
= arm_gdb_arch_name
;
2132 cc
->gdb_get_dynamic_xml
= arm_gdb_get_dynamic_xml
;
2133 cc
->gdb_stop_before_watchpoint
= true;
2134 cc
->disas_set_info
= arm_disas_set_info
;
2137 cc
->tcg_ops
= &arm_tcg_ops
;
2138 #endif /* CONFIG_TCG */
static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}

void arm_cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_align = __alignof__(ARMCPU),
        .instance_init = arm_cpu_instance_init,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}
static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_align = __alignof__(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static void arm_cpu_register_types(void)
{
    type_register_static(&arm_cpu_type_info);
}

type_init(arm_cpu_register_types)