/*
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "exec/page-vary.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#include "hw/boards.h"
#ifdef CONFIG_TCG
#include "hw/intc/armv7m_nvic.h"
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "sysemu/hw_accel.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"

static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = false;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}

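/*
 * Illustrative note for the AArch32 path above: bit 0 of the requested
 * address selects Thumb state, as a BX to that address would, so calling
 * arm_cpu_set_pc() with 0x8001 leaves regs[15] == 0x8000 with env->thumb
 * set, while 0x8000 selects Arm state.
 */
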
static vaddr arm_cpu_get_pc(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        return env->pc;
    } else {
        return env->regs[15];
    }
}

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    /* The program counter is always up to date with CF_PCREL. */
    if (!(tb_cflags(tb) & CF_PCREL)) {
        CPUARMState *env = cs->env_ptr;
        /*
         * It's OK to look at env for the current mode here, because it's
         * never possible for an AArch64 TB to chain to an AArch32 TB.
         */
        if (is_a64(env)) {
            env->pc = tb->pc;
        } else {
            env->regs[15] = tb->pc;
        }
    }
}

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data)
{
    CPUARMState *env = cs->env_ptr;

    if (is_a64(env)) {
        if (tb_cflags(tb) & CF_PCREL) {
            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
        } else {
            env->pc = data[0];
        }
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        if (tb_cflags(tb) & CF_PCREL) {
            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
        } else {
            env->regs[15] = data[0];
        }
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
#endif /* CONFIG_TCG */

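/*
 * Illustrative note on the CF_PCREL case above: data[0] then carries only
 * the intra-page offset of the PC, so with 4KiB target pages a saved
 * offset of 0x234 restores the PC to (pc & TARGET_PAGE_MASK) | 0x234,
 * whereas in the non-PCREL case data[0] is the absolute PC value.
 */
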
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
         | CPU_INTERRUPT_EXITTB);
}

void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}

static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}

static void arm_cpu_reset_hold(Object *obj)
{
    CPUState *s = CPU(obj);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    if (acc->parent_phases.hold) {
        acc->parent_phases.hold(obj);
    }

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys.  */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR_EL1, FPEN, 3);
        /* and to the SVE instructions, with default vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, ZEN, 3);
            env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
        }
        /* and for SME instructions, with default vector length, and TPIDR2 */
        if (cpu_isar_feature(aa64_sme, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, SMEN, 3);
            env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
            if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
                env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
                                                 SMCR, FA64, 1);
            }
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1] = 5 | (1ULL << 37);

        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
        /*
         * Disable access to SCXTNUM_EL0 from CSV2_1p2.
         * This is not yet exposed from the Linux kernel in any way.
         */
        env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
        /* Disable access to Debug Communication Channel (DCC). */
        env->cp15.mdscr_el1 |= 1 << 12;
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset. */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP10, 3);
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP11, 3);
#endif
        if (arm_feature(env, ARM_FEATURE_V8)) {
            env->cp15.rvbar = cpu->rvbar_prop;
            env->regs[15] = cpu->rvbar_prop;
        }
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }

        if (cpu->pmsav8r_hdregion > 0) {
            memset(env->pmsav8.hprbar, 0,
                   sizeof(*env->pmsav8.hprbar) * cpu->pmsav8r_hdregion);
            memset(env->pmsav8.hprlar, 0,
                   sizeof(*env->pmsav8.hprlar) * cpu->pmsav8r_hdregion);
        }

        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);

        arm_rebuild_hflags(env);
    }
}

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cs->env_ptr;
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_I);
    case EXCP_VSERR:
        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_A);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            switch (target_el) {
            case 2:
                /*
                 * According to ARM DDI 0487H.a, an interrupt can be masked
                 * when HCR_E2H and HCR_TGE are both set regardless of the
                 * current Security state. Note that we need to revisit this
                 * part again once we need to support NMI.
                 */
                if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                    unmasked = true;
                }
                break;
            case 3:
                /* Interrupt cannot be masked when the target EL is 3 */
                unmasked = true;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * affect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overriden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}

static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VSERR) {
        excp_idx = EXCP_VSERR;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            /* Taking a virtual abort clears HCR_EL2.VSE */
            env->cp15.hcr_el2 &= ~HCR_VSE;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}

#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}

void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}

void arm_cpu_update_vserr(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = env->cp15.hcr_el2 & HCR_VSE;

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
        }
    }
}

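/*
 * The three helpers above all follow the same pattern: the interrupt line
 * presented to the CPU is the logical OR of a software-writable HCR_EL2
 * bit (VI, VF or VSE) and, for VIRQ/VFIQ, the level driven by the GIC.
 * For example, VIRQ stays asserted while HCR_EL2.VI is set even if the
 * GIC input is low, and cpu_interrupt()/cpu_reset_interrupt() are only
 * called when that computed level actually changes.
 */
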
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (!arm_feature(env, ARM_FEATURE_EL2) &&
        (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
        /*
         * The GIC might tell us about VIRQ and VFIQ state, but if we don't
         * have EL2 support we don't care. (Unless the guest is doing something
         * silly this will only be calls saying "level is still 0".)
         */
        return;
    }

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}

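/*
 * Note on the KVM path above: only the physical IRQ and FIQ lines are
 * forwarded to the kernel here. As the comment in arm_cpu_initfn() says,
 * the inbound VIRQ/VFIQ GPIOs exist with KVM only to keep the same
 * interface as non-KVM CPUs and are not used.
 */
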
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif /* !CONFIG_USER_ONLY */

static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}

#ifdef TARGET_AARCH64

static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i, j;
    int el = arm_current_el(env);
    const char *ns_status;
    bool sve;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                         (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                 psr,
                 psr & PSTATE_N ? 'N' : '-',
                 psr & PSTATE_Z ? 'Z' : '-',
                 psr & PSTATE_C ? 'C' : '-',
                 psr & PSTATE_V ? 'V' : '-',
                 ns_status,
                 el,
                 psr & PSTATE_SP ? 'h' : 't');

    if (cpu_isar_feature(aa64_sme, cpu)) {
        qemu_fprintf(f, "  SVCR=%08" PRIx64 " %c%c",
                     env->svcr,
                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
    }
    if (cpu_isar_feature(aa64_bti, cpu)) {
        qemu_fprintf(f, "  BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
    }
    if (!(flags & CPU_DUMP_FPU)) {
        qemu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        qemu_fprintf(f, "    FPU disabled\n");
        return;
    }
    qemu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
        sve = sme_exception_el(env, el) == 0;
    } else if (cpu_isar_feature(aa64_sve, cpu)) {
        sve = sve_exception_el(env, el) == 0;
    } else {
        sve = false;
    }

    if (sve) {
        int j, zcr_len = sve_vqm1_for_el(env, el);

        /* print the predicate registers */
        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                qemu_fprintf(f, "FFR=");
                /* It's last, so end the line. */
                eol = true;
            } else {
                qemu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate. */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
                             env->vfp.pregs[i].p[j],
                             j ? ":" : eol ? "\n" : " ");
            }
        }

        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                             i, env->vfp.zregs[i].d[1],
                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                             ":%016" PRIx64 ":%016" PRIx64 "\n",
                             i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                             env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            qemu_fprintf(f, "   [%x-%x]=", j, j - 1);
                        } else {
                            qemu_fprintf(f, "     [%x]=", j);
                        }
                    }
                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                 env->vfp.zregs[i].d[j * 2 + 1],
                                 env->vfp.zregs[i].d[j * 2],
                                 odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                         i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}

#else

static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    g_assert_not_reached();
}

#endif

static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}

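/*
 * Worked example: with a cluster size of 8 (the ARM_DEFAULT_CPUS_PER_CLUSTER
 * value passed from arm_cpu_realizefn()), cpu index 5 maps to Aff1 = 0,
 * Aff0 = 5 (affinity 0x005), and cpu index 9 maps to Aff1 = 1, Aff0 = 1
 * (affinity 0x101), since ARM_AFF1_SHIFT is 8.
 */
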
static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu_set_cpustate_pointers(cpu);
    cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                         NULL, g_free);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
     * These values were chosen to fit within the default signal frame.
     * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
     * and our corresponding cpu property.
     */
    cpu->sve_default_vq = 4;
    cpu->sme_default_vq = 2;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}

static Property arm_cpu_gt_cntfrq_property =
            DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
                               NANOSECONDS_PER_SECOND / GTIMER_SCALE);

static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

#ifndef CONFIG_USER_ONLY
static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

static Property arm_cpu_has_vfp_property =
            DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

static Property arm_cpu_has_vfp_d32_property =
            DEFINE_PROP_BOOL("vfp-d32", ARMCPU, has_vfp_d32, true);

static Property arm_cpu_has_neon_property =
            DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);

static Property arm_cpu_has_dsp_property =
            DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);

static bool arm_get_pmu(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    return cpu->has_pmu;
}

static void arm_set_pmu(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (value) {
        if (kvm_enabled() && !kvm_arm_pmu_supported()) {
            error_setg(errp, "'pmu' feature not supported by KVM on this host");
            return;
        }
        set_feature(&cpu->env, ARM_FEATURE_PMU);
    } else {
        unset_feature(&cpu->env, ARM_FEATURE_PMU);
    }
    cpu->has_pmu = value;
}

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
    /*
     * The exact approach to calculating guest ticks is:
     *
     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
     *              NANOSECONDS_PER_SECOND);
     *
     * We don't do that. Rather we intentionally use integer division
     * truncation below and in the caller for the conversion of host monotonic
     * time to guest ticks to provide the exact inverse for the semantics of
     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
     * it loses precision when representing frequencies where
     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq) > 0` holds. Failing to
     * provide an exact inverse leads to scheduling timers with negative
     * periods, which in turn leads to sticky behaviour in the guest.
     *
     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
     * cannot become zero.
     */
    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
        NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}

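/*
 * Worked example of the truncation described above: with the default
 * "cntfrq" of NANOSECONDS_PER_SECOND / GTIMER_SCALE (62.5 MHz for the
 * usual 16ns GTIMER_SCALE) this returns 1000000000 / 62500000 = 16ns per
 * tick, which is exactly the QEMUTimer scale factor installed for the
 * generic timers in arm_cpu_realizefn(). A hypothetical 3 MHz cntfrq
 * would truncate to 333ns rather than 333.33ns; that truncation is the
 * precision loss the comment is describing.
 */
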
void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* M profile implies PMSA. We have to do this here rather than
     * in realize with the other feature-implication checks because
     * we look at the PMSA bit to see if we should add some properties.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        set_feature(&cpu->env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
        object_property_add_uint64_ptr(obj, "rvbar",
                                       &cpu->rvbar_prop,
                                       OBJ_PROP_FLAG_READWRITE);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed. This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);

        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
    }
#endif

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        cpu->has_pmu = true;
        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
    }

    /*
     * Allow user to turn off VFP and Neon support, but only for TCG --
     * KVM does not currently allow us to lie to the guest about its
     * ID/feature registers, so the guest always sees what the host has.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_fp_simd, cpu)) {
            cpu->has_vfp = true;
            cpu->has_vfp_d32 = true;
            if (tcg_enabled() || qtest_enabled()) {
                qdev_property_add_static(DEVICE(obj),
                                         &arm_cpu_has_vfp_property);
            }
        }
    } else if (cpu_isar_feature(aa32_vfp, cpu)) {
        cpu->has_vfp = true;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            cpu->has_vfp_d32 = true;
            /*
             * The permitted values of the SIMDReg bits [3:0] on
             * Armv8-A are either 0b0000 and 0b0010. On such CPUs,
             * make sure that has_vfp_d32 can not be set to false.
             */
            if ((tcg_enabled() || qtest_enabled())
                && !(arm_feature(&cpu->env, ARM_FEATURE_V8)
                     && !arm_feature(&cpu->env, ARM_FEATURE_M))) {
                qdev_property_add_static(DEVICE(obj),
                                         &arm_cpu_has_vfp_d32_property);
            }
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
        cpu->has_neon = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
        arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add_uint32_ptr(obj, "init-svtor",
                                       &cpu->init_svtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        /*
         * Initial value of the NS VTOR (for cores without the Security
         * extension, this is the only VTOR)
         */
        object_property_add_uint32_ptr(obj, "init-nsvtor",
                                       &cpu->init_nsvtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }

    /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
    object_property_add_uint32_ptr(obj, "psci-conduit",
                                   &cpu->psci_conduit,
                                   OBJ_PROP_FLAG_READWRITE);

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

    if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
        qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
    }

    if (kvm_enabled()) {
        kvm_arm_add_vcpu_properties(obj);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
        cpu_isar_feature(aa64_mte, cpu)) {
        object_property_add_link(obj, "tag-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->tag_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);

        if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
            object_property_add_link(obj, "secure-tag-memory",
                                     TYPE_MEMORY_REGION,
                                     (Object **)&cpu->secure_tag_memory,
                                     qdev_prop_allow_set_link_before_realize,
                                     OBJ_PROP_LINK_STRONG);
        }
    }
#endif
}

static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->pmu_timer) {
        timer_free(cpu->pmu_timer);
    }
#endif
}

void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifdef TARGET_AARCH64
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        arm_cpu_sve_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_sme_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_pauth_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_lpa2_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
#endif

    if (kvm_enabled()) {
        kvm_arm_steal_time_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

1573 static void arm_cpu_realizefn(DeviceState
*dev
, Error
**errp
)
1575 CPUState
*cs
= CPU(dev
);
1576 ARMCPU
*cpu
= ARM_CPU(dev
);
1577 ARMCPUClass
*acc
= ARM_CPU_GET_CLASS(dev
);
1578 CPUARMState
*env
= &cpu
->env
;
1580 Error
*local_err
= NULL
;
1581 bool no_aa32
= false;
1583 /* Use pc-relative instructions in system-mode */
1584 #ifndef CONFIG_USER_ONLY
1585 cs
->tcg_cflags
|= CF_PCREL
;
1588 /* If we needed to query the host kernel for the CPU features
1589 * then it's possible that might have failed in the initfn, but
1590 * this is the first point where we can report it.
1592 if (cpu
->host_cpu_probe_failed
) {
1593 if (!kvm_enabled() && !hvf_enabled()) {
1594 error_setg(errp
, "The 'host' CPU type can only be used with KVM or HVF");
1596 error_setg(errp
, "Failed to retrieve host CPU features");
1601 #ifndef CONFIG_USER_ONLY
1602 /* The NVIC and M-profile CPU are two halves of a single piece of
1603 * hardware; trying to use one without the other is a command line
1604 * error and will result in segfaults if not caught here.
1606 if (arm_feature(env
, ARM_FEATURE_M
)) {
1608 error_setg(errp
, "This board cannot be used with Cortex-M CPUs");
1613 error_setg(errp
, "This board can only be used with Cortex-M CPUs");
1618 if (!tcg_enabled() && !qtest_enabled()) {
1620 * We assume that no accelerator except TCG (and the "not really an
1621 * accelerator" qtest) can handle these features, because Arm hardware
1622 * virtualization can't virtualize them.
1624 * Catch all the cases which might cause us to create more than one
1625 * address space for the CPU (otherwise we will assert() later in
1626 * cpu_address_space_init()).
1628 if (arm_feature(env
, ARM_FEATURE_M
)) {
1630 "Cannot enable %s when using an M-profile guest CPU",
1631 current_accel_name());
1636 "Cannot enable %s when guest CPU has EL3 enabled",
1637 current_accel_name());
1640 if (cpu
->tag_memory
) {
1642 "Cannot enable %s when guest CPUs has MTE enabled",
1643 current_accel_name());
1651 if (arm_feature(env
, ARM_FEATURE_GENERIC_TIMER
)) {
1652 if (!cpu
->gt_cntfrq_hz
) {
1653 error_setg(errp
, "Invalid CNTFRQ: %"PRId64
"Hz",
1657 scale
= gt_cntfrq_period_ns(cpu
);
1659 scale
= GTIMER_SCALE
;
1662 cpu
->gt_timer
[GTIMER_PHYS
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1663 arm_gt_ptimer_cb
, cpu
);
1664 cpu
->gt_timer
[GTIMER_VIRT
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1665 arm_gt_vtimer_cb
, cpu
);
1666 cpu
->gt_timer
[GTIMER_HYP
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1667 arm_gt_htimer_cb
, cpu
);
1668 cpu
->gt_timer
[GTIMER_SEC
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1669 arm_gt_stimer_cb
, cpu
);
1670 cpu
->gt_timer
[GTIMER_HYPVIRT
] = timer_new(QEMU_CLOCK_VIRTUAL
, scale
,
1671 arm_gt_hvtimer_cb
, cpu
);
1675 cpu_exec_realizefn(cs
, &local_err
);
1676 if (local_err
!= NULL
) {
1677 error_propagate(errp
, local_err
);
1681 arm_cpu_finalize_features(cpu
, &local_err
);
1682 if (local_err
!= NULL
) {
1683 error_propagate(errp
, local_err
);
1687 if (arm_feature(env
, ARM_FEATURE_AARCH64
) &&
1688 cpu
->has_vfp
!= cpu
->has_neon
) {
1690 * This is an architectural requirement for AArch64; AArch32 is
1691 * more flexible and permits VFP-no-Neon and Neon-no-VFP.
1694 "AArch64 CPUs must have both VFP and Neon or neither");
1698 if (cpu
->has_vfp_d32
!= cpu
->has_neon
) {
1699 error_setg(errp
, "ARM CPUs must have both VFP-D32 and Neon or neither");
1703 if (!cpu
->has_vfp_d32
) {
1706 u
= cpu
->isar
.mvfr0
;
1707 u
= FIELD_DP32(u
, MVFR0
, SIMDREG
, 1); /* 16 registers */
1708 cpu
->isar
.mvfr0
= u
;
1711 if (!cpu
->has_vfp
) {
1715 t
= cpu
->isar
.id_aa64isar1
;
1716 t
= FIELD_DP64(t
, ID_AA64ISAR1
, JSCVT
, 0);
1717 cpu
->isar
.id_aa64isar1
= t
;
1719 t
= cpu
->isar
.id_aa64pfr0
;
1720 t
= FIELD_DP64(t
, ID_AA64PFR0
, FP
, 0xf);
1721 cpu
->isar
.id_aa64pfr0
= t
;
1723 u
= cpu
->isar
.id_isar6
;
1724 u
= FIELD_DP32(u
, ID_ISAR6
, JSCVT
, 0);
1725 u
= FIELD_DP32(u
, ID_ISAR6
, BF16
, 0);
1726 cpu
->isar
.id_isar6
= u
;
1728 u
= cpu
->isar
.mvfr0
;
1729 u
= FIELD_DP32(u
, MVFR0
, FPSP
, 0);
1730 u
= FIELD_DP32(u
, MVFR0
, FPDP
, 0);
1731 u
= FIELD_DP32(u
, MVFR0
, FPDIVIDE
, 0);
1732 u
= FIELD_DP32(u
, MVFR0
, FPSQRT
, 0);
1733 u
= FIELD_DP32(u
, MVFR0
, FPROUND
, 0);
1734 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1735 u
= FIELD_DP32(u
, MVFR0
, FPTRAP
, 0);
1736 u
= FIELD_DP32(u
, MVFR0
, FPSHVEC
, 0);
1738 cpu
->isar
.mvfr0
= u
;
1740 u
= cpu
->isar
.mvfr1
;
1741 u
= FIELD_DP32(u
, MVFR1
, FPFTZ
, 0);
1742 u
= FIELD_DP32(u
, MVFR1
, FPDNAN
, 0);
1743 u
= FIELD_DP32(u
, MVFR1
, FPHP
, 0);
1744 if (arm_feature(env
, ARM_FEATURE_M
)) {
1745 u
= FIELD_DP32(u
, MVFR1
, FP16
, 0);
1747 cpu
->isar
.mvfr1
= u
;
1749 u
= cpu
->isar
.mvfr2
;
1750 u
= FIELD_DP32(u
, MVFR2
, FPMISC
, 0);
1751 cpu
->isar
.mvfr2
= u
;
1754 if (!cpu
->has_neon
) {
1758 unset_feature(env
, ARM_FEATURE_NEON
);
1760 t
= cpu
->isar
.id_aa64isar0
;
1761 t
= FIELD_DP64(t
, ID_AA64ISAR0
, AES
, 0);
1762 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SHA1
, 0);
1763 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SHA2
, 0);
1764 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SHA3
, 0);
1765 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SM3
, 0);
1766 t
= FIELD_DP64(t
, ID_AA64ISAR0
, SM4
, 0);
1767 t
= FIELD_DP64(t
, ID_AA64ISAR0
, DP
, 0);
1768 cpu
->isar
.id_aa64isar0
= t
;
1770 t
= cpu
->isar
.id_aa64isar1
;
1771 t
= FIELD_DP64(t
, ID_AA64ISAR1
, FCMA
, 0);
1772 t
= FIELD_DP64(t
, ID_AA64ISAR1
, BF16
, 0);
1773 t
= FIELD_DP64(t
, ID_AA64ISAR1
, I8MM
, 0);
1774 cpu
->isar
.id_aa64isar1
= t
;
1776 t
= cpu
->isar
.id_aa64pfr0
;
1777 t
= FIELD_DP64(t
, ID_AA64PFR0
, ADVSIMD
, 0xf);
1778 cpu
->isar
.id_aa64pfr0
= t
;
1780 u
= cpu
->isar
.id_isar5
;
1781 u
= FIELD_DP32(u
, ID_ISAR5
, AES
, 0);
1782 u
= FIELD_DP32(u
, ID_ISAR5
, SHA1
, 0);
1783 u
= FIELD_DP32(u
, ID_ISAR5
, SHA2
, 0);
1784 u
= FIELD_DP32(u
, ID_ISAR5
, RDM
, 0);
1785 u
= FIELD_DP32(u
, ID_ISAR5
, VCMA
, 0);
1786 cpu
->isar
.id_isar5
= u
;
1788 u
= cpu
->isar
.id_isar6
;
1789 u
= FIELD_DP32(u
, ID_ISAR6
, DP
, 0);
1790 u
= FIELD_DP32(u
, ID_ISAR6
, FHM
, 0);
1791 u
= FIELD_DP32(u
, ID_ISAR6
, BF16
, 0);
1792 u
= FIELD_DP32(u
, ID_ISAR6
, I8MM
, 0);
1793 cpu
->isar
.id_isar6
= u
;
1795 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1796 u
= cpu
->isar
.mvfr1
;
1797 u
= FIELD_DP32(u
, MVFR1
, SIMDLS
, 0);
1798 u
= FIELD_DP32(u
, MVFR1
, SIMDINT
, 0);
1799 u
= FIELD_DP32(u
, MVFR1
, SIMDSP
, 0);
1800 u
= FIELD_DP32(u
, MVFR1
, SIMDHP
, 0);
1801 cpu
->isar
.mvfr1
= u
;
1803 u
= cpu
->isar
.mvfr2
;
1804 u
= FIELD_DP32(u
, MVFR2
, SIMDMISC
, 0);
1805 cpu
->isar
.mvfr2
= u
;
1809 if (!cpu
->has_neon
&& !cpu
->has_vfp
) {
1813 t
= cpu
->isar
.id_aa64isar0
;
1814 t
= FIELD_DP64(t
, ID_AA64ISAR0
, FHM
, 0);
1815 cpu
->isar
.id_aa64isar0
= t
;
1817 t
= cpu
->isar
.id_aa64isar1
;
1818 t
= FIELD_DP64(t
, ID_AA64ISAR1
, FRINTTS
, 0);
1819 cpu
->isar
.id_aa64isar1
= t
;
1821 u
= cpu
->isar
.mvfr0
;
1822 u
= FIELD_DP32(u
, MVFR0
, SIMDREG
, 0);
1823 cpu
->isar
.mvfr0
= u
;
1825 /* Despite the name, this field covers both VFP and Neon */
1826 u
= cpu
->isar
.mvfr1
;
1827 u
= FIELD_DP32(u
, MVFR1
, SIMDFMAC
, 0);
1828 cpu
->isar
.mvfr1
= u
;
1831 if (arm_feature(env
, ARM_FEATURE_M
) && !cpu
->has_dsp
) {
1834 unset_feature(env
, ARM_FEATURE_THUMB_DSP
);
1836 u
= cpu
->isar
.id_isar1
;
1837 u
= FIELD_DP32(u
, ID_ISAR1
, EXTEND
, 1);
1838 cpu
->isar
.id_isar1
= u
;
1840 u
= cpu
->isar
.id_isar2
;
1841 u
= FIELD_DP32(u
, ID_ISAR2
, MULTU
, 1);
1842 u
= FIELD_DP32(u
, ID_ISAR2
, MULTS
, 1);
1843 cpu
->isar
.id_isar2
= u
;
1845 u
= cpu
->isar
.id_isar3
;
1846 u
= FIELD_DP32(u
, ID_ISAR3
, SIMD
, 1);
1847 u
= FIELD_DP32(u
, ID_ISAR3
, SATURATE
, 0);
1848 cpu
->isar
.id_isar3
= u
;
1851 /* Some features automatically imply others: */
1852 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1853 if (arm_feature(env
, ARM_FEATURE_M
)) {
1854 set_feature(env
, ARM_FEATURE_V7
);
1856 set_feature(env
, ARM_FEATURE_V7VE
);
1861 * There exist AArch64 cpus without AArch32 support. When KVM
1862 * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
1863 * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
1864 * As a general principle, we also do not make ID register
1865 * consistency checks anywhere unless using TCG, because only
1866 * for TCG would a consistency-check failure be a QEMU bug.
1868 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)) {
1869 no_aa32
= !cpu_isar_feature(aa64_aa32
, cpu
);
1872 if (arm_feature(env
, ARM_FEATURE_V7VE
)) {
1873 /* v7 Virtualization Extensions. In real hardware this implies
1874 * EL2 and also the presence of the Security Extensions.
1875 * For QEMU, for backwards-compatibility we implement some
1876 * CPUs or CPU configs which have no actual EL2 or EL3 but do
1877 * include the various other features that V7VE implies.
1878 * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
1879 * Security Extensions is ARM_FEATURE_EL3.
1881 assert(!tcg_enabled() || no_aa32
||
1882 cpu_isar_feature(aa32_arm_div
, cpu
));
1883 set_feature(env
, ARM_FEATURE_LPAE
);
1884 set_feature(env
, ARM_FEATURE_V7
);
1886 if (arm_feature(env
, ARM_FEATURE_V7
)) {
1887 set_feature(env
, ARM_FEATURE_VAPA
);
1888 set_feature(env
, ARM_FEATURE_THUMB2
);
1889 set_feature(env
, ARM_FEATURE_MPIDR
);
1890 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1891 set_feature(env
, ARM_FEATURE_V6K
);
1893 set_feature(env
, ARM_FEATURE_V6
);
1896 /* Always define VBAR for V7 CPUs even if it doesn't exist in
1897 * non-EL3 configs. This is needed by some legacy boards.
1899 set_feature(env
, ARM_FEATURE_VBAR
);
1901 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
1902 set_feature(env
, ARM_FEATURE_V6
);
1903 set_feature(env
, ARM_FEATURE_MVFR
);
1905 if (arm_feature(env
, ARM_FEATURE_V6
)) {
1906 set_feature(env
, ARM_FEATURE_V5
);
1907 if (!arm_feature(env
, ARM_FEATURE_M
)) {
1908 assert(!tcg_enabled() || no_aa32
||
1909 cpu_isar_feature(aa32_jazelle
, cpu
));
1910 set_feature(env
, ARM_FEATURE_AUXCR
);
1913 if (arm_feature(env
, ARM_FEATURE_V5
)) {
1914 set_feature(env
, ARM_FEATURE_V4T
);
1916 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
1917 set_feature(env
, ARM_FEATURE_V7MP
);
1919 if (arm_feature(env
, ARM_FEATURE_CBAR_RO
)) {
1920 set_feature(env
, ARM_FEATURE_CBAR
);
1922 if (arm_feature(env
, ARM_FEATURE_THUMB2
) &&
1923 !arm_feature(env
, ARM_FEATURE_M
)) {
1924 set_feature(env
, ARM_FEATURE_THUMB_DSP
);
1928 * We rely on no XScale CPU having VFP so we can use the same bits in the
1929 * TB flags field for VECSTRIDE and XSCALE_CPAR.
1931 assert(arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
) ||
1932 !cpu_isar_feature(aa32_vfp_simd
, cpu
) ||
1933 !arm_feature(env
, ARM_FEATURE_XSCALE
));
1935 if (arm_feature(env
, ARM_FEATURE_V7
) &&
1936 !arm_feature(env
, ARM_FEATURE_M
) &&
1937 !arm_feature(env
, ARM_FEATURE_PMSA
)) {
1938 /* v7VMSA drops support for the old ARMv5 tiny pages, so we
1943 /* For CPUs which might have tiny 1K pages, or which have an
1944 * MPU and might have small region sizes, stick with 1K pages.
1948 if (!set_preferred_target_page_bits(pagebits
)) {
1949 /* This can only ever happen for hotplugging a CPU, or if
1950 * the board code incorrectly creates a CPU which it has
1951 * promised via minimum_page_size that it will not.
1953 error_setg(errp
, "This CPU requires a smaller page size than the "
1958 /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
1959 * We don't support setting cluster ID ([16..23]) (known as Aff2
1960 * in later ARM ARM versions), or any of the higher affinity level fields,
1961 * so these bits always RAZ.
1963 if (cpu
->mp_affinity
== ARM64_AFFINITY_INVALID
) {
1964 cpu
->mp_affinity
= arm_cpu_mp_affinity(cs
->cpu_index
,
1965 ARM_DEFAULT_CPUS_PER_CLUSTER
);
1968 if (cpu
->reset_hivecs
) {
1969 cpu
->reset_sctlr
|= (1 << 13);
1973 if (arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1974 cpu
->reset_sctlr
|= SCTLR_EE
;
1976 cpu
->reset_sctlr
|= SCTLR_B
;
1980 if (!arm_feature(env
, ARM_FEATURE_M
) && !cpu
->has_el3
) {
1981 /* If the has_el3 CPU property is disabled then we need to disable the
1984 unset_feature(env
, ARM_FEATURE_EL3
);
1987 * Disable the security extension feature bits in the processor
1988 * feature registers as well.
1990 cpu
->isar
.id_pfr1
= FIELD_DP32(cpu
->isar
.id_pfr1
, ID_PFR1
, SECURITY
, 0);
1991 cpu
->isar
.id_dfr0
= FIELD_DP32(cpu
->isar
.id_dfr0
, ID_DFR0
, COPSDBG
, 0);
1992 cpu
->isar
.id_aa64pfr0
= FIELD_DP64(cpu
->isar
.id_aa64pfr0
,
1993 ID_AA64PFR0
, EL3
, 0);
1995 /* Disable the realm management extension, which requires EL3. */
1996 cpu
->isar
.id_aa64pfr0
= FIELD_DP64(cpu
->isar
.id_aa64pfr0
,
1997 ID_AA64PFR0
, RME
, 0);
2000 if (!cpu
->has_el2
) {
2001 unset_feature(env
, ARM_FEATURE_EL2
);
2004 if (!cpu
->has_pmu
) {
2005 unset_feature(env
, ARM_FEATURE_PMU
);
2007 if (arm_feature(env
, ARM_FEATURE_PMU
)) {
2010 if (!kvm_enabled()) {
2011 arm_register_pre_el_change_hook(cpu
, &pmu_pre_el_change
, 0);
2012 arm_register_el_change_hook(cpu
, &pmu_post_el_change
, 0);
2015 #ifndef CONFIG_USER_ONLY
2016 cpu
->pmu_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, arm_pmu_timer_cb
,
2020 cpu
->isar
.id_aa64dfr0
=
2021 FIELD_DP64(cpu
->isar
.id_aa64dfr0
, ID_AA64DFR0
, PMUVER
, 0);
2022 cpu
->isar
.id_dfr0
= FIELD_DP32(cpu
->isar
.id_dfr0
, ID_DFR0
, PERFMON
, 0);
2027 if (!arm_feature(env
, ARM_FEATURE_EL2
)) {
2029 * Disable the hypervisor feature bits in the processor feature
2030 * registers if we don't have EL2.
2032 cpu
->isar
.id_aa64pfr0
= FIELD_DP64(cpu
->isar
.id_aa64pfr0
,
2033 ID_AA64PFR0
, EL2
, 0);
2034 cpu
->isar
.id_pfr1
= FIELD_DP32(cpu
->isar
.id_pfr1
,
2035 ID_PFR1
, VIRTUALIZATION
, 0);
2038 #ifndef CONFIG_USER_ONLY
2039 if (cpu
->tag_memory
== NULL
&& cpu_isar_feature(aa64_mte
, cpu
)) {
2041 * Disable the MTE feature bits if we do not have tag-memory
2042 * provided by the machine.
2044 cpu
->isar
.id_aa64pfr1
=
2045 FIELD_DP64(cpu
->isar
.id_aa64pfr1
, ID_AA64PFR1
, MTE
, 0);
2049 if (tcg_enabled()) {
2051 * Don't report the Statistical Profiling Extension in the ID
2052 * registers, because TCG doesn't implement it yet (not even a
2053 * minimal stub version) and guests will fall over when they
2054 * try to access the non-existent system registers for it.
2056 cpu
->isar
.id_aa64dfr0
=
2057 FIELD_DP64(cpu
->isar
.id_aa64dfr0
, ID_AA64DFR0
, PMSVER
, 0);
    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
     * to false or by setting pmsav7-dregion to 0.
     */
    if (!cpu->has_mpu || cpu->pmsav7_dregion == 0) {
        cpu->has_mpu = false;
        cpu->pmsav7_dregion = 0;
        cpu->pmsav8r_hdregion = 0;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        uint32_t nr = cpu->pmsav7_dregion;

        if (nr > 0xff) {
            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                /* PMSAv8 */
                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
                }
            } else {
                env->pmsav7.drbar = g_new0(uint32_t, nr);
                env->pmsav7.drsr = g_new0(uint32_t, nr);
                env->pmsav7.dracr = g_new0(uint32_t, nr);
            }
        }

        if (cpu->pmsav8r_hdregion > 0xff) {
            error_setg(errp, "PMSAv8 MPU EL2 #regions invalid %" PRIu32,
                       cpu->pmsav8r_hdregion);
            return;
        }

        if (cpu->pmsav8r_hdregion) {
            env->pmsav8.hprbar = g_new0(uint32_t,
                                        cpu->pmsav8r_hdregion);
            env->pmsav8.hprlar = g_new0(uint32_t,
                                        cpu->pmsav8r_hdregion);
        }
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        uint32_t nr = cpu->sau_sregion;

        if (nr > 0xff) {
            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            env->sau.rbar = g_new0(uint32_t, nr);
            env->sau.rlar = g_new0(uint32_t, nr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }

    register_cp_regs_for_features(cpu);
    arm_cpu_register_gdb_regs_for_features(cpu);

    init_cpreg_list(cpu);

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);

    /*
     * We must set cs->num_ases to the final value before
     * the first call to cpu_address_space_init.
     */
    if (cpu->tag_memory != NULL) {
        cs->num_ases = 3 + has_secure;
    } else {
        cs->num_ases = 1 + has_secure;
    }

    if (has_secure) {
        if (!cpu->secure_memory) {
            cpu->secure_memory = cs->memory;
        }
        cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
                               cpu->secure_memory);
    }

    if (cpu->tag_memory != NULL) {
        cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
                               cpu->tag_memory);
        if (has_secure) {
            cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
                                   cpu->secure_tag_memory);
        }
    }

    cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);

    /* No core_count specified, default to smp_cpus. */
    if (cpu->core_count == -1) {
        cpu->core_count = smp_cpus;
    }
#endif

    if (tcg_enabled()) {
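        /*
         * dcz_blocksize is the log2 of the DC ZVA block size in words
         * (the value reported in DCZID_EL0.BS), so the length in bytes
         * is 4 << dcz_blocksize.
         */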
        int dcz_blocklen = 4 << cpu->dcz_blocksize;

        /*
         * We only support DCZ blocklen that fits on one page.
         *
         * Architecturally this is always true.  However TARGET_PAGE_SIZE
         * is variable and, for compatibility with -machine virt-2.7,
         * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
         * But even then, while the largest architectural DCZ blocklen
         * is 2KiB, no cpu actually uses such a large blocklen.
         */
        assert(dcz_blocklen <= TARGET_PAGE_SIZE);

        /*
         * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
         * both nibbles of each byte storing tag data may be written at once.
         * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
         */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            assert(dcz_blocklen >= 2 * TAG_GRANULE);
        }
    }

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    acc->parent_realize(dev, errp);
}

static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;
    const char *cpunamestr;

    cpuname = g_strsplit(cpu_model, ",", 1);
    cpunamestr = cpuname[0];
#ifdef CONFIG_USER_ONLY
    /* For backwards compatibility usermode emulation allows "-cpu any",
     * which has the same semantics as "-cpu max".
     */
    if (!strcmp(cpunamestr, "any")) {
        cpunamestr = "max";
    }
#endif

    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

static Property arm_cpu_properties[] = {
    DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                        mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};

static gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return g_strdup("iwmmxt");
    }
    return g_strdup("arm");
}

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps arm_sysemu_ops = {
    .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
    .asidx_from_attrs = arm_asidx_from_attrs,
    .write_elf32_note = arm_cpu_write_elf32_note,
    .write_elf64_note = arm_cpu_write_elf64_note,
    .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
    .legacy_vmsd = &vmstate_arm_cpu,
};
#endif

#ifdef CONFIG_TCG
static const struct TCGCPUOps arm_tcg_ops = {
    .initialize = arm_translate_init,
    .synchronize_from_tb = arm_cpu_synchronize_from_tb,
    .debug_excp_handler = arm_debug_excp_handler,
    .restore_state_to_opc = arm_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
    .record_sigsegv = arm_cpu_record_sigsegv,
    .record_sigbus = arm_cpu_record_sigbus,
#else
    .tlb_fill = arm_cpu_tlb_fill,
    .cpu_exec_interrupt = arm_cpu_exec_interrupt,
    .do_interrupt = arm_cpu_do_interrupt,
    .do_transaction_failed = arm_cpu_do_transaction_failed,
    .do_unaligned_access = arm_cpu_do_unaligned_access,
    .adjust_watchpoint_address = arm_adjust_watchpoint_address,
    .debug_check_watchpoint = arm_debug_check_watchpoint,
    .debug_check_breakpoint = arm_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */

static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);

    device_class_set_props(dc, arm_cpu_properties);

    resettable_class_set_parent_phases(rc, NULL, arm_cpu_reset_hold, NULL,
                                       &acc->parent_phases);

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->get_pc = arm_cpu_get_pc;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &arm_sysemu_ops;
#endif
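    /*
     * 26 is the traditional gdb ARM register set: r0-r15, the eight
     * legacy FPA registers plus FPS, and CPSR.
     */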
    cc->gdb_num_core_regs = 26;
    cc->gdb_core_xml_file = "arm-core.xml";
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = arm_disas_set_info;

#ifdef CONFIG_TCG
    cc->tcg_ops = &arm_tcg_ops;
#endif /* CONFIG_TCG */
}

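/*
 * Instance init used by the per-model CPU types registered below: run the
 * model's own initfn (from its ARMCPUInfo) and then the common
 * arm_cpu_post_init.
 */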
static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}

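/*
 * Register a QOM type for one CPU model: the type is named
 * "<model>-arm-cpu" and carries its ARMCPUInfo as class_data so that
 * class_init (or the default cpu_register_class_init) can reach it.
 */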
void arm_cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_align = __alignof__(ARMCPU),
        .instance_init = arm_cpu_instance_init,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}

static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_align = __alignof__(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static void arm_cpu_register_types(void)
{
    type_register_static(&arm_cpu_type_info);
}

type_init(arm_cpu_register_types)