/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#include "hw/boards.h"
#ifdef CONFIG_TCG
#include "hw/intc/armv7m_nvic.h"
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
#include "cpregs.h"
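
/*
 * Editor's note (added commentary): on AArch32 QEMU follows the interworking
 * convention used by branch instructions and vector tables: bit 0 of a PC
 * value selects the Thumb instruction set. arm_cpu_set_pc() below therefore
 * stores the PC with bit 0 cleared and latches the state into env->thumb,
 * e.g. a hypothetical write of 0x00000101 yields PC 0x100 with env->thumb = 1.
 */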
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = false;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}
static vaddr arm_cpu_get_pc(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        return env->pc;
    } else {
        return env->regs[15];
    }
}
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    /* The program counter is always up to date with CF_PCREL. */
    if (!(tb_cflags(tb) & CF_PCREL)) {
        CPUARMState *env = cpu_env(cs);
        /*
         * It's OK to look at env for the current mode here, because it's
         * never possible for an AArch64 TB to chain to an AArch32 TB.
         */
        if (is_a64(env)) {
            env->pc = tb->pc;
        } else {
            env->regs[15] = tb->pc;
        }
    }
}
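
/*
 * Editor's note (added commentary): the data[] words consumed by
 * arm_restore_state_to_opc() are the per-instruction values recorded at
 * translation time: data[0] is the PC (only its in-page offset when
 * CF_PCREL is in effect), data[1] the AArch32 IT-block condexec bits,
 * and data[2] the syndrome fragment shifted by ARM_INSN_START_WORD2_SHIFT.
 */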
void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data)
{
    CPUARMState *env = cpu_env(cs);

    if (is_a64(env)) {
        if (tb_cflags(tb) & CF_PCREL) {
            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
        } else {
            env->pc = data[0];
        }
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        if (tb_cflags(tb) & CF_PCREL) {
            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
        } else {
            env->regs[15] = data[0];
        }
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
#endif /* CONFIG_TCG */
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
         | CPU_INTERRUPT_EXITTB);
}
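
/*
 * Editor's note (added commentary): devices that track the CPU's exception
 * level register themselves here; for example (illustrative only) the GICv3
 * CPU interface model registers a hook so it can recompute its banked state:
 *
 *     arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
 *
 * Pre-change hooks run before the EL transition is committed, the plain
 * el_change hooks afterwards.
 */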
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /*
     * A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}
static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /*
     * Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}
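
/*
 * Editor's note (added commentary): this is the 'hold' phase of QEMU's
 * three-phase (enter/hold/exit) device reset. The parent class's hold phase
 * is chained first, then everything up to the end_reset_fields marker in
 * CPUARMState is zeroed before architecture-specific reset values are
 * applied below.
 */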
static void arm_cpu_reset_hold(Object *obj)
{
    CPUState *s = CPU(obj);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    if (acc->parent_phases.hold) {
        acc->parent_phases.hold(obj);
    }

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys. */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* Trap on implementation defined registers. */
        if (cpu_isar_feature(aa64_tidcp1, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_TIDCP;
        }
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR_EL1, FPEN, 3);
        /* and to the SVE instructions, with default vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, ZEN, 3);
            env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
        }
        /* and for SME instructions, with default vector length, and TPIDR2 */
        if (cpu_isar_feature(aa64_sme, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, SMEN, 3);
            env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
            if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
                env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
                                                 SMCR, FA64, 1);
            }
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1] = 5 | (1ULL << 37);

        /* Enable MTE */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
        /*
         * Disable access to SCXTNUM_EL0 from CSV2_1p2.
         * This is not yet exposed from the Linux kernel in any way.
         */
        env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
        /* Disable access to Debug Communication Channel (DCC). */
        env->cp15.mdscr_el1 |= 1 << 12;
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset. */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP10, 3);
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP11, 3);
#endif
        if (arm_feature(env, ARM_FEATURE_V8)) {
            env->cp15.rvbar = cpu->rvbar_prop;
            env->regs[15] = cpu->rvbar_prop;
        }
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /*
     * AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
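
    /*
     * Editor's note (added commentary): the block below reproduces the
     * architecturally defined M-profile reset behaviour: LTPSIZE, AIRCR,
     * CCR and FPCCR take their documented reset values, and the initial
     * MSP and PC are fetched from vector-table offsets 0x0 and 0x4.
     */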
    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /*
             * This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /*
         * In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /*
             * Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /*
             * Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /*
     * M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }

        if (cpu->pmsav8r_hdregion > 0) {
            memset(env->pmsav8.hprbar, 0,
                   sizeof(*env->pmsav8.hprbar) * cpu->pmsav8r_hdregion);
            memset(env->pmsav8.hprlar, 0,
                   sizeof(*env->pmsav8.hprlar) * cpu->pmsav8r_hdregion);
        }

        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /*
         * SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);

        arm_rebuild_hflags(env);
    }
}
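
/*
 * Editor's note (added commentary): boards that boot a guest kernel
 * directly, skipping real firmware, use this helper to mimic the EL3/EL2
 * setup that firmware would have performed, e.g. (illustrative)
 * arm_emulate_firmware_reset(cs, 2) before entering Linux at EL2.
 */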
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
{
    ARMCPU *cpu = ARM_CPU(cpustate);
    CPUARMState *env = &cpu->env;
    bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
    bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);

    /*
     * Check we have the EL we're aiming for. If that is the
     * highest implemented EL, then cpu_reset has already done
     * all the work.
     */
    switch (target_el) {
    case 3:
        assert(have_el3);
        return;
    case 2:
        assert(have_el2);
        if (!have_el3) {
            return;
        }
        break;
    case 1:
        if (!have_el3 && !have_el2) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (have_el3) {
        /*
         * Set the EL3 state so code can run at EL2. This should match
         * the requirements set by Linux in its booting spec.
         */
        if (env->aarch64) {
            env->cp15.scr_el3 |= SCR_RW;
            if (cpu_isar_feature(aa64_pauth, cpu)) {
                env->cp15.scr_el3 |= SCR_API | SCR_APK;
            }
            if (cpu_isar_feature(aa64_mte, cpu)) {
                env->cp15.scr_el3 |= SCR_ATA;
            }
            if (cpu_isar_feature(aa64_sve, cpu)) {
                env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
                env->vfp.zcr_el[3] = 0xf;
            }
            if (cpu_isar_feature(aa64_sme, cpu)) {
                env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
                env->cp15.scr_el3 |= SCR_ENTP2;
                env->vfp.smcr_el[3] = 0xf;
            }
            if (cpu_isar_feature(aa64_hcx, cpu)) {
                env->cp15.scr_el3 |= SCR_HXEN;
            }
            if (cpu_isar_feature(aa64_fgt, cpu)) {
                env->cp15.scr_el3 |= SCR_FGTEN;
            }
        }

        if (target_el == 2) {
            /* If the guest is at EL2 then Linux expects the HVC insn to work */
            env->cp15.scr_el3 |= SCR_HCE;
        }

        /* Put CPU into non-secure state */
        env->cp15.scr_el3 |= SCR_NS;
        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
        env->cp15.nsacr |= 3 << 10;
    }

    if (have_el2 && target_el < 2) {
        /* Set EL2 state so code can run at EL1. */
        if (env->aarch64) {
            env->cp15.hcr_el2 |= HCR_RW;
        }
    }

    /* Set the CPU to the desired state */
    if (env->aarch64) {
        env->pstate = aarch64_pstate_mode(target_el, true);
    } else {
        static const uint32_t mode_for_el[] = {
            0,
            ARM_CPU_MODE_SVC,
            ARM_CPU_MODE_HYP,
            ARM_CPU_MODE_SVC,
        };

        cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
    }
}
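
/*
 * Editor's note (added commentary): arm_excp_unmasked() below decides
 * whether a pending physical or virtual IRQ/FIQ/SError may be taken right
 * now, combining the PSTATE.{A,I,F} mask bits with the SCR/HCR routing
 * overrides described in its comments.
 */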
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cpu_env(cs);
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_I);
    case EXCP_VSERR:
        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
            /* VSErrs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_A);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            switch (target_el) {
            case 2:
                /*
                 * According to ARM DDI 0487H.a, an interrupt can be masked
                 * when HCR_E2H and HCR_TGE are both set regardless of the
                 * current Security state. Note that we need to revisit this
                 * part again once we need to support NMI.
                 */
                if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                    unmasked = true;
                }
                break;
            case 3:
                /* Interrupt cannot be masked when the target EL is 3 */
                unmasked = true;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}
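
/*
 * Editor's note (added commentary): although prioritization is
 * IMPLEMENTATION DEFINED architecturally, the checks below are ordered
 * physical FIQ, physical IRQ, then the virtual VIRQ/VFIQ/VSERR lines;
 * the first exception that is both pending and unmasked wins.
 */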
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cpu_env(cs);
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VSERR) {
        excp_idx = EXCP_VSERR;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            /* Taking a virtual abort clears HCR_EL2.VSE */
            env->cp15.hcr_el2 &= ~HCR_VSE;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}

#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
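
/*
 * Editor's note (added commentary): the effective VIRQ level computed
 * below is (HCR_EL2.VI || GIC vIRQ input); e.g. if the GIC drops its vIRQ
 * line while the hypervisor still has HCR_EL2.VI set, the vCPU continues
 * to see a pending virtual IRQ. VFIQ and VSERR follow the same pattern.
 */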
void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}
void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}
void arm_cpu_update_vserr(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = env->cp15.hcr_el2 & HCR_VSE;

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (!arm_feature(env, ARM_FEATURE_EL2) &&
        (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
        /*
         * The GIC might tell us about VIRQ and VFIQ state, but if we don't
         * have EL2 support we don't care. (Unless the guest is doing something
         * silly this will only be calls saying "level is still 0".)
         */
        return;
    }

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}
#endif /* !CONFIG_USER_ONLY */
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}
#ifdef TARGET_AARCH64

static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i, j;
    int el = arm_current_el(env);
    const char *ns_status;
    bool sve;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                         (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                 psr,
                 psr & PSTATE_N ? 'N' : '-',
                 psr & PSTATE_Z ? 'Z' : '-',
                 psr & PSTATE_C ? 'C' : '-',
                 psr & PSTATE_V ? 'V' : '-',
                 ns_status,
                 el,
                 psr & PSTATE_SP ? 'h' : 't');

    if (cpu_isar_feature(aa64_sme, cpu)) {
        qemu_fprintf(f, "  SVCR=%08" PRIx64 " %c%c",
                     env->svcr,
                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
    }
    if (cpu_isar_feature(aa64_bti, cpu)) {
        qemu_fprintf(f, "  BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
    }
    if (!(flags & CPU_DUMP_FPU)) {
        qemu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        qemu_fprintf(f, "    FPU disabled\n");
        return;
    }
    qemu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
        sve = sme_exception_el(env, el) == 0;
    } else if (cpu_isar_feature(aa64_sve, cpu)) {
        sve = sve_exception_el(env, el) == 0;
    } else {
        sve = false;
    }

    if (sve) {
        int zcr_len = sve_vqm1_for_el(env, el);

        /* print the SVE predicate registers */
        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                qemu_fprintf(f, "FFR=");
                /* It's last, so end the line. */
                eol = true;
            } else {
                qemu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate. */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
                             env->vfp.pregs[i].p[j],
                             j ? ":" : eol ? "\n" : " ");
            }
        }

        if (zcr_len == 0) {
            /*
             * With vl=16, there are only 37 columns per register,
             * so output two registers per line.
             */
            for (i = 0; i < 32; i++) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                             i, env->vfp.zregs[i].d[1],
                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            }
        } else {
            for (i = 0; i < 32; i++) {
                qemu_fprintf(f, "Z%02d=", i);
                for (j = zcr_len; j >= 0; j--) {
                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                 env->vfp.zregs[i].d[j * 2 + 1],
                                 env->vfp.zregs[i].d[j * 2 + 0],
                                 j ? ":" : "\n");
                }
            }
        }
    } else {
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                         i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }

    if (cpu_isar_feature(aa64_sme, cpu) &&
        FIELD_EX64(env->svcr, SVCR, ZA) &&
        sme_exception_el(env, el) == 0) {
        int zcr_len = sve_vqm1_for_el_sm(env, el, true);
        int svl = (zcr_len + 1) * 16;
        int svl_lg10 = svl < 100 ? 2 : 3;

        for (i = 0; i < svl; i++) {
            qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i);
            for (j = zcr_len; j >= 0; --j) {
                qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c",
                             env->zarray[i].d[2 * j + 1],
                             env->zarray[i].d[2 * j],
                             j ? ':' : '\n');
            }
        }
    }
}

#else

static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    g_assert_not_reached();
}

#endif
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
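
/*
 * Editor's note (added commentary, worked example): with the default
 * cluster size of 8, cpu index 10 maps to Aff1 = 10 / 8 = 1 and
 * Aff0 = 10 % 8 = 2, i.e. an MPIDR affinity value of 0x102.
 */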
static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                         NULL, g_free);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
     * These values were chosen to fit within the default signal frame.
     * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
     * and our corresponding cpu property.
     */
    cpu->sve_default_vq = 4;
    cpu->sme_default_vq = 2;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /*
         * VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /*
     * DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}
static Property arm_cpu_gt_cntfrq_property =
            DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
                               NANOSECONDS_PER_SECOND / GTIMER_SCALE);

static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

#ifndef CONFIG_USER_ONLY
static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

static Property arm_cpu_has_vfp_property =
            DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

static Property arm_cpu_has_vfp_d32_property =
            DEFINE_PROP_BOOL("vfp-d32", ARMCPU, has_vfp_d32, true);

static Property arm_cpu_has_neon_property =
            DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);

static Property arm_cpu_has_dsp_property =
            DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/*
 * This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);
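
/*
 * Editor's note (added commentary): the static properties above surface on
 * the -cpu command line, e.g. (illustrative) "-cpu cortex-a15,has_el3=off"
 * or "-cpu cortex-m4,vfp=off", subject to the conditional registration done
 * in arm_cpu_post_init() below.
 */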
static bool arm_get_pmu(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    return cpu->has_pmu;
}

static void arm_set_pmu(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (value) {
        if (kvm_enabled() && !kvm_arm_pmu_supported()) {
            error_setg(errp, "'pmu' feature not supported by KVM on this host");
            return;
        }
        set_feature(&cpu->env, ARM_FEATURE_PMU);
    } else {
        unset_feature(&cpu->env, ARM_FEATURE_PMU);
    }
    cpu->has_pmu = value;
}
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
    /*
     * The exact approach to calculating guest ticks is:
     *
     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
     *              NANOSECONDS_PER_SECOND);
     *
     * We don't do that. Rather we intentionally use integer division
     * truncation below and in the caller for the conversion of host monotonic
     * time to guest ticks to provide the exact inverse for the semantics of
     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
     * it loses precision when representing frequencies where
     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq_hz) > 0` holds. Failing to
     * provide an exact inverse leads to scheduling timers with negative
     * periods, which in turn leads to sticky behaviour in the guest.
     *
     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
     * cannot become zero.
     */
    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
        NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}
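
/*
 * Editor's note (added commentary, worked example): with the default CNTFRQ
 * of NANOSECONDS_PER_SECOND / GTIMER_SCALE = 62.5 MHz (GTIMER_SCALE is 16),
 * this returns 1000000000 / 62500000 = 16 ns per tick, the exact integer
 * inverse required by the QEMUTimer scale factor.
 */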
static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    bool no_aa32 = false;

    /*
     * Some features automatically imply others: set the feature
     * bits explicitly for these cases.
     */

    if (arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V7);
        } else {
            set_feature(env, ARM_FEATURE_V7VE);
        }
    }

    /*
     * There exist AArch64 cpus without AArch32 support. When KVM
     * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
     * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
     * As a general principle, we also do not make ID register
     * consistency checks anywhere unless using TCG, because only
     * for TCG would a consistency-check failure be a QEMU bug.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
    }

    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        /*
         * v7 Virtualization Extensions. In real hardware this implies
         * EL2 and also the presence of the Security Extensions.
         * For QEMU, for backwards-compatibility we implement some
         * CPUs or CPU configs which have no actual EL2 or EL3 but do
         * include the various other features that V7VE implies.
         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
         * Security Extensions is ARM_FEATURE_EL3.
         */
        assert(!tcg_enabled() || no_aa32 ||
               cpu_isar_feature(aa32_arm_div, cpu));
        set_feature(env, ARM_FEATURE_LPAE);
        set_feature(env, ARM_FEATURE_V7);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_MPIDR);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            set_feature(env, ARM_FEATURE_V6);
        }

        /*
         * Always define VBAR for V7 CPUs even if it doesn't exist in
         * non-EL3 configs. This is needed by some legacy boards.
         */
        set_feature(env, ARM_FEATURE_VBAR);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            assert(!tcg_enabled() || no_aa32 ||
                   cpu_isar_feature(aa32_jazelle, cpu));
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        set_feature(env, ARM_FEATURE_V7MP);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
        set_feature(env, ARM_FEATURE_CBAR);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
        !arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DSP);
    }
}
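
/*
 * Editor's note (added commentary): the implication chain above means a CPU
 * model that sets ARM_FEATURE_V8 on an A-profile core transitively gains
 * V7VE, V7, V6K, V6, V5 and V4T, so models only declare their newest
 * architecture version.
 */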
void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /*
     * Some features imply others. Figure this out now, because we
     * are going to look at the feature bits in deciding which
     * properties to add.
     */
    arm_cpu_propagate_feature_implications(cpu);

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
        object_property_add_uint64_ptr(obj, "rvbar",
                                       &cpu->rvbar_prop,
                                       OBJ_PROP_FLAG_READWRITE);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /*
         * Add the has_el3 state CPU property only if EL3 is allowed. This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);

        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
    }
#endif

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        cpu->has_pmu = true;
        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
    }

    /*
     * Allow user to turn off VFP and Neon support, but only for TCG --
     * KVM does not currently allow us to lie to the guest about its
     * ID/feature registers, so the guest always sees what the host has.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_fp_simd, cpu)) {
            cpu->has_vfp = true;
            cpu->has_vfp_d32 = true;
            if (tcg_enabled() || qtest_enabled()) {
                qdev_property_add_static(DEVICE(obj),
                                         &arm_cpu_has_vfp_property);
            }
        }
    } else if (cpu_isar_feature(aa32_vfp, cpu)) {
        cpu->has_vfp = true;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            cpu->has_vfp_d32 = true;
            /*
             * The permitted values of the SIMDReg bits [3:0] on
             * Armv8-A are either 0b0000 and 0b0010. On such CPUs,
             * make sure that has_vfp_d32 can not be set to false.
             */
            if ((tcg_enabled() || qtest_enabled())
                && !(arm_feature(&cpu->env, ARM_FEATURE_V8)
                     && !arm_feature(&cpu->env, ARM_FEATURE_M))) {
                qdev_property_add_static(DEVICE(obj),
                                         &arm_cpu_has_vfp_d32_property);
            }
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
        cpu->has_neon = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
        arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add_uint32_ptr(obj, "init-svtor",
                                       &cpu->init_svtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        /*
         * Initial value of the NS VTOR (for cores without the Security
         * extension, this is the only VTOR)
         */
        object_property_add_uint32_ptr(obj, "init-nsvtor",
                                       &cpu->init_nsvtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }

    /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
    object_property_add_uint32_ptr(obj, "psci-conduit",
                                   &cpu->psci_conduit,
                                   OBJ_PROP_FLAG_READWRITE);

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

    if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
        qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
    }

    if (kvm_enabled()) {
        kvm_arm_add_vcpu_properties(obj);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
        cpu_isar_feature(aa64_mte, cpu)) {
        object_property_add_link(obj, "tag-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->tag_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);

        if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
            object_property_add_link(obj, "secure-tag-memory",
                                     TYPE_MEMORY_REGION,
                                     (Object **)&cpu->secure_tag_memory,
                                     qdev_prop_allow_set_link_before_realize,
                                     OBJ_PROP_LINK_STRONG);
        }
    }
#endif
}
*obj
)
1712 ARMCPU
*cpu
= ARM_CPU(obj
);
1713 ARMELChangeHook
*hook
, *next
;
1715 g_hash_table_destroy(cpu
->cp_regs
);
1717 QLIST_FOREACH_SAFE(hook
, &cpu
->pre_el_change_hooks
, node
, next
) {
1718 QLIST_REMOVE(hook
, node
);
1721 QLIST_FOREACH_SAFE(hook
, &cpu
->el_change_hooks
, node
, next
) {
1722 QLIST_REMOVE(hook
, node
);
1725 #ifndef CONFIG_USER_ONLY
1726 if (cpu
->pmu_timer
) {
1727 timer_free(cpu
->pmu_timer
);
void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifdef TARGET_AARCH64
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        arm_cpu_sve_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_sme_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_pauth_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_lpa2_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
#endif

    if (kvm_enabled()) {
        kvm_arm_steal_time_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    ARMCPU *cpu = ARM_CPU(dev);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
    CPUARMState *env = &cpu->env;
    int pagebits;
    Error *local_err = NULL;

    /* Use pc-relative instructions in system-mode */
#ifndef CONFIG_USER_ONLY
    cs->tcg_cflags |= CF_PCREL;
#endif

    /*
     * If we needed to query the host kernel for the CPU features
     * then it's possible that might have failed in the initfn, but
     * this is the first point where we can report it.
     */
    if (cpu->host_cpu_probe_failed) {
        if (!kvm_enabled() && !hvf_enabled()) {
            error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
        } else {
            error_setg(errp, "Failed to retrieve host CPU features");
        }
        return;
    }
#ifndef CONFIG_USER_ONLY
    /*
     * The NVIC and M-profile CPU are two halves of a single piece of
     * hardware; trying to use one without the other is a command line
     * error and will result in segfaults if not caught here.
     */
    if (arm_feature(env, ARM_FEATURE_M)) {
        if (!env->nvic) {
            error_setg(errp, "This board cannot be used with Cortex-M CPUs");
            return;
        }
    } else {
        if (env->nvic) {
            error_setg(errp, "This board can only be used with Cortex-M CPUs");
            return;
        }
    }

    if (!tcg_enabled() && !qtest_enabled()) {
        /*
         * We assume that no accelerator except TCG (and the "not really an
         * accelerator" qtest) can handle these features, because Arm hardware
         * virtualization can't virtualize them.
         *
         * Catch all the cases which might cause us to create more than one
         * address space for the CPU (otherwise we will assert() later in
         * cpu_address_space_init()).
         */
        if (arm_feature(env, ARM_FEATURE_M)) {
            error_setg(errp,
                       "Cannot enable %s when using an M-profile guest CPU",
                       current_accel_name());
            return;
        }
        if (cpu->has_el3) {
            error_setg(errp,
                       "Cannot enable %s when guest CPU has EL3 enabled",
                       current_accel_name());
            return;
        }
        if (cpu->tag_memory) {
            error_setg(errp,
                       "Cannot enable %s when guest CPU has MTE enabled",
                       current_accel_name());
            return;
        }
    }

    {
        uint64_t scale;

        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
            if (!cpu->gt_cntfrq_hz) {
                error_setg(errp, "Invalid CNTFRQ: %"PRId64"Hz",
                           cpu->gt_cntfrq_hz);
                return;
            }
            scale = gt_cntfrq_period_ns(cpu);
        } else {
            scale = GTIMER_SCALE;
        }

        cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_ptimer_cb, cpu);
        cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_vtimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                              arm_gt_htimer_cb, cpu);
        cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                              arm_gt_stimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                                  arm_gt_hvtimer_cb, cpu);
    }
#endif
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    arm_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifdef CONFIG_USER_ONLY
    /*
     * User mode relies on IC IVAU instructions to catch modification of
     * dual-mapped code.
     *
     * Clear CTR_EL0.DIC to ensure that software that honors these flags uses
     * IC IVAU even if the emulated processor does not normally require it.
     */
    cpu->ctr = FIELD_DP64(cpu->ctr, CTR_EL0, DIC, 0);
#endif
    if (arm_feature(env, ARM_FEATURE_AARCH64) &&
        cpu->has_vfp != cpu->has_neon) {
        /*
         * This is an architectural requirement for AArch64; AArch32 is
         * more flexible and permits VFP-no-Neon and Neon-no-VFP.
         */
        error_setg(errp,
                   "AArch64 CPUs must have both VFP and Neon or neither");
        return;
    }

    if (cpu->has_vfp_d32 != cpu->has_neon) {
        error_setg(errp, "ARM CPUs must have both VFP-D32 and Neon or neither");
        return;
    }

    if (!cpu->has_vfp_d32) {
        uint32_t u;

        u = cpu->isar.mvfr0;
        u = FIELD_DP32(u, MVFR0, SIMDREG, 1); /* 16 registers */
        cpu->isar.mvfr0 = u;
    }

    if (!cpu->has_vfp) {
        uint64_t t;
        uint32_t u;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
        cpu->isar.id_aa64pfr0 = t;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
        cpu->isar.id_isar6 = u;

        u = cpu->isar.mvfr0;
        u = FIELD_DP32(u, MVFR0, FPSP, 0);
        u = FIELD_DP32(u, MVFR0, FPDP, 0);
        u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
        u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
        u = FIELD_DP32(u, MVFR0, FPROUND, 0);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
            u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
        }
        cpu->isar.mvfr0 = u;

        u = cpu->isar.mvfr1;
        u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
        u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
        u = FIELD_DP32(u, MVFR1, FPHP, 0);
        if (arm_feature(env, ARM_FEATURE_M)) {
            u = FIELD_DP32(u, MVFR1, FP16, 0);
        }
        cpu->isar.mvfr1 = u;

        u = cpu->isar.mvfr2;
        u = FIELD_DP32(u, MVFR2, FPMISC, 0);
        cpu->isar.mvfr2 = u;
    }

    if (!cpu->has_neon) {
        uint64_t t;
        uint32_t u;

        unset_feature(env, ARM_FEATURE_NEON);

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
        cpu->isar.id_aa64pfr0 = t;

        u = cpu->isar.id_isar5;
        u = FIELD_DP32(u, ID_ISAR5, AES, 0);
        u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
        u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
        u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
        u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
        cpu->isar.id_isar5 = u;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, DP, 0);
        u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
        u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
        cpu->isar.id_isar6 = u;

        if (!arm_feature(env, ARM_FEATURE_M)) {
            u = cpu->isar.mvfr1;
            u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
            u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
            u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
            u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
            cpu->isar.mvfr1 = u;

            u = cpu->isar.mvfr2;
            u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
            cpu->isar.mvfr2 = u;
        }
    }

    if (!cpu->has_neon && !cpu->has_vfp) {
        uint64_t t;
        uint32_t u;

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
        cpu->isar.id_aa64isar1 = t;

        u = cpu->isar.mvfr0;
        u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
        cpu->isar.mvfr0 = u;

        /* Despite the name, this field covers both VFP and Neon */
        u = cpu->isar.mvfr1;
        u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
        cpu->isar.mvfr1 = u;
    }

    if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
        uint32_t u;

        unset_feature(env, ARM_FEATURE_THUMB_DSP);

        u = cpu->isar.id_isar1;
        u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
        cpu->isar.id_isar1 = u;

        u = cpu->isar.id_isar2;
        u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
        u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
        cpu->isar.id_isar2 = u;

        u = cpu->isar.id_isar3;
        u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
        u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
        cpu->isar.id_isar3 = u;
    }
    /*
     * We rely on no XScale CPU having VFP so we can use the same bits in the
     * TB flags field for VECSTRIDE and XSCALE_CPAR.
     */
    assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ||
           !cpu_isar_feature(aa32_vfp_simd, cpu) ||
           !arm_feature(env, ARM_FEATURE_XSCALE));

    if (arm_feature(env, ARM_FEATURE_V7) &&
        !arm_feature(env, ARM_FEATURE_M) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        /*
         * v7VMSA drops support for the old ARMv5 tiny pages, so we
         * can use 4K pages.
         */
        pagebits = 12;
    } else {
        /*
         * For CPUs which might have tiny 1K pages, or which have an
         * MPU and might have small region sizes, stick with 1K pages.
         */
        pagebits = 10;
    }
    if (!set_preferred_target_page_bits(pagebits)) {
        /*
         * This can only ever happen for hotplugging a CPU, or if
         * the board code incorrectly creates a CPU which it has
         * promised via minimum_page_size that it will not.
         */
        error_setg(errp, "This CPU requires a smaller page size than the "
                   "system is using");
        return;
    }
    /*
     * This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
     * We don't support setting cluster ID ([16..23]) (known as Aff2
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
        cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
                                               ARM_DEFAULT_CPUS_PER_CLUSTER);
    }
    if (cpu->reset_hivecs) {
        cpu->reset_sctlr |= (1 << 13);
    }

    if (cpu->cfgend) {
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            cpu->reset_sctlr |= SCTLR_EE;
        } else {
            cpu->reset_sctlr |= SCTLR_B;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
        /* If the has_el3 CPU property is disabled then we need to disable the
         * feature.
         */
        unset_feature(env, ARM_FEATURE_EL3);

        /*
         * Disable the security extension feature bits in the processor
         * feature registers as well.
         */
        cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                           ID_AA64PFR0, EL3, 0);

        /* Disable the realm management extension, which requires EL3. */
        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                           ID_AA64PFR0, RME, 0);
    }

    if (!cpu->has_el2) {
        unset_feature(env, ARM_FEATURE_EL2);
    }

    if (!cpu->has_pmu) {
        unset_feature(env, ARM_FEATURE_PMU);
    }
    if (arm_feature(env, ARM_FEATURE_PMU)) {
        pmu_init(cpu);

        if (!kvm_enabled()) {
            arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
            arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
        }

#ifndef CONFIG_USER_ONLY
        cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
                                      cpu);
#endif
    } else {
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
        cpu->pmceid0 = 0;
        cpu->pmceid1 = 0;
    }
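
    /*
     * The EL-change hooks and the sampling timer are only needed when QEMU
     * itself models the PMU; under KVM the counters live in the host
     * kernel, which is why the hook registration above is guarded by
     * !kvm_enabled().
     */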

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * Disable the hypervisor feature bits in the processor feature
         * registers if we don't have EL2.
         */
        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                           ID_AA64PFR0, EL2, 0);
        cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
                                       ID_PFR1, VIRTUALIZATION, 0);
    }

    if (cpu_isar_feature(aa64_mte, cpu)) {
        /*
         * The architectural range of GM blocksize is 2-6, however qemu
         * doesn't support blocksize of 2 (see HELPER(ldgm)).
         */
        if (tcg_enabled()) {
            assert(cpu->gm_blocksize >= 3 && cpu->gm_blocksize <= 6);
        }

#ifndef CONFIG_USER_ONLY
        /*
         * If we do not have tag-memory provided by the machine,
         * reduce MTE support to instructions enabled at EL0.
         * This matches Cortex-A710 BROADCASTMTE input being LOW.
         */
        if (cpu->tag_memory == NULL) {
            cpu->isar.id_aa64pfr1 =
                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
        }
#endif
    }
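
    /*
     * ID_AA64PFR1.MTE encodings, for reference: 1 advertises only the MTE
     * instructions (FEAT_MTE), while 2 and up advertise full tag storage
     * (FEAT_MTE2 and later).  Lowering the field to 1 when the board gave
     * us no tag memory keeps the EL0-visible instructions working without
     * promising tags we cannot store.
     */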

    if (tcg_enabled()) {
        /*
         * Don't report some architectural features in the ID registers
         * where TCG does not yet implement it (not even a minimal
         * stub version). This avoids guests falling over when they
         * try to access the non-existent system registers for them.
         */
        /* FEAT_SPE (Statistical Profiling Extension) */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
        /* FEAT_TRBE (Trace Buffer Extension) */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
        /* FEAT_TRF (Self-hosted Trace Extension) */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
        cpu->isar.id_dfr0 =
            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
        /* Trace Macrocell system register access */
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
        cpu->isar.id_dfr0 =
            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
        /* Memory mapped trace */
        cpu->isar.id_dfr0 =
            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
        /* FEAT_AMU (Activity Monitors Extension) */
        cpu->isar.id_aa64pfr0 =
            FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
        cpu->isar.id_pfr0 =
            FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
        /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
        cpu->isar.id_aa64pfr0 =
            FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
        /* FEAT_NV (Nested Virtualization) */
        cpu->isar.id_aa64mmfr2 =
            FIELD_DP64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV, 0);
    }
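
    /*
     * Example of the failure mode being avoided: with ID_AA64DFR0.PMSVer
     * left non-zero, a guest kernel would probe for FEAT_SPE and touch
     * registers such as PMSCR_EL1, which TCG does not implement, so the
     * access would UNDEF.  Zeroing the ID field keeps the guest from
     * looking in the first place.
     */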

    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
     * to false or by setting pmsav7-dregion to 0.
     */
    if (!cpu->has_mpu || cpu->pmsav7_dregion == 0) {
        cpu->has_mpu = false;
        cpu->pmsav7_dregion = 0;
        cpu->pmsav8r_hdregion = 0;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        uint32_t nr = cpu->pmsav7_dregion;

        if (nr > 0xff) {
            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                /* PMSAv8 */
                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
                }
            } else {
                env->pmsav7.drbar = g_new0(uint32_t, nr);
                env->pmsav7.drsr = g_new0(uint32_t, nr);
                env->pmsav7.dracr = g_new0(uint32_t, nr);
            }
        }

        if (cpu->pmsav8r_hdregion > 0xff) {
            error_setg(errp, "PMSAv8 MPU EL2 #regions invalid %" PRIu32,
                       cpu->pmsav8r_hdregion);
            return;
        }

        if (cpu->pmsav8r_hdregion) {
            env->pmsav8.hprbar = g_new0(uint32_t, cpu->pmsav8r_hdregion);
            env->pmsav8.hprlar = g_new0(uint32_t, cpu->pmsav8r_hdregion);
        }
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        uint32_t nr = cpu->sau_sregion;

        if (nr > 0xff) {
            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            env->sau.rbar = g_new0(uint32_t, nr);
            env->sau.rlar = g_new0(uint32_t, nr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled() && cpu_isar_feature(aa64_rme, cpu)) {
        arm_register_el_change_hook(cpu, &gt_rme_post_el_change, 0);
    }
#endif

    register_cp_regs_for_features(cpu);
    arm_cpu_register_gdb_regs_for_features(cpu);

    init_cpreg_list(cpu);
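
    /*
     * Ordering matters here: register_cp_regs_for_features() populates the
     * cp_regs hash table, and init_cpreg_list() then walks that table to
     * build the index/value arrays used for migration and for syncing with
     * KVM, so it has to run after all registers are in place.
     */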

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);

    /*
     * We must set cs->num_ases to the final value before
     * the first call to cpu_address_space_init.
     */
    if (cpu->tag_memory != NULL) {
        cs->num_ases = 3 + has_secure;
    } else {
        cs->num_ases = 1 + has_secure;
    }
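
    /*
     * The arithmetic above tracks the fixed address-space indexes
     * (ARMASIdx_NS == 0, ARMASIdx_S == 1, ARMASIdx_TagNS == 2,
     * ARMASIdx_TagS == 3, per the ARMASIdx enum in cpu.h): num_ases must
     * exceed the highest index passed to cpu_address_space_init() below.
     */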

    if (has_secure) {
        if (!cpu->secure_memory) {
            cpu->secure_memory = cs->memory;
        }
        cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
                               cpu->secure_memory);
    }

    if (cpu->tag_memory != NULL) {
        cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
                               cpu->tag_memory);
        if (has_secure) {
            cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
                                   cpu->secure_tag_memory);
        }
    }

    cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);

    /* No core_count specified, default to smp_cpus. */
    if (cpu->core_count == -1) {
        cpu->core_count = smp_cpus;
    }
#endif

    if (tcg_enabled()) {
        int dcz_blocklen = 4 << cpu->dcz_blocksize;

        /*
         * We only support DCZ blocklen that fits on one page.
         *
         * Architecturally this is always true.  However TARGET_PAGE_SIZE
         * is variable and, for compatibility with -machine virt-2.7,
         * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
         * But even then, while the largest architectural DCZ blocklen
         * is 2KiB, no cpu actually uses such a large blocklen.
         */
        assert(dcz_blocklen <= TARGET_PAGE_SIZE);

        /*
         * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
         * both nibbles of each byte storing tag data may be written at once.
         * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
         */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            assert(dcz_blocklen >= 2 * TAG_GRANULE);
        }
    }
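
    /*
     * Worked numbers for the asserts above: dcz_blocksize holds the
     * DCZID_EL0.BS encoding (log2 of the block size in 4-byte words), so
     * the common value 4 gives 4 << 4 = 64-byte DC ZVA blocks, and the MTE
     * constraint 2 * TAG_GRANULE = 32 bytes is met by any blocksize >= 3.
     */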

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    acc->parent_realize(dev, errp);
}

static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;
    const char *cpunamestr;

    cpuname = g_strsplit(cpu_model, ",", 1);
    cpunamestr = cpuname[0];
#ifdef CONFIG_USER_ONLY
    /* For backwards compatibility usermode emulation allows "-cpu any",
     * which has the same semantics as "-cpu max".
     */
    if (!strcmp(cpunamestr, "any")) {
        cpunamestr = "max";
    }
#endif

    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}
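
/*
 * Example of the mapping performed above: a model name of "cortex-a57"
 * is expanded by ARM_CPU_TYPE_NAME("%s") into the QOM class name
 * "cortex-a57-arm-cpu", and the final check rejects lookups that resolve
 * to an abstract base class rather than an instantiable CPU model.
 */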

static Property arm_cpu_properties[] = {
    DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                       mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};
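
/*
 * These become QOM properties on every ARM CPU object.  A board model
 * could, for instance, override the default core count before realizing
 * the CPU (a sketch, not a call site taken from this file):
 *
 *     qdev_prop_set_int32(DEVICE(cpu), "core-count", 4);
 */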

static const gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return "iwmmxt";
    }
    return "arm";
}
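
/*
 * The gdbstub reports this string as the <architecture> element of the
 * GDB target description, so a debugger attached to an iwMMXt core can
 * select the register layout that includes the wMMX registers.
 */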

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps arm_sysemu_ops = {
    .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
    .asidx_from_attrs = arm_asidx_from_attrs,
    .write_elf32_note = arm_cpu_write_elf32_note,
    .write_elf64_note = arm_cpu_write_elf64_note,
    .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
    .legacy_vmsd = &vmstate_arm_cpu,
};
#endif

#ifdef CONFIG_TCG
static const struct TCGCPUOps arm_tcg_ops = {
    .initialize = arm_translate_init,
    .synchronize_from_tb = arm_cpu_synchronize_from_tb,
    .debug_excp_handler = arm_debug_excp_handler,
    .restore_state_to_opc = arm_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
    .record_sigsegv = arm_cpu_record_sigsegv,
    .record_sigbus = arm_cpu_record_sigbus,
#else
    .tlb_fill = arm_cpu_tlb_fill,
    .cpu_exec_interrupt = arm_cpu_exec_interrupt,
    .do_interrupt = arm_cpu_do_interrupt,
    .do_transaction_failed = arm_cpu_do_transaction_failed,
    .do_unaligned_access = arm_cpu_do_unaligned_access,
    .adjust_watchpoint_address = arm_adjust_watchpoint_address,
    .debug_check_watchpoint = arm_debug_check_watchpoint,
    .debug_check_breakpoint = arm_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */

static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);

    device_class_set_props(dc, arm_cpu_properties);

    resettable_class_set_parent_phases(rc, NULL, arm_cpu_reset_hold, NULL,
                                       &acc->parent_phases);

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->get_pc = arm_cpu_get_pc;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &arm_sysemu_ops;
#endif
    cc->gdb_num_core_regs = 26;
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = arm_disas_set_info;

#ifdef CONFIG_TCG
    cc->tcg_ops = &arm_tcg_ops;
#endif /* CONFIG_TCG */
}
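
/*
 * Note how device_class_set_parent_realize() above pairs with the
 * acc->parent_realize(dev, errp) call at the end of arm_cpu_realizefn():
 * the parent class's realize is saved so the subclass can run its own
 * setup first and then chain up, the usual QOM idiom.
 */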

static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);

    acc->info = data;
    cc->gdb_core_xml_file = "arm-core.xml";
}

void arm_cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_init = arm_cpu_instance_init,
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}
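
/*
 * Typical use (a sketch; the real call sites live in the per-CPU model
 * tables elsewhere in target/arm): each entry of an ARMCPUInfo array is
 * registered at startup, so an info with .name = "cortex-a15" produces
 * the dynamic QOM type "cortex-a15-arm-cpu".
 */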

static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_align = __alignof__(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static void arm_cpu_register_types(void)
{
    type_register_static(&arm_cpu_type_info);
}

type_init(arm_cpu_register_types)