]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/cpu.c
target/arm: Drop always-true test in define_arm_vh_e2h_redirects_aliases
[mirror_qemu.git] / target / arm / cpu.c
CommitLineData
dec9c2d4
AF
1/*
2 * QEMU ARM CPU
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20
74c21bd0 21#include "qemu/osdep.h"
86480615 22#include "qemu/qemu-print.h"
b8012ecf 23#include "qemu/timer.h"
8cc2246c 24#include "qemu/log.h"
ec5f7ca8 25#include "exec/page-vary.h"
181962fd 26#include "target/arm/idau.h"
0b8fa32f 27#include "qemu/module.h"
da34e65c 28#include "qapi/error.h"
f9f62e4c 29#include "qapi/visitor.h"
778c3a06 30#include "cpu.h"
78271684
CF
31#ifdef CONFIG_TCG
32#include "hw/core/tcg-cpu-ops.h"
33#endif /* CONFIG_TCG */
ccd38087 34#include "internals.h"
63c91552 35#include "exec/exec-all.h"
5de16430 36#include "hw/qdev-properties.h"
3c30dd5a
PM
37#if !defined(CONFIG_USER_ONLY)
38#include "hw/loader.h"
cc7d44c2 39#include "hw/boards.h"
3c30dd5a 40#endif
14a48c1d 41#include "sysemu/tcg.h"
b3946626 42#include "sysemu/hw_accel.h"
50a2c6e5 43#include "kvm_arm.h"
110f6c70 44#include "disas/capstone.h"
24f91e81 45#include "fpu/softfloat.h"
cf7c6d10 46#include "cpregs.h"
dec9c2d4 47
f45748f1
AF
48static void arm_cpu_set_pc(CPUState *cs, vaddr value)
49{
50 ARMCPU *cpu = ARM_CPU(cs);
42f6ed91
JS
51 CPUARMState *env = &cpu->env;
52
53 if (is_a64(env)) {
54 env->pc = value;
063bbd80 55 env->thumb = false;
42f6ed91
JS
56 } else {
57 env->regs[15] = value & ~1;
58 env->thumb = value & 1;
59 }
60}
f45748f1 61
ec62595b 62#ifdef CONFIG_TCG
78271684
CF
63void arm_cpu_synchronize_from_tb(CPUState *cs,
64 const TranslationBlock *tb)
42f6ed91
JS
65{
66 ARMCPU *cpu = ARM_CPU(cs);
67 CPUARMState *env = &cpu->env;
68
69 /*
70 * It's OK to look at env for the current mode here, because it's
71 * never possible for an AArch64 TB to chain to an AArch32 TB.
72 */
73 if (is_a64(env)) {
74 env->pc = tb->pc;
75 } else {
76 env->regs[15] = tb->pc;
77 }
f45748f1 78}
ec62595b 79#endif /* CONFIG_TCG */
f45748f1 80
8c2e1b00
AF
81static bool arm_cpu_has_work(CPUState *cs)
82{
543486db
RH
83 ARMCPU *cpu = ARM_CPU(cs);
84
062ba099 85 return (cpu->power_state != PSCI_OFF)
543486db 86 && cs->interrupt_request &
136e67e9
EI
87 (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
88 | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
89 | CPU_INTERRUPT_EXITTB);
8c2e1b00
AF
90}
91
b5c53d1b
AL
92void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
93 void *opaque)
94{
95 ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
96
97 entry->hook = hook;
98 entry->opaque = opaque;
99
100 QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
101}
102
08267487 103void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
bd7d00fc
PM
104 void *opaque)
105{
08267487
AL
106 ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
107
108 entry->hook = hook;
109 entry->opaque = opaque;
110
111 QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
bd7d00fc
PM
112}
113
4b6a83fb
PM
/*
 * GHashTable iteration callback: reset one coprocessor register.
 * @value is the ARMCPRegInfo for the register; @opaque is the ARMCPU.
 */
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    /* Special registers and aliases carry no independent state to reset. */
    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    /* A register-specific reset function takes precedence over resetvalue. */
    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}
144
49a66191
PM
/*
 * GHashTable iteration callback run after the first reset pass:
 * verify that resetting a cpreg a second time does not change it.
 */
static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    /* NO_RAW registers cannot be read back, so skip those too. */
    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}
165
/*
 * DeviceClass reset handler for all ARM CPUs.  Zeroes the architectural
 * state up to end_reset_fields, resets every coprocessor register (and
 * asserts the reset is idempotent), then applies the profile-specific
 * reset values: AArch64 vs AArch32, A/R vs M profile, and system vs
 * user-only emulation.
 */
static void arm_cpu_reset(DeviceState *dev)
{
    CPUState *s = CPU(dev);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(dev);

    /* Fields past end_reset_fields are configuration, not state. */
    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys. */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
        /* and to the SVE instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
        /* with reasonable vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->vfp.zcr_el[1] =
                aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1);
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);

        /* Enable MTE */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset. */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    /* Configure the softfloat status words for the various FP flavours. */
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);
    arm_rebuild_hflags(env);
}
472
083afd18
PMD
473#ifndef CONFIG_USER_ONLY
474
/*
 * Return true if the exception @excp_idx, routed to @target_el, is not
 * masked and so can be taken now from @cur_el.  @secure is the current
 * security state and @hcr_el2 the effective HCR_EL2 value; both are
 * passed in by the caller so they are computed only once per poll.
 */
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cs->env_ptr;
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /*
             * 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
                unmasked = true;
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * affect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overriden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}
590
083afd18 591static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
e8925712
RH
592{
593 CPUClass *cc = CPU_GET_CLASS(cs);
012a906b
GB
594 CPUARMState *env = cs->env_ptr;
595 uint32_t cur_el = arm_current_el(env);
596 bool secure = arm_is_secure(env);
be879556 597 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
012a906b
GB
598 uint32_t target_el;
599 uint32_t excp_idx;
d63d0ec5
RH
600
601 /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
e8925712 602
012a906b
GB
603 if (interrupt_request & CPU_INTERRUPT_FIQ) {
604 excp_idx = EXCP_FIQ;
605 target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
be879556
RH
606 if (arm_excp_unmasked(cs, excp_idx, target_el,
607 cur_el, secure, hcr_el2)) {
d63d0ec5 608 goto found;
012a906b 609 }
e8925712 610 }
012a906b
GB
611 if (interrupt_request & CPU_INTERRUPT_HARD) {
612 excp_idx = EXCP_IRQ;
613 target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
be879556
RH
614 if (arm_excp_unmasked(cs, excp_idx, target_el,
615 cur_el, secure, hcr_el2)) {
d63d0ec5 616 goto found;
012a906b 617 }
e8925712 618 }
012a906b
GB
619 if (interrupt_request & CPU_INTERRUPT_VIRQ) {
620 excp_idx = EXCP_VIRQ;
621 target_el = 1;
be879556
RH
622 if (arm_excp_unmasked(cs, excp_idx, target_el,
623 cur_el, secure, hcr_el2)) {
d63d0ec5 624 goto found;
012a906b 625 }
136e67e9 626 }
012a906b
GB
627 if (interrupt_request & CPU_INTERRUPT_VFIQ) {
628 excp_idx = EXCP_VFIQ;
629 target_el = 1;
be879556
RH
630 if (arm_excp_unmasked(cs, excp_idx, target_el,
631 cur_el, secure, hcr_el2)) {
d63d0ec5 632 goto found;
012a906b 633 }
136e67e9 634 }
d63d0ec5 635 return false;
e8925712 636
d63d0ec5
RH
637 found:
638 cs->exception_index = excp_idx;
639 env->exception.target_el = target_el;
78271684 640 cc->tcg_ops->do_interrupt(cs);
d63d0ec5 641 return true;
e8925712 642}
083afd18 643#endif /* !CONFIG_USER_ONLY */
e8925712 644
89430fc6
PM
645void arm_cpu_update_virq(ARMCPU *cpu)
646{
647 /*
648 * Update the interrupt level for VIRQ, which is the logical OR of
649 * the HCR_EL2.VI bit and the input line level from the GIC.
650 */
651 CPUARMState *env = &cpu->env;
652 CPUState *cs = CPU(cpu);
653
654 bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
655 (env->irq_line_state & CPU_INTERRUPT_VIRQ);
656
657 if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
658 if (new_state) {
659 cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
660 } else {
661 cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
662 }
663 }
664}
665
666void arm_cpu_update_vfiq(ARMCPU *cpu)
667{
668 /*
669 * Update the interrupt level for VFIQ, which is the logical OR of
670 * the HCR_EL2.VF bit and the input line level from the GIC.
671 */
672 CPUARMState *env = &cpu->env;
673 CPUState *cs = CPU(cpu);
674
675 bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
676 (env->irq_line_state & CPU_INTERRUPT_VFIQ);
677
678 if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
679 if (new_state) {
680 cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
681 } else {
682 cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
683 }
684 }
685}
686
7c1840b6
PM
687#ifndef CONFIG_USER_ONLY
688static void arm_cpu_set_irq(void *opaque, int irq, int level)
689{
690 ARMCPU *cpu = opaque;
136e67e9 691 CPUARMState *env = &cpu->env;
7c1840b6 692 CPUState *cs = CPU(cpu);
136e67e9
EI
693 static const int mask[] = {
694 [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
695 [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
696 [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
697 [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
698 };
7c1840b6 699
9acd2d33
PM
700 if (!arm_feature(env, ARM_FEATURE_EL2) &&
701 (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
702 /*
703 * The GIC might tell us about VIRQ and VFIQ state, but if we don't
704 * have EL2 support we don't care. (Unless the guest is doing something
705 * silly this will only be calls saying "level is still 0".)
706 */
707 return;
708 }
709
ed89f078
PM
710 if (level) {
711 env->irq_line_state |= mask[irq];
712 } else {
713 env->irq_line_state &= ~mask[irq];
714 }
715
7c1840b6 716 switch (irq) {
136e67e9 717 case ARM_CPU_VIRQ:
89430fc6
PM
718 arm_cpu_update_virq(cpu);
719 break;
136e67e9 720 case ARM_CPU_VFIQ:
89430fc6
PM
721 arm_cpu_update_vfiq(cpu);
722 break;
136e67e9 723 case ARM_CPU_IRQ:
7c1840b6
PM
724 case ARM_CPU_FIQ:
725 if (level) {
136e67e9 726 cpu_interrupt(cs, mask[irq]);
7c1840b6 727 } else {
136e67e9 728 cpu_reset_interrupt(cs, mask[irq]);
7c1840b6
PM
729 }
730 break;
731 default:
8f6fd322 732 g_assert_not_reached();
7c1840b6
PM
733 }
734}
735
static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    /* Under KVM only the physical IRQ and FIQ lines are wired up. */
    if (irq == ARM_CPU_IRQ) {
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
    } else if (irq == ARM_CPU_FIQ) {
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
    } else {
        g_assert_not_reached();
    }

    /* Keep env->irq_line_state in sync with the level we forward. */
    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}
84f2bed3 766
ed50ff78 767static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
84f2bed3
PS
768{
769 ARMCPU *cpu = ARM_CPU(cs);
770 CPUARMState *env = &cpu->env;
84f2bed3
PS
771
772 cpu_synchronize_state(cs);
ed50ff78 773 return arm_cpu_data_is_big_endian(env);
84f2bed3
PS
774}
775
7c1840b6
PM
776#endif
777
48440620
PC
/*
 * Disassemble one Thumb instruction: the bfd ARM disassembler selects
 * Thumb decoding when bit 0 of the address is set.
 */
static int
print_insn_thumb1(bfd_vma pc, disassemble_info *info)
{
    return print_insn_arm(pc | 1, info);
}
783
/*
 * Configure disassembly for the CPU's current execution state:
 * pick the A64/A32/Thumb disassembler, set the capstone fallback
 * parameters, and select the instruction endianness.
 */
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->print_insn = print_insn_thumb1;
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->print_insn = print_insn_arm;
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    /* SCTLR.B selects BE32 code fetch; swap relative to the build target. */
    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}
839
86480615
PMD
#ifdef TARGET_AARCH64

/*
 * Dump AArch64 CPU state to @f: PC, X0-X30/SP, PSTATE, and (when
 * CPU_DUMP_FPU is set in @flags and the FPU is enabled) either the
 * SVE Z/P registers at the current vector length or the Neon Q regs.
 */
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                         (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                 psr,
                 psr & PSTATE_N ? 'N' : '-',
                 psr & PSTATE_Z ? 'Z' : '-',
                 psr & PSTATE_C ? 'C' : '-',
                 psr & PSTATE_V ? 'V' : '-',
                 ns_status,
                 el,
                 psr & PSTATE_SP ? 'h' : 't');

    if (cpu_isar_feature(aa64_bti, cpu)) {
        qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
    }
    if (!(flags & CPU_DUMP_FPU)) {
        qemu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        qemu_fprintf(f, " FPU disabled\n");
        return;
    }
    qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
        int j, zcr_len = sve_zcr_len_for_el(env, el);

        /* Predicate registers, then FFR; line breaks depend on width. */
        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                qemu_fprintf(f, "FFR=");
                /* It's last, so end the line. */
                eol = true;
            } else {
                qemu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate. */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
                             env->vfp.pregs[i].p[j],
                             j ? ":" : eol ? "\n" : " ");
            }
        }

        /* Z (vector) registers, formatted according to vector length. */
        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                             i, env->vfp.zregs[i].d[1],
                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                             ":%016" PRIx64 ":%016" PRIx64 "\n",
                             i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                             env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            qemu_fprintf(f, "   [%x-%x]=", j, j - 1);
                        } else {
                            qemu_fprintf(f, "     [%x]=", j);
                        }
                    }
                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                 env->vfp.zregs[i].d[j * 2 + 1],
                                 env->vfp.zregs[i].d[j * 2],
                                 odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        /* No SVE (or SVE trapped): dump the 128-bit Q registers. */
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                         i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}

#else

/* Stub: a non-AArch64 target build can never reach this path. */
static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    g_assert_not_reached();
}

#endif
977
/*
 * Dump AArch32 CPU state (registers, PSR/XPSR, optionally FP state) to f.
 * AArch64 state is delegated to aarch64_cpu_dump_state().
 */
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    /* General-purpose registers R0..R15, four per output line. */
    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: show XPSR plus security/privilege/handler state. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            /* Non-zero exception number means we are in Handler mode. */
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        /* A/R-profile: show CPSR with flags, security state and mode name. */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        /* Bit 4 of the mode field distinguishes 32-bit from 26-bit modes. */
        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 32 D registers with MVFR0.SIMDReg == 2, 16 with basic VFP/SIMD. */
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}
1066
46de5913
IM
1067uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
1068{
1069 uint32_t Aff1 = idx / clustersz;
1070 uint32_t Aff0 = idx % clustersz;
1071 return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
1072}
1073
ac87e507
PM
1074static void cpreg_hashtable_data_destroy(gpointer data)
1075{
1076 /*
1077 * Destroy function for cpu->cp_regs hashtable data entries.
1078 * We must free the name string because it was g_strdup()ed in
1079 * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
1080 * from r->name because we know we definitely allocated it.
1081 */
1082 ARMCPRegInfo *r = data;
1083
1084 g_free((void *)r->name);
1085 g_free(r);
1086}
1087
777dc784
PM
/*
 * QOM instance_init for TYPE_ARM_CPU: set up the cpreg hashtable,
 * EL-change hook lists, inbound/outbound GPIO lines (system emulation
 * only) and the default PSCI/KVM-target values.
 */
static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu_set_cpustate_pointers(cpu);
    /* Keyed by 32-bit encoded cpreg ID; values freed via our destroy fn. */
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, cpreg_hashtable_data_destroy);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit vectors, when sve is supported.
     * See documentation for /proc/sys/abi/sve_default_vector_length, and
     * our corresponding sve-default-vector-length cpu property.
     */
    cpu->sve_default_vq = 4;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    /* Outbound lines for the generic timers. */
    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}
1141
96eec6b2
AJ
/* Generic timer frequency in Hz; default matches GTIMER_SCALE. */
static Property arm_cpu_gt_cntfrq_property =
            DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
                               NANOSECONDS_PER_SECOND / GTIMER_SCALE);

/* Reset value of the CBAR (configuration base address register). */
static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

/* Start with high exception vectors (SCTLR.V) out of reset. */
static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

#ifndef CONFIG_USER_ONLY
/* Whether the CPU exposes EL2 (only added when the CPU supports it). */
static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

/* Whether the CPU exposes EL3 (only added when the CPU supports it). */
static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif

/* Big-endian-at-reset configuration (SCTLR.EE or SCTLR.B, see realize). */
static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

/* User-controllable VFP/Neon/DSP enables (TCG only; see post_init). */
static Property arm_cpu_has_vfp_property =
            DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

static Property arm_cpu_has_neon_property =
            DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);

static Property arm_cpu_has_dsp_property =
            DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

/* Whether the PMSA CPU has an MPU at all. */
static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);
3281af81 1184
ae502508
AJ
1185static bool arm_get_pmu(Object *obj, Error **errp)
1186{
1187 ARMCPU *cpu = ARM_CPU(obj);
1188
1189 return cpu->has_pmu;
1190}
1191
1192static void arm_set_pmu(Object *obj, bool value, Error **errp)
1193{
1194 ARMCPU *cpu = ARM_CPU(obj);
1195
1196 if (value) {
7d20e681 1197 if (kvm_enabled() && !kvm_arm_pmu_supported()) {
ae502508
AJ
1198 error_setg(errp, "'pmu' feature not supported by KVM on this host");
1199 return;
1200 }
1201 set_feature(&cpu->env, ARM_FEATURE_PMU);
1202 } else {
1203 unset_feature(&cpu->env, ARM_FEATURE_PMU);
1204 }
1205 cpu->has_pmu = value;
1206}
1207
7def8754
AJ
1208unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
1209{
96eec6b2
AJ
1210 /*
1211 * The exact approach to calculating guest ticks is:
1212 *
1213 * muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
1214 * NANOSECONDS_PER_SECOND);
1215 *
1216 * We don't do that. Rather we intentionally use integer division
1217 * truncation below and in the caller for the conversion of host monotonic
1218 * time to guest ticks to provide the exact inverse for the semantics of
1219 * the QEMUTimer scale factor. QEMUTimer's scale facter is an integer, so
1220 * it loses precision when representing frequencies where
1221 * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq) > 0` holds. Failing to
1222 * provide an exact inverse leads to scheduling timers with negative
1223 * periods, which in turn leads to sticky behaviour in the guest.
1224 *
1225 * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
1226 * cannot become zero.
1227 */
7def8754
AJ
1228 return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
1229 NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
1230}
1231
/*
 * Post-init for ARM CPUs: register the QOM/qdev properties whose
 * availability depends on which features the concrete CPU model set in
 * its instance_init. Runs after the subclass initfn, before realize.
 */
void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* M profile implies PMSA. We have to do this here rather than
     * in realize with the other feature-implication checks because
     * we look at the PMSA bit to see if we should add some properties.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        set_feature(&cpu->env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        /* Reset vector base address, writable until realize. */
        object_property_add_uint64_ptr(obj, "rvbar",
                                       &cpu->rvbar_prop,
                                       OBJ_PROP_FLAG_READWRITE);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed. This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);

        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
    }
#endif

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        cpu->has_pmu = true;
        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
    }

    /*
     * Allow user to turn off VFP and Neon support, but only for TCG --
     * KVM does not currently allow us to lie to the guest about its
     * ID/feature registers, so the guest always sees what the host has.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_fp_simd, cpu)
        : cpu_isar_feature(aa32_vfp, cpu)) {
        cpu->has_vfp = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
        cpu->has_neon = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
        arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add_uint32_ptr(obj, "init-svtor",
                                       &cpu->init_svtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        /*
         * Initial value of the NS VTOR (for cores without the Security
         * extension, this is the only VTOR)
         */
        object_property_add_uint32_ptr(obj, "init-nsvtor",
                                       &cpu->init_nsvtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }

    /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
    object_property_add_uint32_ptr(obj, "psci-conduit",
                                   &cpu->psci_conduit,
                                   OBJ_PROP_FLAG_READWRITE);

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

    if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
        qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
    }

    if (kvm_enabled()) {
        kvm_arm_add_vcpu_properties(obj);
    }

#ifndef CONFIG_USER_ONLY
    /* MTE needs board-provided tag memory; expose the link properties. */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
        cpu_isar_feature(aa64_mte, cpu)) {
        object_property_add_link(obj, "tag-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->tag_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);

        if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
            object_property_add_link(obj, "secure-tag-memory",
                                     TYPE_MEMORY_REGION,
                                     (Object **)&cpu->secure_tag_memory,
                                     qdev_prop_allow_set_link_before_realize,
                                     OBJ_PROP_LINK_STRONG);
        }
    }
#endif
}
1374
4b6a83fb
PM
/*
 * QOM instance_finalize: release everything arm_cpu_initfn (and later
 * hook registration / realize) allocated on this instance.
 */
static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    /* Entries are freed via the destroy functions given at creation. */
    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
#ifndef CONFIG_USER_ONLY
    /* Only created in realize when the PMU feature is present. */
    if (cpu->pmu_timer) {
        timer_free(cpu->pmu_timer);
    }
#endif
}
1396
0df9142d
AJ
/*
 * Finalize feature properties that interact with each other (SVE vector
 * lengths, pointer auth, LPA2, KVM steal-time). Called from realize and
 * also directly by the "query-cpu-model-expansion" path; stops at the
 * first error, which is propagated to errp.
 */
void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        arm_cpu_sve_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_pauth_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_lpa2_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (kvm_enabled()) {
        kvm_arm_steal_time_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
1429
14969266 1430static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
581be094 1431{
14a10fc3 1432 CPUState *cs = CPU(dev);
14969266
AF
1433 ARMCPU *cpu = ARM_CPU(dev);
1434 ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
581be094 1435 CPUARMState *env = &cpu->env;
e97da98f 1436 int pagebits;
ce5b1bbf 1437 Error *local_err = NULL;
0f8d06f1 1438 bool no_aa32 = false;
ce5b1bbf 1439
c4487d76
PM
1440 /* If we needed to query the host kernel for the CPU features
1441 * then it's possible that might have failed in the initfn, but
1442 * this is the first point where we can report it.
1443 */
1444 if (cpu->host_cpu_probe_failed) {
585df85e
PM
1445 if (!kvm_enabled() && !hvf_enabled()) {
1446 error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
c4487d76
PM
1447 } else {
1448 error_setg(errp, "Failed to retrieve host CPU features");
1449 }
1450 return;
1451 }
1452
95f87565
PM
1453#ifndef CONFIG_USER_ONLY
1454 /* The NVIC and M-profile CPU are two halves of a single piece of
1455 * hardware; trying to use one without the other is a command line
1456 * error and will result in segfaults if not caught here.
1457 */
1458 if (arm_feature(env, ARM_FEATURE_M)) {
1459 if (!env->nvic) {
1460 error_setg(errp, "This board cannot be used with Cortex-M CPUs");
1461 return;
1462 }
1463 } else {
1464 if (env->nvic) {
1465 error_setg(errp, "This board can only be used with Cortex-M CPUs");
1466 return;
1467 }
1468 }
397cd31f 1469
49e7f191
PM
1470 if (kvm_enabled()) {
1471 /*
1472 * Catch all the cases which might cause us to create more than one
1473 * address space for the CPU (otherwise we will assert() later in
1474 * cpu_address_space_init()).
1475 */
1476 if (arm_feature(env, ARM_FEATURE_M)) {
1477 error_setg(errp,
1478 "Cannot enable KVM when using an M-profile guest CPU");
1479 return;
1480 }
1481 if (cpu->has_el3) {
1482 error_setg(errp,
1483 "Cannot enable KVM when guest CPU has EL3 enabled");
1484 return;
1485 }
1486 if (cpu->tag_memory) {
1487 error_setg(errp,
1488 "Cannot enable KVM when guest CPUs has MTE enabled");
1489 return;
1490 }
1491 }
1492
96eec6b2
AJ
1493 {
1494 uint64_t scale;
1495
1496 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1497 if (!cpu->gt_cntfrq_hz) {
1498 error_setg(errp, "Invalid CNTFRQ: %"PRId64"Hz",
1499 cpu->gt_cntfrq_hz);
1500 return;
1501 }
1502 scale = gt_cntfrq_period_ns(cpu);
1503 } else {
1504 scale = GTIMER_SCALE;
1505 }
1506
1507 cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1508 arm_gt_ptimer_cb, cpu);
1509 cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1510 arm_gt_vtimer_cb, cpu);
1511 cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1512 arm_gt_htimer_cb, cpu);
1513 cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1514 arm_gt_stimer_cb, cpu);
8c94b071
RH
1515 cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
1516 arm_gt_hvtimer_cb, cpu);
96eec6b2 1517 }
95f87565
PM
1518#endif
1519
ce5b1bbf
LV
1520 cpu_exec_realizefn(cs, &local_err);
1521 if (local_err != NULL) {
1522 error_propagate(errp, local_err);
1523 return;
1524 }
14969266 1525
0df9142d
AJ
1526 arm_cpu_finalize_features(cpu, &local_err);
1527 if (local_err != NULL) {
1528 error_propagate(errp, local_err);
1529 return;
1530 }
1531
97a28b0e
PM
1532 if (arm_feature(env, ARM_FEATURE_AARCH64) &&
1533 cpu->has_vfp != cpu->has_neon) {
1534 /*
1535 * This is an architectural requirement for AArch64; AArch32 is
1536 * more flexible and permits VFP-no-Neon and Neon-no-VFP.
1537 */
1538 error_setg(errp,
1539 "AArch64 CPUs must have both VFP and Neon or neither");
1540 return;
1541 }
1542
1543 if (!cpu->has_vfp) {
1544 uint64_t t;
1545 uint32_t u;
1546
97a28b0e
PM
1547 t = cpu->isar.id_aa64isar1;
1548 t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
1549 cpu->isar.id_aa64isar1 = t;
1550
1551 t = cpu->isar.id_aa64pfr0;
1552 t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
1553 cpu->isar.id_aa64pfr0 = t;
1554
1555 u = cpu->isar.id_isar6;
1556 u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
3c93dfa4 1557 u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
97a28b0e
PM
1558 cpu->isar.id_isar6 = u;
1559
1560 u = cpu->isar.mvfr0;
1561 u = FIELD_DP32(u, MVFR0, FPSP, 0);
1562 u = FIELD_DP32(u, MVFR0, FPDP, 0);
97a28b0e
PM
1563 u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
1564 u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
97a28b0e 1565 u = FIELD_DP32(u, MVFR0, FPROUND, 0);
532a3af5
PM
1566 if (!arm_feature(env, ARM_FEATURE_M)) {
1567 u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
1568 u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
1569 }
97a28b0e
PM
1570 cpu->isar.mvfr0 = u;
1571
1572 u = cpu->isar.mvfr1;
1573 u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
1574 u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
1575 u = FIELD_DP32(u, MVFR1, FPHP, 0);
532a3af5
PM
1576 if (arm_feature(env, ARM_FEATURE_M)) {
1577 u = FIELD_DP32(u, MVFR1, FP16, 0);
1578 }
97a28b0e
PM
1579 cpu->isar.mvfr1 = u;
1580
1581 u = cpu->isar.mvfr2;
1582 u = FIELD_DP32(u, MVFR2, FPMISC, 0);
1583 cpu->isar.mvfr2 = u;
1584 }
1585
1586 if (!cpu->has_neon) {
1587 uint64_t t;
1588 uint32_t u;
1589
1590 unset_feature(env, ARM_FEATURE_NEON);
1591
1592 t = cpu->isar.id_aa64isar0;
eb851c11
DH
1593 t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
1594 t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
1595 t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
1596 t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 0);
1597 t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
1598 t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
97a28b0e
PM
1599 t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
1600 cpu->isar.id_aa64isar0 = t;
1601
1602 t = cpu->isar.id_aa64isar1;
1603 t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
3c93dfa4 1604 t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
f8680aaa 1605 t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
97a28b0e
PM
1606 cpu->isar.id_aa64isar1 = t;
1607
1608 t = cpu->isar.id_aa64pfr0;
1609 t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
1610 cpu->isar.id_aa64pfr0 = t;
1611
1612 u = cpu->isar.id_isar5;
eb851c11
DH
1613 u = FIELD_DP32(u, ID_ISAR5, AES, 0);
1614 u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
1615 u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
97a28b0e
PM
1616 u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
1617 u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
1618 cpu->isar.id_isar5 = u;
1619
1620 u = cpu->isar.id_isar6;
1621 u = FIELD_DP32(u, ID_ISAR6, DP, 0);
1622 u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
3c93dfa4 1623 u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
f8680aaa 1624 u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
97a28b0e
PM
1625 cpu->isar.id_isar6 = u;
1626
532a3af5
PM
1627 if (!arm_feature(env, ARM_FEATURE_M)) {
1628 u = cpu->isar.mvfr1;
1629 u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
1630 u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
1631 u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
1632 u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
1633 cpu->isar.mvfr1 = u;
1634
1635 u = cpu->isar.mvfr2;
1636 u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
1637 cpu->isar.mvfr2 = u;
1638 }
97a28b0e
PM
1639 }
1640
1641 if (!cpu->has_neon && !cpu->has_vfp) {
1642 uint64_t t;
1643 uint32_t u;
1644
1645 t = cpu->isar.id_aa64isar0;
1646 t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
1647 cpu->isar.id_aa64isar0 = t;
1648
1649 t = cpu->isar.id_aa64isar1;
1650 t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
1651 cpu->isar.id_aa64isar1 = t;
1652
1653 u = cpu->isar.mvfr0;
1654 u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
1655 cpu->isar.mvfr0 = u;
c52881bb
RH
1656
1657 /* Despite the name, this field covers both VFP and Neon */
1658 u = cpu->isar.mvfr1;
1659 u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
1660 cpu->isar.mvfr1 = u;
97a28b0e
PM
1661 }
1662
ea90db0a
PM
1663 if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
1664 uint32_t u;
1665
1666 unset_feature(env, ARM_FEATURE_THUMB_DSP);
1667
1668 u = cpu->isar.id_isar1;
1669 u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
1670 cpu->isar.id_isar1 = u;
1671
1672 u = cpu->isar.id_isar2;
1673 u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
1674 u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
1675 cpu->isar.id_isar2 = u;
1676
1677 u = cpu->isar.id_isar3;
1678 u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
1679 u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
1680 cpu->isar.id_isar3 = u;
1681 }
1682
581be094 1683 /* Some features automatically imply others: */
81e69fb0 1684 if (arm_feature(env, ARM_FEATURE_V8)) {
5256df88
RH
1685 if (arm_feature(env, ARM_FEATURE_M)) {
1686 set_feature(env, ARM_FEATURE_V7);
1687 } else {
1688 set_feature(env, ARM_FEATURE_V7VE);
1689 }
5110e683 1690 }
0f8d06f1
RH
1691
1692 /*
1693 * There exist AArch64 cpus without AArch32 support. When KVM
1694 * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
1695 * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
8f4821d7
PM
1696 * As a general principle, we also do not make ID register
1697 * consistency checks anywhere unless using TCG, because only
1698 * for TCG would a consistency-check failure be a QEMU bug.
0f8d06f1
RH
1699 */
1700 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1701 no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
1702 }
1703
5110e683
AL
1704 if (arm_feature(env, ARM_FEATURE_V7VE)) {
1705 /* v7 Virtualization Extensions. In real hardware this implies
1706 * EL2 and also the presence of the Security Extensions.
1707 * For QEMU, for backwards-compatibility we implement some
1708 * CPUs or CPU configs which have no actual EL2 or EL3 but do
1709 * include the various other features that V7VE implies.
1710 * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
1711 * Security Extensions is ARM_FEATURE_EL3.
1712 */
873b73c0
PM
1713 assert(!tcg_enabled() || no_aa32 ||
1714 cpu_isar_feature(aa32_arm_div, cpu));
81e69fb0 1715 set_feature(env, ARM_FEATURE_LPAE);
5110e683 1716 set_feature(env, ARM_FEATURE_V7);
81e69fb0 1717 }
581be094
PM
1718 if (arm_feature(env, ARM_FEATURE_V7)) {
1719 set_feature(env, ARM_FEATURE_VAPA);
1720 set_feature(env, ARM_FEATURE_THUMB2);
81bdde9d 1721 set_feature(env, ARM_FEATURE_MPIDR);
581be094
PM
1722 if (!arm_feature(env, ARM_FEATURE_M)) {
1723 set_feature(env, ARM_FEATURE_V6K);
1724 } else {
1725 set_feature(env, ARM_FEATURE_V6);
1726 }
91db4642
CLG
1727
1728 /* Always define VBAR for V7 CPUs even if it doesn't exist in
1729 * non-EL3 configs. This is needed by some legacy boards.
1730 */
1731 set_feature(env, ARM_FEATURE_VBAR);
581be094
PM
1732 }
1733 if (arm_feature(env, ARM_FEATURE_V6K)) {
1734 set_feature(env, ARM_FEATURE_V6);
1735 set_feature(env, ARM_FEATURE_MVFR);
1736 }
1737 if (arm_feature(env, ARM_FEATURE_V6)) {
1738 set_feature(env, ARM_FEATURE_V5);
1739 if (!arm_feature(env, ARM_FEATURE_M)) {
873b73c0
PM
1740 assert(!tcg_enabled() || no_aa32 ||
1741 cpu_isar_feature(aa32_jazelle, cpu));
581be094
PM
1742 set_feature(env, ARM_FEATURE_AUXCR);
1743 }
1744 }
1745 if (arm_feature(env, ARM_FEATURE_V5)) {
1746 set_feature(env, ARM_FEATURE_V4T);
1747 }
de9b05b8 1748 if (arm_feature(env, ARM_FEATURE_LPAE)) {
bdcc150d 1749 set_feature(env, ARM_FEATURE_V7MP);
de9b05b8 1750 }
f318cec6
PM
1751 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
1752 set_feature(env, ARM_FEATURE_CBAR);
1753 }
62b44f05
AR
1754 if (arm_feature(env, ARM_FEATURE_THUMB2) &&
1755 !arm_feature(env, ARM_FEATURE_M)) {
1756 set_feature(env, ARM_FEATURE_THUMB_DSP);
1757 }
2ceb98c0 1758
ea7ac69d
PM
1759 /*
1760 * We rely on no XScale CPU having VFP so we can use the same bits in the
1761 * TB flags field for VECSTRIDE and XSCALE_CPAR.
1762 */
7d63183f
RH
1763 assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ||
1764 !cpu_isar_feature(aa32_vfp_simd, cpu) ||
1765 !arm_feature(env, ARM_FEATURE_XSCALE));
ea7ac69d 1766
e97da98f
PM
1767 if (arm_feature(env, ARM_FEATURE_V7) &&
1768 !arm_feature(env, ARM_FEATURE_M) &&
452a0955 1769 !arm_feature(env, ARM_FEATURE_PMSA)) {
e97da98f
PM
1770 /* v7VMSA drops support for the old ARMv5 tiny pages, so we
1771 * can use 4K pages.
1772 */
1773 pagebits = 12;
1774 } else {
1775 /* For CPUs which might have tiny 1K pages, or which have an
1776 * MPU and might have small region sizes, stick with 1K pages.
1777 */
1778 pagebits = 10;
1779 }
1780 if (!set_preferred_target_page_bits(pagebits)) {
1781 /* This can only ever happen for hotplugging a CPU, or if
1782 * the board code incorrectly creates a CPU which it has
1783 * promised via minimum_page_size that it will not.
1784 */
1785 error_setg(errp, "This CPU requires a smaller page size than the "
1786 "system is using");
1787 return;
1788 }
1789
ce5b1bbf
LV
1790 /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
1791 * We don't support setting cluster ID ([16..23]) (known as Aff2
1792 * in later ARM ARM versions), or any of the higher affinity level fields,
1793 * so these bits always RAZ.
1794 */
1795 if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
46de5913
IM
1796 cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
1797 ARM_DEFAULT_CPUS_PER_CLUSTER);
ce5b1bbf
LV
1798 }
1799
68e0a40a
AP
1800 if (cpu->reset_hivecs) {
1801 cpu->reset_sctlr |= (1 << 13);
1802 }
1803
3a062d57
JB
1804 if (cpu->cfgend) {
1805 if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1806 cpu->reset_sctlr |= SCTLR_EE;
1807 } else {
1808 cpu->reset_sctlr |= SCTLR_B;
1809 }
1810 }
1811
40188188 1812 if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
51942aee
GB
1813 /* If the has_el3 CPU property is disabled then we need to disable the
1814 * feature.
1815 */
1816 unset_feature(env, ARM_FEATURE_EL3);
1817
1818 /* Disable the security extension feature bits in the processor feature
3d5c84ff 1819 * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
51942aee 1820 */
8a130a7b 1821 cpu->isar.id_pfr1 &= ~0xf0;
47576b94 1822 cpu->isar.id_aa64pfr0 &= ~0xf000;
51942aee
GB
1823 }
1824
c25bd18a
PM
1825 if (!cpu->has_el2) {
1826 unset_feature(env, ARM_FEATURE_EL2);
1827 }
1828
d6f02ce3 1829 if (!cpu->has_pmu) {
929e754d 1830 unset_feature(env, ARM_FEATURE_PMU);
57a4a11b
AL
1831 }
1832 if (arm_feature(env, ARM_FEATURE_PMU)) {
bf8d0969 1833 pmu_init(cpu);
57a4a11b
AL
1834
1835 if (!kvm_enabled()) {
1836 arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
1837 arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
1838 }
4e7beb0c
AL
1839
1840#ifndef CONFIG_USER_ONLY
1841 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
1842 cpu);
1843#endif
57a4a11b 1844 } else {
2a609df8
PM
1845 cpu->isar.id_aa64dfr0 =
1846 FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
a6179538 1847 cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
57a4a11b
AL
1848 cpu->pmceid0 = 0;
1849 cpu->pmceid1 = 0;
929e754d
WH
1850 }
1851
3c2f7bb3
PM
1852 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1853 /* Disable the hypervisor feature bits in the processor feature
1854 * registers if we don't have EL2. These are id_pfr1[15:12] and
1855 * id_aa64pfr0_el1[11:8].
1856 */
47576b94 1857 cpu->isar.id_aa64pfr0 &= ~0xf00;
8a130a7b 1858 cpu->isar.id_pfr1 &= ~0xf000;
3c2f7bb3
PM
1859 }
1860
6f4e1405
RH
1861#ifndef CONFIG_USER_ONLY
1862 if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
1863 /*
1864 * Disable the MTE feature bits if we do not have tag-memory
1865 * provided by the machine.
1866 */
1867 cpu->isar.id_aa64pfr1 =
1868 FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
1869 }
1870#endif
1871
f50cd314
PM
1872 /* MPU can be configured out of a PMSA CPU either by setting has-mpu
1873 * to false or by setting pmsav7-dregion to 0.
1874 */
8f325f56 1875 if (!cpu->has_mpu) {
f50cd314
PM
1876 cpu->pmsav7_dregion = 0;
1877 }
1878 if (cpu->pmsav7_dregion == 0) {
1879 cpu->has_mpu = false;
8f325f56
PC
1880 }
1881
452a0955 1882 if (arm_feature(env, ARM_FEATURE_PMSA) &&
3281af81
PC
1883 arm_feature(env, ARM_FEATURE_V7)) {
1884 uint32_t nr = cpu->pmsav7_dregion;
1885
1886 if (nr > 0xff) {
9af9e0fe 1887 error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
3281af81
PC
1888 return;
1889 }
6cb0b013
PC
1890
1891 if (nr) {
0e1a46bb
PM
1892 if (arm_feature(env, ARM_FEATURE_V8)) {
1893 /* PMSAv8 */
62c58ee0
PM
1894 env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
1895 env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
1896 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1897 env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
1898 env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
1899 }
0e1a46bb
PM
1900 } else {
1901 env->pmsav7.drbar = g_new0(uint32_t, nr);
1902 env->pmsav7.drsr = g_new0(uint32_t, nr);
1903 env->pmsav7.dracr = g_new0(uint32_t, nr);
1904 }
6cb0b013 1905 }
3281af81
PC
1906 }
1907
9901c576
PM
1908 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1909 uint32_t nr = cpu->sau_sregion;
1910
1911 if (nr > 0xff) {
1912 error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
1913 return;
1914 }
1915
1916 if (nr) {
1917 env->sau.rbar = g_new0(uint32_t, nr);
1918 env->sau.rlar = g_new0(uint32_t, nr);
1919 }
1920 }
1921
91db4642
CLG
1922 if (arm_feature(env, ARM_FEATURE_EL3)) {
1923 set_feature(env, ARM_FEATURE_VBAR);
1924 }
1925
2ceb98c0 1926 register_cp_regs_for_features(cpu);
14969266
AF
1927 arm_cpu_register_gdb_regs_for_features(cpu);
1928
721fae12
PM
1929 init_cpreg_list(cpu);
1930
9e273ef2 1931#ifndef CONFIG_USER_ONLY
cc7d44c2
LX
1932 MachineState *ms = MACHINE(qdev_get_machine());
1933 unsigned int smp_cpus = ms->smp.cpus;
8bce44a2 1934 bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);
cc7d44c2 1935
8bce44a2
RH
1936 /*
1937 * We must set cs->num_ases to the final value before
1938 * the first call to cpu_address_space_init.
1939 */
1940 if (cpu->tag_memory != NULL) {
1941 cs->num_ases = 3 + has_secure;
1942 } else {
1943 cs->num_ases = 1 + has_secure;
1944 }
1d2091bc 1945
8bce44a2 1946 if (has_secure) {
9e273ef2
PM
1947 if (!cpu->secure_memory) {
1948 cpu->secure_memory = cs->memory;
1949 }
80ceb07a
PX
1950 cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
1951 cpu->secure_memory);
9e273ef2 1952 }
8bce44a2
RH
1953
1954 if (cpu->tag_memory != NULL) {
1955 cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
1956 cpu->tag_memory);
1957 if (has_secure) {
1958 cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
1959 cpu->secure_tag_memory);
1960 }
8bce44a2
RH
1961 }
1962
80ceb07a 1963 cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
f9a69711
AF
1964
1965 /* No core_count specified, default to smp_cpus. */
1966 if (cpu->core_count == -1) {
1967 cpu->core_count = smp_cpus;
1968 }
9e273ef2
PM
1969#endif
1970
a4157b80
RH
1971 if (tcg_enabled()) {
1972 int dcz_blocklen = 4 << cpu->dcz_blocksize;
1973
1974 /*
1975 * We only support DCZ blocklen that fits on one page.
1976 *
1977 * Architectually this is always true. However TARGET_PAGE_SIZE
1978 * is variable and, for compatibility with -machine virt-2.7,
1979 * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
1980 * But even then, while the largest architectural DCZ blocklen
1981 * is 2KiB, no cpu actually uses such a large blocklen.
1982 */
1983 assert(dcz_blocklen <= TARGET_PAGE_SIZE);
1984
1985 /*
1986 * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
1987 * both nibbles of each byte storing tag data may be written at once.
1988 * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
1989 */
1990 if (cpu_isar_feature(aa64_mte, cpu)) {
1991 assert(dcz_blocklen >= 2 * TAG_GRANULE);
1992 }
1993 }
1994
14a10fc3 1995 qemu_init_vcpu(cs);
00d0f7cb 1996 cpu_reset(cs);
14969266
AF
1997
1998 acc->parent_realize(dev, errp);
581be094
PM
1999}
2000
5900d6b2
AF
2001static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
2002{
2003 ObjectClass *oc;
51492fd1 2004 char *typename;
fb8d6c24 2005 char **cpuname;
a0032cc5 2006 const char *cpunamestr;
5900d6b2 2007
fb8d6c24 2008 cpuname = g_strsplit(cpu_model, ",", 1);
a0032cc5
PM
2009 cpunamestr = cpuname[0];
2010#ifdef CONFIG_USER_ONLY
2011 /* For backwards compatibility usermode emulation allows "-cpu any",
2012 * which has the same semantics as "-cpu max".
2013 */
2014 if (!strcmp(cpunamestr, "any")) {
2015 cpunamestr = "max";
2016 }
2017#endif
2018 typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
51492fd1 2019 oc = object_class_by_name(typename);
fb8d6c24 2020 g_strfreev(cpuname);
51492fd1 2021 g_free(typename);
245fb54d
AF
2022 if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
2023 object_class_is_abstract(oc)) {
5900d6b2
AF
2024 return NULL;
2025 }
2026 return oc;
2027}
2028
5de16430 2029static Property arm_cpu_properties[] = {
e544f800 2030 DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
ce5b1bbf
LV
2031 DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
2032 mp_affinity, ARM64_AFFINITY_INVALID),
15f8b142 2033 DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
f9a69711 2034 DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
5de16430
PM
2035 DEFINE_PROP_END_OF_LIST()
2036};
2037
b3820e6c
DH
2038static gchar *arm_gdb_arch_name(CPUState *cs)
2039{
2040 ARMCPU *cpu = ARM_CPU(cs);
2041 CPUARMState *env = &cpu->env;
2042
2043 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2044 return g_strdup("iwmmxt");
2045 }
2046 return g_strdup("arm");
2047}
2048
8b80bd28
PMD
#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

/*
 * System-emulation-only hooks the generic CPU core invokes on ARM
 * CPUs: debug physical-address lookup, address-space index selection,
 * ELF core-dump note emission, virtio endianness query, and the
 * legacy migration state description.
 */
static const struct SysemuCPUOps arm_sysemu_ops = {
    .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
    .asidx_from_attrs = arm_asidx_from_attrs,
    .write_elf32_note = arm_cpu_write_elf32_note,
    .write_elf64_note = arm_cpu_write_elf64_note,
    .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
    .legacy_vmsd = &vmstate_arm_cpu,
};
#endif
2061
#ifdef CONFIG_TCG
/*
 * TCG-only hooks: translator setup, TB state synchronization, and
 * exception/memory-fault handling.  The user-mode and system-mode
 * builds install different fault-handling callbacks.
 */
static const struct TCGCPUOps arm_tcg_ops = {
    .initialize = arm_translate_init,
    .synchronize_from_tb = arm_cpu_synchronize_from_tb,
    .debug_excp_handler = arm_debug_excp_handler,

#ifdef CONFIG_USER_ONLY
    /* Hooks present only in user-mode emulation. */
    .record_sigsegv = arm_cpu_record_sigsegv,
    .record_sigbus = arm_cpu_record_sigbus,
#else
    /* System-mode: TLB fill, interrupts, and bus/alignment faults. */
    .tlb_fill = arm_cpu_tlb_fill,
    .cpu_exec_interrupt = arm_cpu_exec_interrupt,
    .do_interrupt = arm_cpu_do_interrupt,
    .do_transaction_failed = arm_cpu_do_transaction_failed,
    .do_unaligned_access = arm_cpu_do_unaligned_access,
    .adjust_watchpoint_address = arm_adjust_watchpoint_address,
    .debug_check_watchpoint = arm_debug_check_watchpoint,
    .debug_check_breakpoint = arm_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
2083
dec9c2d4
AF
2084static void arm_cpu_class_init(ObjectClass *oc, void *data)
2085{
2086 ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2087 CPUClass *cc = CPU_CLASS(acc);
14969266
AF
2088 DeviceClass *dc = DEVICE_CLASS(oc);
2089
bf853881
PMD
2090 device_class_set_parent_realize(dc, arm_cpu_realizefn,
2091 &acc->parent_realize);
dec9c2d4 2092
4f67d30b 2093 device_class_set_props(dc, arm_cpu_properties);
781c67ca 2094 device_class_set_parent_reset(dc, arm_cpu_reset, &acc->parent_reset);
5900d6b2
AF
2095
2096 cc->class_by_name = arm_cpu_class_by_name;
8c2e1b00 2097 cc->has_work = arm_cpu_has_work;
878096ee 2098 cc->dump_state = arm_cpu_dump_state;
f45748f1 2099 cc->set_pc = arm_cpu_set_pc;
5b50e790
AF
2100 cc->gdb_read_register = arm_cpu_gdb_read_register;
2101 cc->gdb_write_register = arm_cpu_gdb_write_register;
7350d553 2102#ifndef CONFIG_USER_ONLY
8b80bd28 2103 cc->sysemu_ops = &arm_sysemu_ops;
00b941e5 2104#endif
a0e372f0 2105 cc->gdb_num_core_regs = 26;
5b24c641 2106 cc->gdb_core_xml_file = "arm-core.xml";
b3820e6c 2107 cc->gdb_arch_name = arm_gdb_arch_name;
200bf5b7 2108 cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
2472b6c0 2109 cc->gdb_stop_before_watchpoint = true;
48440620 2110 cc->disas_set_info = arm_disas_set_info;
78271684 2111
74d7fc7f 2112#ifdef CONFIG_TCG
78271684 2113 cc->tcg_ops = &arm_tcg_ops;
cbc183d2 2114#endif /* CONFIG_TCG */
dec9c2d4
AF
2115}
2116
51e5ef45
MAL
2117static void arm_cpu_instance_init(Object *obj)
2118{
2119 ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
2120
2121 acc->info->initfn(obj);
2122 arm_cpu_post_init(obj);
2123}
2124
2125static void cpu_register_class_init(ObjectClass *oc, void *data)
2126{
2127 ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2128
2129 acc->info = data;
2130}
2131
37bcf244 2132void arm_cpu_register(const ARMCPUInfo *info)
777dc784
PM
2133{
2134 TypeInfo type_info = {
777dc784
PM
2135 .parent = TYPE_ARM_CPU,
2136 .instance_size = sizeof(ARMCPU),
d03087bd 2137 .instance_align = __alignof__(ARMCPU),
51e5ef45 2138 .instance_init = arm_cpu_instance_init,
777dc784 2139 .class_size = sizeof(ARMCPUClass),
51e5ef45
MAL
2140 .class_init = info->class_init ?: cpu_register_class_init,
2141 .class_data = (void *)info,
777dc784
PM
2142 };
2143
51492fd1 2144 type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
918fd083 2145 type_register(&type_info);
51492fd1 2146 g_free((void *)type_info.name);
777dc784
PM
2147}
2148
dec9c2d4
AF
/* Abstract base type every concrete ARM CPU model derives from. */
static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_align = __alignof__(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,   /* never instantiated directly; see arm_cpu_register */
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};
2160
/* Register the abstract base type with QOM at module-init time. */
static void arm_cpu_register_types(void)
{
    type_register_static(&arm_cpu_type_info);
}

type_init(arm_cpu_register_types)