/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_CPU_H
#define ARM_CPU_H

#include "kvm-consts.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO      (0)

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15
#define EXCP_SEMIHOST       16   /* semihosting call */
#define EXCP_NOCP           17   /* v7M NOCP UsageFault */
#define EXCP_INVSTATE       18   /* v7M INVSTATE UsageFault */
#define EXCP_STKOF          19   /* v8M STKOF UsageFault */
#define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
#define EXCP_LSERR          21   /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */

#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SECURE  7
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* For M profile, some registers are banked secure vs non-secure;
 * these are represented as a 2-element array where the first element
 * is the non-secure copy and the second is the secure copy.
 * When the CPU does not implement the security extension then
 * only the first element is used.
 * This means that the copy for the current security state can be
 * accessed via env->registerfield[env->v7m.secure] (whether the security
 * extension is implemented or not).
 */
enum {
    M_REG_NS = 0,
    M_REG_S = 1,
    M_REG_NUM_BANKS = 2,
};

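/* Illustrative example (not part of the original header): because the
 * M_REG_* constants index the 2-element banks declared in CPUARMState
 * below, code can read e.g. the banked FAULTMASK as
 *     env->v7m.faultmask[env->v7m.secure]     (current security state)
 *     env->v7m.faultmask[M_REG_NS]            (explicitly non-secure)
 */
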
/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif

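/* Illustrative example (not part of the original header): a cpreg
 * definition for the AArch32 view of a 64-bit field would use
 *     .fieldoffset = offsetoflow32(CPUARMState, cp15.ttbr0_el[1])
 * so that 32-bit reads and writes touch only the low half of the
 * uint64_t regardless of host endianness. (ARMCPRegInfo's fieldoffset
 * member is assumed here; it is declared elsewhere in QEMU.)
 */
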
/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK    ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT   14

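/* Illustrative only: the round trip implied by the comment above is
 *     stored   = (syndrome & ARM_INSN_START_WORD2_MASK)
 *                    >> ARM_INSN_START_WORD2_SHIFT;
 *     restored = stored << ARM_INSN_START_WORD2_SHIFT;
 * which preserves bits [25:14] of the syndrome word.
 */
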
/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of a FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/**
 * DynamicGDBXMLInfo:
 * @desc: Contains the XML descriptions.
 * @num_cpregs: Number of the Coprocessor registers seen by GDB.
 * @cpregs_keys: Array that contains the corresponding Key of
 * a given cpreg with the same order of the cpreg in the XML description.
 */
typedef struct DynamicGDBXMLInfo {
    char *desc;
    int num_cpregs;
    uint32_t *cpregs_keys;
} DynamicGDBXMLInfo;

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS     0
#define GTIMER_VIRT     1
#define GTIMER_HYP      2
#define GTIMER_SEC      3
#define GTIMER_HYPVIRT  4
#define NUM_GTIMERS     5

typedef struct {
    uint64_t raw_tcr;
    uint32_t mask;
    uint32_t base_mask;
} TCR;

/* Define a maximum sized vector register.
 * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
 * For 64-bit, this is a 2048-bit SVE register.
 *
 * Note that the mapping between S, D, and Q views of the register bank
 * differs between AArch64 and AArch32.
 * In AArch32:
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n / 2].d[n & 1]
 *  Sn = regs[n / 4].d[n % 4 / 2],
 *       bits 31..0 for even n, and bits 63..32 for odd n
 *       (and regs[16] to regs[31] are inaccessible)
 * In AArch64:
 *  Zn = regs[n].d[*]
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n].d[0]
 *  Sn = regs[n].d[0] bits 31..0
 *  Hn = regs[n].d[0] bits 15..0
 *
 * This corresponds to the architecturally defined mapping between
 * the two execution states, and means we do not need to explicitly
 * map these registers when changing states.
 *
 * Align the data for use with TCG host vector operations.
 */

#ifdef TARGET_AARCH64
# define ARM_MAX_VQ    16
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
#else
# define ARM_MAX_VQ    1
static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
#endif

typedef struct ARMVectorReg {
    uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;

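/* Illustrative sketch (not part of the original header): the AArch32
 * D and S views of the mapping described above. These helper names are
 * hypothetical; QEMU's real accessors live elsewhere.
 */
static inline uint64_t example_aa32_dreg(const ARMVectorReg *regs, unsigned n)
{
    /* Dn = regs[n / 2].d[n & 1] */
    return regs[n / 2].d[n & 1];
}

static inline uint32_t example_aa32_sreg(const ARMVectorReg *regs, unsigned n)
{
    /* Sn lives in regs[n / 4].d[n % 4 / 2]: low 32 bits for even n,
     * high 32 bits for odd n.
     */
    return (uint32_t)(regs[n / 4].d[n % 4 / 2] >> ((n & 1) * 32));
}
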
#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all.  */
typedef struct ARMPredicateReg {
    uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
} ARMPredicateReg;

/* In AArch32 mode, PAC keys do not exist at all.  */
typedef struct ARMPACKey {
    uint64_t lo, hi;
} ARMPACKey;
#endif

typedef struct CPUARMState {
    /* Regs for current mode.  */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  BTYPE is kept in env->btype
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Cached TBFLAGS state.  See below for which bits are included.  */
    uint32_t hflags;

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits.  Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is the bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint32_t btype;  /* BTI branch type.  spsr[11:10].  */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4];  /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base.  */
        /* MMU translation table base control. */
        TCR tcr_el[4];
        TCR vtcr_el2; /* Virtualization Translation Control.  */
        uint32_t c2_data; /* MPU data cacheable bits.  */
        uint32_t c2_insn; /* MPU instruction cacheable bits.  */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        union { /* Fault status registers.  */
            struct {
                uint64_t ifsr_ns;
                uint64_t ifsr_s;
            };
            struct {
                uint64_t ifsr32_el2;
            };
        };
        union {
            struct {
                uint64_t _unused_dfsr;
                uint64_t dfsr_ns;
                uint64_t hsr;
                uint64_t dfsr_s;
            };
            uint64_t esr_el[4];
        };
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        union { /* Fault address registers. */
            struct {
                uint64_t _unused_far0;
#ifdef HOST_WORDS_BIGENDIAN
                uint32_t ifar_ns;
                uint32_t dfar_ns;
                uint32_t ifar_s;
                uint32_t dfar_s;
#else
                uint32_t dfar_ns;
                uint32_t ifar_ns;
                uint32_t dfar_s;
                uint32_t ifar_s;
#endif
                uint64_t _unused_far3;
            };
            uint64_t far_el[4];
        };
        uint64_t hpfar_el2;
        uint64_t hstr_el2;
        union { /* Translation result. */
            struct {
                uint64_t _unused_par_0;
                uint64_t par_ns;
                uint64_t _unused_par_1;
                uint64_t par_s;
            };
            uint64_t par_el[4];
        };

        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint64_t c9_pmovsr; /* perf monitor overflow status */
        uint64_t c9_pmuserenr; /* perf monitor user enable */
        uint64_t c9_pmselr; /* perf monitor counter selection register */
        uint64_t c9_pminten; /* perf monitor interrupt enables */
        union { /* Memory attribute redirection */
            struct {
#ifdef HOST_WORDS_BIGENDIAN
                uint64_t _unused_mair_0;
                uint32_t mair1_ns;
                uint32_t mair0_ns;
                uint64_t _unused_mair_1;
                uint32_t mair1_s;
                uint32_t mair0_s;
#else
                uint64_t _unused_mair_0;
                uint32_t mair0_ns;
                uint32_t mair1_ns;
                uint64_t _unused_mair_1;
                uint32_t mair0_s;
                uint32_t mair1_s;
#endif
            };
            uint64_t mair_el[4];
        };
        union { /* vector base address register */
            struct {
                uint64_t _unused_vbar;
                uint64_t vbar_ns;
                uint64_t hvbar;
                uint64_t vbar_s;
            };
            uint64_t vbar_el[4];
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        struct { /* FCSE PID. */
            uint32_t fcseidr_ns;
            uint32_t fcseidr_s;
        };
        union { /* Context ID. */
            struct {
                uint64_t _unused_contextidr_0;
                uint64_t contextidr_ns;
                uint64_t _unused_contextidr_1;
                uint64_t contextidr_s;
            };
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            struct {
                uint64_t tpidrurw_ns;
                uint64_t tpidrprw_ns;
                uint64_t htpidr;
                uint64_t _tpidr_el3;
            };
            uint64_t tpidr_el[4];
        };
        /* The secure banks of these registers don't map anywhere */
        uint64_t tpidrurw_s;
        uint64_t tpidrprw_s;
        uint64_t tpidruro_s;

        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        uint64_t oslsr_el1; /* OS Lock Status */
        uint64_t mdcr_el2;
        uint64_t mdcr_el3;
        /* Stores the architectural value of the counter *the last time it was
         * updated* by pmccntr_op_start. Accesses should always be surrounded
         * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest
         * architecturally-correct value is being read/set.
         */
        uint64_t c15_ccnt;
        /* Stores the delta between the architectural value and the underlying
         * cycle count during normal operation. It is used to update c15_ccnt
         * to be the correct architectural value before accesses. During
         * accesses, c15_ccnt_delta contains the underlying count being used
         * for the access, after which it reverts to the delta value in
         * pmccntr_op_finish.
         */
        uint64_t c15_ccnt_delta;
        uint64_t c14_pmevcntr[31];
        uint64_t c14_pmevcntr_delta[31];
        uint64_t c14_pmevtyper[31];
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
    } cp15;

    struct {
        /* M profile has up to 4 stack pointers:
         * a Main Stack Pointer and a Process Stack Pointer for each
         * of the Secure and Non-Secure states. (If the CPU doesn't support
         * the security extension then it has only two SPs.)
         * In QEMU we always store the currently active SP in regs[13],
         * and the non-active SP for the current security state in
         * v7m.other_sp. The stack pointers for the inactive security state
         * are stored in other_ss_msp and other_ss_psp.
         * switch_v7m_security_state() is responsible for rearranging them
         * when we change security state.
         */
        uint32_t other_sp;
        uint32_t other_ss_msp;
        uint32_t other_ss_psp;
        uint32_t vecbase[M_REG_NUM_BANKS];
        uint32_t basepri[M_REG_NUM_BANKS];
        uint32_t control[M_REG_NUM_BANKS];
        uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
        uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
        uint32_t hfsr; /* HardFault Status */
        uint32_t dfsr; /* Debug Fault Status Register */
        uint32_t sfsr; /* Secure Fault Status Register */
        uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
        uint32_t bfar; /* BusFault Address */
        uint32_t sfar; /* Secure Fault Address Register */
        unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
        int exception;
        uint32_t primask[M_REG_NUM_BANKS];
        uint32_t faultmask[M_REG_NUM_BANKS];
        uint32_t aircr; /* only holds r/w state if security extn implemented */
        uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
        uint32_t csselr[M_REG_NUM_BANKS];
        uint32_t scr[M_REG_NUM_BANKS];
        uint32_t msplim[M_REG_NUM_BANKS];
        uint32_t psplim[M_REG_NUM_BANKS];
        uint32_t fpcar[M_REG_NUM_BANKS];
        uint32_t fpccr[M_REG_NUM_BANKS];
        uint32_t fpdscr[M_REG_NUM_BANKS];
        uint32_t cpacr[M_REG_NUM_BANKS];
        uint32_t nsacr;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Information associated with an SError */
    struct {
        uint8_t pending;
        uint8_t has_esr;
        uint64_t esr;
    } serror;

    /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
    uint32_t irq_line_state;

    /* Thumb-2 EE state.  */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state.  */
    struct {
        ARMVectorReg zregs[32];

#ifdef TARGET_AARCH64
        /* Store FFR as pregs[16] to make it easier to treat as any other.  */
#define FFR_PRED_NUM 16
        ARMPredicateReg pregs[17];
        /* Scratch space for aa64 sve predicate temporary.  */
        ARMPredicateReg preg_tmp;
#endif

        /* We store these fpcsr fields separately for convenience.  */
        uint32_t qc[4] QEMU_ALIGNED(16);
        int vec_len;
        int vec_stride;

        uint32_t xregs[16];

        /* Scratch space for aa32 neon expansion.  */
        uint32_t scratch[8];

        /* There are a number of distinct float control structures:
         *
         *  fp_status: is the "normal" fp status.
         *  fp_status_fp16: used for half-precision calculations
         *  standard_fp_status : the ARM "Standard FPSCR Value"
         *
         * Half-precision operations are governed by a separate
         * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
         * status structure to control this.
         *
         * The "Standard FPSCR", ie default-NaN, flush-to-zero,
         * round-to-nearest and is used by any operations (generally
         * Neon) which the architecture defines as controlled by the
         * standard FPSCR value rather than the FPSCR.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the three fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status fp_status_f16;
        float_status standard_fp_status;

        /* ZCR_EL[1-3] */
        uint64_t zcr_el[4];
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;

    /* iwMMXt coprocessor state.  */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

#ifdef TARGET_AARCH64
    struct {
        ARMPACKey apia;
        ARMPACKey apib;
        ARMPACKey apda;
        ARMPACKey apdb;
        ARMPACKey apga;
    } keys;
#endif

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation.  */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */

    /* Internal CPU feature flags.  */
    uint64_t features;

    /* PMSAv7 MPU */
    struct {
        uint32_t *drbar;
        uint32_t *drsr;
        uint32_t *dracr;
        uint32_t rnr[M_REG_NUM_BANKS];
    } pmsav7;

    /* PMSAv8 MPU */
    struct {
        /* The PMSAv8 implementation also shares some PMSAv7 config
         * and state:
         *  pmsav7.rnr (region number register)
         *  pmsav7_dregion (number of configured regions)
         */
        uint32_t *rbar[M_REG_NUM_BANKS];
        uint32_t *rlar[M_REG_NUM_BANKS];
        uint32_t mair0[M_REG_NUM_BANKS];
        uint32_t mair1[M_REG_NUM_BANKS];
    } pmsav8;

    /* v8M SAU */
    struct {
        uint32_t *rbar;
        uint32_t *rlar;
        uint32_t rnr;
        uint32_t ctrl;
    } sau;

    void *nvic;
    const struct arm_boot_info *boot_info;
    /* Store GICv3CPUState to access from this struct */
    void *gicv3state;
} CPUARMState;

/**
 * ARMELChangeHookFn:
 * type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
typedef struct ARMELChangeHook ARMELChangeHook;
struct ARMELChangeHook {
    ARMELChangeHookFn *hook;
    void *opaque;
    QLIST_ENTRY(ARMELChangeHook) node;
};

/* These values map onto the return values for
 * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;

typedef struct ARMISARegisters ARMISARegisters;

/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ARMCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUNegativeOffsetState neg;
    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    DynamicGDBXMLInfo dyn_xml;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /*
     * Timer used by the PMU. Its state is restored after migration by
     * pmu_op_finish() - it does not need other handling during migration
     */
    QEMUTimer *pmu_timer;
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;
    /* GPIO output for the PMU interrupt */
    qemu_irq pmu_interrupt;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* For v8M, pointer to the IDAU interface provided by board/SoC */
    Object *idau;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;

    /* Should CPU start in PSCI powered-off state? */
    bool start_powered_off;

    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;
    /* CPU has VFP */
    bool has_vfp;
    /* CPU has Neon */
    bool has_neon;
    /* CPU has M-profile DSP extension */
    bool has_dsp;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;
    /* v8M SAU number of supported regions */
    uint32_t sau_sregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* For v8M, initial value of the Secure VTOR */
    uint32_t init_svtor;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* KVM CPU state */

    /* KVM virtual time adjustment */
    bool kvm_adjvtime;
    bool kvm_vtime_dirty;
    uint64_t kvm_vtime;

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
     * and the probe failed (so we need to report the error in realize)
     */
    bool host_cpu_probe_failed;

    /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
     * register.
     */
    int32_t core_count;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     * Some of these registers are split out into a substructure that
     * is shared with the translators to control the ISA.
     */
    struct ARMISARegisters {
        uint32_t id_isar0;
        uint32_t id_isar1;
        uint32_t id_isar2;
        uint32_t id_isar3;
        uint32_t id_isar4;
        uint32_t id_isar5;
        uint32_t id_isar6;
        uint32_t mvfr0;
        uint32_t mvfr1;
        uint32_t mvfr2;
        uint64_t id_aa64isar0;
        uint64_t id_aa64isar1;
        uint64_t id_aa64pfr0;
        uint64_t id_aa64pfr1;
        uint64_t id_aa64mmfr0;
        uint64_t id_aa64mmfr1;
    } isar;
    uint32_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint32_t ctr;
    uint32_t reset_sctlr;
    uint32_t id_pfr0;
    uint32_t id_pfr1;
    uint32_t id_dfr0;
    uint64_t pmceid0;
    uint64_t pmceid1;
    uint32_t id_afr0;
    uint32_t id_mmfr0;
    uint32_t id_mmfr1;
    uint32_t id_mmfr2;
    uint32_t id_mmfr3;
    uint32_t id_mmfr4;
    uint64_t id_aa64dfr0;
    uint64_t id_aa64dfr1;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint32_t dbgdidr;
    uint32_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint32_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;
    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint32_t dcz_blocksize;
    uint64_t rvbar;

    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode).  This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
    QLIST_HEAD(, ARMELChangeHook) el_change_hooks;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;

    /* Used to set the maximum vector length the cpu will support.  */
    uint32_t sve_max_vq;

    /*
     * In sve_vq_map each set bit is a supported vector length of
     * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
     * length in quadwords.
     *
     * While processing properties during initialization, corresponding
     * sve_vq_init bits are set for bits in sve_vq_map that have been
     * set by properties.
     */
    DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ);
    DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ);

    /* Generic timer counter frequency, in Hz */
    uint64_t gt_cntfrq_hz;
};

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);

void arm_cpu_post_init(Object *obj);

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

/* Dynamically generates for gdb stub an XML description of the sysregs from
 * the cp_regs hashtable. Returns the registered sysregs number.
 */
int arm_gen_dynamic_xml(CPUState *cpu);

/* Returns the dynamically generated XML for the gdb stub.
 * Returns a pointer to the XML contents for the specified XML file or NULL
 * if the XML name doesn't match the predefined one.
 */
const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);

#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64);
void aarch64_add_sve_properties(Object *obj);

/*
 * SVE registers are encoded in KVM's memory in an endianness-invariant format.
 * The byte at offset i from the start of the in-memory representation contains
 * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
 * lowest offsets are stored in the lowest memory addresses, then that nearly
 * matches QEMU's representation, which is to use an array of host-endian
 * uint64_t's, where the lower offsets are at the lower indices. To complete
 * the translation we just need to byte swap the uint64_t's on big-endian hosts.
 */
static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
{
#ifdef HOST_WORDS_BIGENDIAN
    int i;

    for (i = 0; i < nr; ++i) {
        dst[i] = bswap64(src[i]);
    }

    return dst;
#else
    return src;
#endif
}

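/* Illustrative sketch (not part of the original header, hypothetical
 * helper): preparing one Z register for transfer to KVM. Note that the
 * return value of sve_bswap64() must be used: on little-endian hosts it
 * is the untouched source, on big-endian hosts the swapped scratch copy.
 */
static inline uint64_t *example_sve_zreg_for_kvm(CPUARMState *env, int n,
                                                 uint64_t *scratch, int vq)
{
    /* vq quadwords = vq * 2 uint64_t elements */
    return sve_bswap64(scratch, &env->vfp.zregs[n].d[0], vq * 2);
}
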
#else
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
                                         int n, bool a)
{ }
static inline void aarch64_add_sve_properties(Object *obj) { }
#endif

#if !defined(CONFIG_TCG)
static inline target_ulong do_arm_semihosting(CPUARMState *env)
{
    g_assert_not_reached();
}
#else
target_ulong do_arm_semihosting(CPUARMState *env);
#endif
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

int fp_exception_el(CPUARMState *env, int cur_el);
int sve_exception_el(CPUARMState *env, int cur_el);
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/**
 * pmu_op_start/finish
 * @env: CPUARMState
 *
 * Convert all PMU counters between their delta form (the typical mode when
 * they are enabled) and the guest-visible values. These two calls must
 * surround any action which might affect the counters.
 */
void pmu_op_start(CPUARMState *env);
void pmu_op_finish(CPUARMState *env);

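/* Illustrative sketch (not part of the original header, hypothetical
 * helper): bracketing a direct read of the cycle counter state with the
 * conversion calls, as the contract above requires.
 */
static inline uint64_t example_read_cycle_counter(CPUARMState *env)
{
    uint64_t ret;

    pmu_op_start(env);          /* fold deltas into architectural values */
    ret = env->cp15.c15_ccnt;   /* now architecturally correct */
    pmu_op_finish(env);         /* return counters to delta form */
    return ret;
}
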
/*
 * Called when a PMU counter is due to overflow
 */
void arm_pmu_timer_cb(void *opaque);

/**
 * Functions to register as EL change hooks for PMU mode filtering
 */
void pmu_pre_el_change(ARMCPU *cpu, void *ignored);
void pmu_post_el_change(ARMCPU *cpu, void *ignored);

/*
 * pmu_init
 * @cpu: ARMCPU
 *
 * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state
 * for the current configuration
 */
void pmu_init(ARMCPU *cpu);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M       (1U << 0)
#define SCTLR_A       (1U << 1)
#define SCTLR_C       (1U << 2)
#define SCTLR_W       (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */
#define SCTLR_SA      (1U << 3) /* AArch64 only */
#define SCTLR_P       (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */
#define SCTLR_SA0     (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D       (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L       (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_nAA     (1U << 6) /* when v8.4-LSE is implemented */
#define SCTLR_B       (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD     (1U << 7) /* v8 onward */
#define SCTLR_S       (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED     (1U << 8) /* v8 onward */
#define SCTLR_R       (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA     (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F       (1U << 10) /* up to v6 */
#define SCTLR_SW      (1U << 10) /* v7 */
#define SCTLR_EnRCTX  (1U << 10) /* in v8.0-PredInv */
#define SCTLR_Z       (1U << 11) /* in v7, RES1 in v8 */
#define SCTLR_EOS     (1U << 11) /* v8.5-ExS */
#define SCTLR_I       (1U << 12)
#define SCTLR_V       (1U << 13) /* AArch32 only */
#define SCTLR_EnDB    (1U << 13) /* v8.3, AArch64 only */
#define SCTLR_RR      (1U << 14) /* up to v7 */
#define SCTLR_DZE     (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4      (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT     (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT      (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI    (1U << 16) /* v8 onward */
#define SCTLR_HA      (1U << 17) /* up to v7, RES0 in v8 */
#define SCTLR_BR      (1U << 17) /* PMSA only */
#define SCTLR_IT      (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE    (1U << 18) /* v8 onward */
#define SCTLR_WXN     (1U << 19)
#define SCTLR_ST      (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN    (1U << 20) /* v7 onward, AArch32 only */
#define SCTLR_FI      (1U << 21) /* up to v7, v8 RES0 */
#define SCTLR_IESB    (1U << 21) /* v8.2-IESB, AArch64 only */
#define SCTLR_U       (1U << 22) /* up to v6, RAO in v7 */
#define SCTLR_EIS     (1U << 22) /* v8.5-ExS */
#define SCTLR_XP      (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_SPAN    (1U << 23) /* v8.1-PAN */
#define SCTLR_VE      (1U << 24) /* up to v7 */
#define SCTLR_E0E     (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE      (1U << 25)
#define SCTLR_L2      (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI     (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI    (1U << 27) /* up to v7, RAZ in v7VE and v8 */
#define SCTLR_EnDA    (1U << 27) /* v8.3, AArch64 only */
#define SCTLR_TRE     (1U << 28) /* AArch32 only */
#define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */
#define SCTLR_AFE     (1U << 29) /* AArch32 only */
#define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */
#define SCTLR_TE      (1U << 30) /* AArch32 only */
#define SCTLR_EnIB    (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA    (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_BT0     (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1     (1ULL << 36) /* v8.5-BTI */
#define SCTLR_ITFSB   (1ULL << 37) /* v8.5-MemTag */
#define SCTLR_TCF0    (3ULL << 38) /* v8.5-MemTag */
#define SCTLR_TCF     (3ULL << 40) /* v8.5-MemTag */
#define SCTLR_ATA0    (1ULL << 42) /* v8.5-MemTag */
#define SCTLR_ATA     (1ULL << 43) /* v8.5-MemTag */
#define SCTLR_DSSBS   (1ULL << 44) /* v8.5 */

#define CPTR_TCPAC    (1U << 31)
#define CPTR_TTA      (1U << 20)
#define CPTR_TFP      (1U << 10)
#define CPTR_TZ       (1U << 8)   /* CPTR_EL2 */
#define CPTR_EZ       (1U << 8)   /* CPTR_EL3 */

#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
    | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits.  MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)

/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
#define XPSR_EXCP 0x1ffU
#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
#define XPSR_IT_2_7 CPSR_IT_2_7
#define XPSR_GE CPSR_GE
#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
#define XPSR_IT_0_1 CPSR_IT_0_1
#define XPSR_Q CPSR_Q
#define XPSR_V CPSR_V
#define XPSR_C CPSR_C
#define XPSR_Z CPSR_Z
#define XPSR_N CPSR_N
#define XPSR_NZCV CPSR_NZCV
#define XPSR_IT CPSR_IT

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_BTYPE (3U << 10)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Write a new value to v7m.exception, thus transitioning into or out
 * of Handler mode; this may result in a change of active stack pointer.
 */
void write_v7m_exception(CPUARMState *env, uint32_t new_exc);

/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}

/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif | (env->btype << 10);
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->btype = (val >> 10) & 3;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}

/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,             /* trust values, do not switch reg banks */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/* Set the CPSR.  Note that some bits of mask must be all-set or all-clear.*/
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);

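/* Illustrative sketch (not part of the original header, hypothetical
 * helper): updating only the NZCV flags the way a guest MSR would,
 * leaving every other CPSR bit untouched.
 */
static inline void example_set_cpsr_nzcv(CPUARMState *env, uint32_t nzcv)
{
    cpsr_write(env, nzcv & CPSR_NZCV, CPSR_NZCV, CPSRWriteByInstr);
}
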
/* Return the current xPSR value.  */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16)
        | env->v7m.exception;
}

/* Set the xPSR.  Note that some bits of mask must be all-set or all-clear.  */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & XPSR_NZCV) {
        env->ZF = (~val) & XPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & XPSR_Q) {
        env->QF = ((val & XPSR_Q) != 0);
    }
    if (mask & XPSR_GE) {
        env->GE = (val & XPSR_GE) >> 16;
    }
#ifndef CONFIG_USER_ONLY
    if (mask & XPSR_T) {
        env->thumb = ((val & XPSR_T) != 0);
    }
    if (mask & XPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & XPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & XPSR_EXCP) {
        /* Note that this only happens on exception exit */
        write_v7m_exception(env, val & XPSR_EXCP);
    }
#endif
}

#define HCR_VM        (1ULL << 0)
#define HCR_SWIO      (1ULL << 1)
#define HCR_PTW       (1ULL << 2)
#define HCR_FMO       (1ULL << 3)
#define HCR_IMO       (1ULL << 4)
#define HCR_AMO       (1ULL << 5)
#define HCR_VF        (1ULL << 6)
#define HCR_VI        (1ULL << 7)
#define HCR_VSE       (1ULL << 8)
#define HCR_FB        (1ULL << 9)
#define HCR_BSU_MASK  (3ULL << 10)
#define HCR_DC        (1ULL << 12)
#define HCR_TWI       (1ULL << 13)
#define HCR_TWE       (1ULL << 14)
#define HCR_TID0      (1ULL << 15)
#define HCR_TID1      (1ULL << 16)
#define HCR_TID2      (1ULL << 17)
#define HCR_TID3      (1ULL << 18)
#define HCR_TSC       (1ULL << 19)
#define HCR_TIDCP     (1ULL << 20)
#define HCR_TACR      (1ULL << 21)
#define HCR_TSW       (1ULL << 22)
#define HCR_TPCP      (1ULL << 23)
#define HCR_TPU       (1ULL << 24)
#define HCR_TTLB      (1ULL << 25)
#define HCR_TVM       (1ULL << 26)
#define HCR_TGE       (1ULL << 27)
#define HCR_TDZ       (1ULL << 28)
#define HCR_HCD       (1ULL << 29)
#define HCR_TRVM      (1ULL << 30)
#define HCR_RW        (1ULL << 31)
#define HCR_CD        (1ULL << 32)
#define HCR_ID        (1ULL << 33)
#define HCR_E2H       (1ULL << 34)
#define HCR_TLOR      (1ULL << 35)
#define HCR_TERR      (1ULL << 36)
#define HCR_TEA       (1ULL << 37)
#define HCR_MIOCNCE   (1ULL << 38)
#define HCR_APK       (1ULL << 40)
#define HCR_API       (1ULL << 41)
#define HCR_NV        (1ULL << 42)
#define HCR_NV1       (1ULL << 43)
#define HCR_AT        (1ULL << 44)
#define HCR_NV2       (1ULL << 45)
#define HCR_FWB       (1ULL << 46)
#define HCR_FIEN      (1ULL << 47)
#define HCR_TID4      (1ULL << 49)
#define HCR_TICAB     (1ULL << 50)
#define HCR_TOCU      (1ULL << 52)
#define HCR_TTLBIS    (1ULL << 54)
#define HCR_TTLBOS    (1ULL << 55)
#define HCR_ATA       (1ULL << 56)
#define HCR_DCT       (1ULL << 57)

#define SCR_NS                (1U << 0)
#define SCR_IRQ               (1U << 1)
#define SCR_FIQ               (1U << 2)
#define SCR_EA                (1U << 3)
#define SCR_FW                (1U << 4)
#define SCR_AW                (1U << 5)
#define SCR_NET               (1U << 6)
#define SCR_SMD               (1U << 7)
#define SCR_HCE               (1U << 8)
#define SCR_SIF               (1U << 9)
#define SCR_RW                (1U << 10)
#define SCR_ST                (1U << 11)
#define SCR_TWI               (1U << 12)
#define SCR_TWE               (1U << 13)
#define SCR_TLOR              (1U << 14)
#define SCR_TERR              (1U << 15)
#define SCR_APK               (1U << 16)
#define SCR_API               (1U << 17)
#define SCR_EEL2              (1U << 18)
#define SCR_EASE              (1U << 19)
#define SCR_NMEA              (1U << 20)
#define SCR_FIEN              (1U << 21)
#define SCR_ENSCXT            (1U << 25)
#define SCR_ATA               (1U << 26)

/* Return the current FPSCR value.  */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* FPCR, Floating Point Control Register
 * FPSR, Floating Point Status Register
 *
 * For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07ff9f00

#define FPCR_IOE    (1 << 8)    /* Invalid Operation exception trap enable */
#define FPCR_DZE    (1 << 9)    /* Divide by Zero exception trap enable */
#define FPCR_OFE    (1 << 10)   /* Overflow exception trap enable */
#define FPCR_UFE    (1 << 11)   /* Underflow exception trap enable */
#define FPCR_IXE    (1 << 12)   /* Inexact exception trap enable */
#define FPCR_IDE    (1 << 15)   /* Input Denormal exception trap enable */
#define FPCR_FZ16   (1 << 19)   /* ARMv8.2+, FP16 flush-to-zero */
#define FPCR_FZ     (1 << 24)   /* Flush-to-zero enable bit */
#define FPCR_DN     (1 << 25)   /* Default NaN enable bit */
#define FPCR_QC     (1 << 27)   /* Cumulative saturation bit */

static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

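/* Illustrative sketch (not part of the original header, hypothetical
 * helper): because FPSR_MASK and FPCR_MASK do not overlap, updating the
 * FPCR view cannot disturb FPSR state held in the shared fpscr, e.g.:
 */
static inline void example_enable_default_nan(CPUARMState *env)
{
    vfp_set_fpcr(env, vfp_get_fpcr(env) | FPCR_DN);
}
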
enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers.  */
#define ARM_VFP_FPSID 0
#define ARM_VFP_FPSCR 1
#define ARM_VFP_MVFR2 5
#define ARM_VFP_MVFR1 6
#define ARM_VFP_MVFR0 7
#define ARM_VFP_FPEXC 8
#define ARM_VFP_FPINST 9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers.  */
#define ARM_IWMMXT_wCID 0
#define ARM_IWMMXT_wCon 1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, USERSETMPEND, 1, 1)
FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1)
FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)
FIELD(V7M_CCR, BP, 18, 1)

/* V7M SCR bits */
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
FIELD(V7M_SCR, SLEEPDEEP, 2, 1)
FIELD(V7M_SCR, SLEEPDEEPS, 3, 1)
FIELD(V7M_SCR, SEVONPEND, 4, 1)

/* V7M AIRCR bits */
FIELD(V7M_AIRCR, VECTRESET, 0, 1)
FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
FIELD(V7M_AIRCR, PRIS, 14, 1)
FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
FIELD(V7M_AIRCR, VECTKEY, 16, 16)

/* V7M CFSR bits for MMFSR */
FIELD(V7M_CFSR, IACCVIOL, 0, 1)
FIELD(V7M_CFSR, DACCVIOL, 1, 1)
FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
FIELD(V7M_CFSR, MSTKERR, 4, 1)
FIELD(V7M_CFSR, MLSPERR, 5, 1)
FIELD(V7M_CFSR, MMARVALID, 7, 1)

/* V7M CFSR bits for BFSR */
FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)

/* V7M CFSR bits for UFSR */
FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)

/* V7M CFSR bit masks covering all of the subregister bits */
FIELD(V7M_CFSR, MMFSR, 0, 8)
FIELD(V7M_CFSR, BFSR, 8, 8)
FIELD(V7M_CFSR, UFSR, 16, 16)

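/*
 * Illustrative sketch (not part of the original header): the FIELD()
 * macros from "hw/registerfields.h" also generate shift/mask constants
 * and the FIELD_EX32()/FIELD_DP32() helpers, so a fault handler can pick
 * one subregister out of a combined CFSR value. The function and variable
 * names here are hypothetical.
 */
#if 0
static void example_decode_cfsr(uint32_t cfsr)
{
    /* Extract the 16-bit UsageFault status subregister... */
    uint32_t ufsr = FIELD_EX32(cfsr, V7M_CFSR, UFSR);
    /* ...and test an individual bit via the generated mask. */
    bool undef = cfsr & R_V7M_CFSR_UNDEFINSTR_MASK;
    (void)ufsr;
    (void)undef;
}
#endif
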
/* V7M HFSR bits */
FIELD(V7M_HFSR, VECTTBL, 1, 1)
FIELD(V7M_HFSR, FORCED, 30, 1)
FIELD(V7M_HFSR, DEBUGEVT, 31, 1)

/* V7M DFSR bits */
FIELD(V7M_DFSR, HALTED, 0, 1)
FIELD(V7M_DFSR, BKPT, 1, 1)
FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)

/* V7M SFSR bits */
FIELD(V7M_SFSR, INVEP, 0, 1)
FIELD(V7M_SFSR, INVIS, 1, 1)
FIELD(V7M_SFSR, INVER, 2, 1)
FIELD(V7M_SFSR, AUVIOL, 3, 1)
FIELD(V7M_SFSR, INVTRAN, 4, 1)
FIELD(V7M_SFSR, LSPERR, 5, 1)
FIELD(V7M_SFSR, SFARVALID, 6, 1)
FIELD(V7M_SFSR, LSERR, 7, 1)

/* v7M MPU_CTRL bits */
FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)

/* v7M CLIDR bits */
FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
FIELD(V7M_CLIDR, LOUIS, 21, 3)
FIELD(V7M_CLIDR, LOC, 24, 3)
FIELD(V7M_CLIDR, LOUU, 27, 3)
FIELD(V7M_CLIDR, ICB, 30, 2)

FIELD(V7M_CSSELR, IND, 0, 1)
FIELD(V7M_CSSELR, LEVEL, 1, 3)
/* We use the combination of InD and Level to index into cpu->ccsidr[];
 * define a mask for this and check that it doesn't permit running off
 * the end of the array.
 */
FIELD(V7M_CSSELR, INDEX, 0, 4)

/* v7M FPCCR bits */
FIELD(V7M_FPCCR, LSPACT, 0, 1)
FIELD(V7M_FPCCR, USER, 1, 1)
FIELD(V7M_FPCCR, S, 2, 1)
FIELD(V7M_FPCCR, THREAD, 3, 1)
FIELD(V7M_FPCCR, HFRDY, 4, 1)
FIELD(V7M_FPCCR, MMRDY, 5, 1)
FIELD(V7M_FPCCR, BFRDY, 6, 1)
FIELD(V7M_FPCCR, SFRDY, 7, 1)
FIELD(V7M_FPCCR, MONRDY, 8, 1)
FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1)
FIELD(V7M_FPCCR, UFRDY, 10, 1)
FIELD(V7M_FPCCR, RES0, 11, 15)
FIELD(V7M_FPCCR, TS, 26, 1)
FIELD(V7M_FPCCR, CLRONRETS, 27, 1)
FIELD(V7M_FPCCR, CLRONRET, 28, 1)
FIELD(V7M_FPCCR, LSPENS, 29, 1)
FIELD(V7M_FPCCR, LSPEN, 30, 1)
FIELD(V7M_FPCCR, ASPEN, 31, 1)
/* These bits are banked. Others are non-banked and live in the M_REG_S bank */
#define R_V7M_FPCCR_BANKED_MASK                 \
    (R_V7M_FPCCR_LSPACT_MASK |                  \
     R_V7M_FPCCR_USER_MASK |                    \
     R_V7M_FPCCR_THREAD_MASK |                  \
     R_V7M_FPCCR_MMRDY_MASK |                   \
     R_V7M_FPCCR_SPLIMVIOL_MASK |               \
     R_V7M_FPCCR_UFRDY_MASK |                   \
     R_V7M_FPCCR_ASPEN_MASK)

/*
 * System register ID fields.
 */
FIELD(MIDR_EL1, REVISION, 0, 4)
FIELD(MIDR_EL1, PARTNUM, 4, 12)
FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
FIELD(MIDR_EL1, VARIANT, 20, 4)
FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)

FIELD(ID_ISAR0, SWAP, 0, 4)
FIELD(ID_ISAR0, BITCOUNT, 4, 4)
FIELD(ID_ISAR0, BITFIELD, 8, 4)
FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
FIELD(ID_ISAR0, COPROC, 16, 4)
FIELD(ID_ISAR0, DEBUG, 20, 4)
FIELD(ID_ISAR0, DIVIDE, 24, 4)

FIELD(ID_ISAR1, ENDIAN, 0, 4)
FIELD(ID_ISAR1, EXCEPT, 4, 4)
FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
FIELD(ID_ISAR1, EXTEND, 12, 4)
FIELD(ID_ISAR1, IFTHEN, 16, 4)
FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
FIELD(ID_ISAR1, INTERWORK, 24, 4)
FIELD(ID_ISAR1, JAZELLE, 28, 4)

FIELD(ID_ISAR2, LOADSTORE, 0, 4)
FIELD(ID_ISAR2, MEMHINT, 4, 4)
FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
FIELD(ID_ISAR2, MULT, 12, 4)
FIELD(ID_ISAR2, MULTS, 16, 4)
FIELD(ID_ISAR2, MULTU, 20, 4)
FIELD(ID_ISAR2, PSR_AR, 24, 4)
FIELD(ID_ISAR2, REVERSAL, 28, 4)

FIELD(ID_ISAR3, SATURATE, 0, 4)
FIELD(ID_ISAR3, SIMD, 4, 4)
FIELD(ID_ISAR3, SVC, 8, 4)
FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
FIELD(ID_ISAR3, TABBRANCH, 16, 4)
FIELD(ID_ISAR3, T32COPY, 20, 4)
FIELD(ID_ISAR3, TRUENOP, 24, 4)
FIELD(ID_ISAR3, T32EE, 28, 4)

FIELD(ID_ISAR4, UNPRIV, 0, 4)
FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
FIELD(ID_ISAR4, WRITEBACK, 8, 4)
FIELD(ID_ISAR4, SMC, 12, 4)
FIELD(ID_ISAR4, BARRIER, 16, 4)
FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
FIELD(ID_ISAR4, PSR_M, 24, 4)
FIELD(ID_ISAR4, SWP_FRAC, 28, 4)

FIELD(ID_ISAR5, SEVL, 0, 4)
FIELD(ID_ISAR5, AES, 4, 4)
FIELD(ID_ISAR5, SHA1, 8, 4)
FIELD(ID_ISAR5, SHA2, 12, 4)
FIELD(ID_ISAR5, CRC32, 16, 4)
FIELD(ID_ISAR5, RDM, 24, 4)
FIELD(ID_ISAR5, VCMA, 28, 4)

FIELD(ID_ISAR6, JSCVT, 0, 4)
FIELD(ID_ISAR6, DP, 4, 4)
FIELD(ID_ISAR6, FHM, 8, 4)
FIELD(ID_ISAR6, SB, 12, 4)
FIELD(ID_ISAR6, SPECRES, 16, 4)

FIELD(ID_MMFR4, SPECSEI, 0, 4)
FIELD(ID_MMFR4, AC2, 4, 4)
FIELD(ID_MMFR4, XNX, 8, 4)
FIELD(ID_MMFR4, CNP, 12, 4)
FIELD(ID_MMFR4, HPDS, 16, 4)
FIELD(ID_MMFR4, LSM, 20, 4)
FIELD(ID_MMFR4, CCIDX, 24, 4)
FIELD(ID_MMFR4, EVT, 28, 4)

FIELD(ID_AA64ISAR0, AES, 4, 4)
FIELD(ID_AA64ISAR0, SHA1, 8, 4)
FIELD(ID_AA64ISAR0, SHA2, 12, 4)
FIELD(ID_AA64ISAR0, CRC32, 16, 4)
FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
FIELD(ID_AA64ISAR0, RDM, 28, 4)
FIELD(ID_AA64ISAR0, SHA3, 32, 4)
FIELD(ID_AA64ISAR0, SM3, 36, 4)
FIELD(ID_AA64ISAR0, SM4, 40, 4)
FIELD(ID_AA64ISAR0, DP, 44, 4)
FIELD(ID_AA64ISAR0, FHM, 48, 4)
FIELD(ID_AA64ISAR0, TS, 52, 4)
FIELD(ID_AA64ISAR0, TLB, 56, 4)
FIELD(ID_AA64ISAR0, RNDR, 60, 4)

FIELD(ID_AA64ISAR1, DPB, 0, 4)
FIELD(ID_AA64ISAR1, APA, 4, 4)
FIELD(ID_AA64ISAR1, API, 8, 4)
FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
FIELD(ID_AA64ISAR1, FCMA, 16, 4)
FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
FIELD(ID_AA64ISAR1, GPA, 24, 4)
FIELD(ID_AA64ISAR1, GPI, 28, 4)
FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
FIELD(ID_AA64ISAR1, SB, 36, 4)
FIELD(ID_AA64ISAR1, SPECRES, 40, 4)

FIELD(ID_AA64PFR0, EL0, 0, 4)
FIELD(ID_AA64PFR0, EL1, 4, 4)
FIELD(ID_AA64PFR0, EL2, 8, 4)
FIELD(ID_AA64PFR0, EL3, 12, 4)
FIELD(ID_AA64PFR0, FP, 16, 4)
FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
FIELD(ID_AA64PFR0, GIC, 24, 4)
FIELD(ID_AA64PFR0, RAS, 28, 4)
FIELD(ID_AA64PFR0, SVE, 32, 4)

FIELD(ID_AA64PFR1, BT, 0, 4)
FIELD(ID_AA64PFR1, SBSS, 4, 4)
FIELD(ID_AA64PFR1, MTE, 8, 4)
FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)

FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
FIELD(ID_AA64MMFR0, EXS, 44, 4)

FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
FIELD(ID_AA64MMFR1, VH, 8, 4)
FIELD(ID_AA64MMFR1, HPDS, 12, 4)
FIELD(ID_AA64MMFR1, LO, 16, 4)
FIELD(ID_AA64MMFR1, PAN, 20, 4)
FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
FIELD(ID_AA64MMFR1, XNX, 28, 4)

FIELD(ID_DFR0, COPDBG, 0, 4)
FIELD(ID_DFR0, COPSDBG, 4, 4)
FIELD(ID_DFR0, MMAPDBG, 8, 4)
FIELD(ID_DFR0, COPTRC, 12, 4)
FIELD(ID_DFR0, MMAPTRC, 16, 4)
FIELD(ID_DFR0, MPROFDBG, 20, 4)
FIELD(ID_DFR0, PERFMON, 24, 4)
FIELD(ID_DFR0, TRACEFILT, 28, 4)

FIELD(MVFR0, SIMDREG, 0, 4)
FIELD(MVFR0, FPSP, 4, 4)
FIELD(MVFR0, FPDP, 8, 4)
FIELD(MVFR0, FPTRAP, 12, 4)
FIELD(MVFR0, FPDIVIDE, 16, 4)
FIELD(MVFR0, FPSQRT, 20, 4)
FIELD(MVFR0, FPSHVEC, 24, 4)
FIELD(MVFR0, FPROUND, 28, 4)

FIELD(MVFR1, FPFTZ, 0, 4)
FIELD(MVFR1, FPDNAN, 4, 4)
FIELD(MVFR1, SIMDLS, 8, 4)
FIELD(MVFR1, SIMDINT, 12, 4)
FIELD(MVFR1, SIMDSP, 16, 4)
FIELD(MVFR1, SIMDHP, 20, 4)
FIELD(MVFR1, FPHP, 24, 4)
FIELD(MVFR1, SIMDFMAC, 28, 4)

FIELD(MVFR2, SIMDMISC, 0, 4)
FIELD(MVFR2, FPMISC, 4, 4)

QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);

/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_VFP,
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_PMSA,   /* no MMU; may have Memory Protection Unit */
    ARM_FEATURE_VFP3,
    ARM_FEATURE_NEON,
    ARM_FEATURE_M, /* Microcontroller profile.  */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
    ARM_FEATURE_PMU, /* has PMU support */
    ARM_FEATURE_VBAR, /* has cp15 VBAR */
    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
    ARM_FEATURE_M_MAIN, /* M profile Main Extension */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);

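/*
 * Illustrative sketch (not part of the original header): the features
 * word is just a bitmap, so feature tests compose with plain boolean
 * logic. The helper name is invented for illustration.
 */
#if 0
static bool example_is_v8_aarch64(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_V8) &&
           arm_feature(env, ARM_FEATURE_AARCH64);
}
#endif
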
#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to that level.
 * Unlike arm_is_secure() (which is always a question about the
 * _current_ state of the CPU) this doesn't care about the current
 * EL or mode.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return !(env->cp15.scr_el3 & SCR_NS);
    } else {
        /* If EL3 is not supported then the secure state is implementation
         * defined, in which case QEMU defaults to non-secure.
         */
        return false;
    }
}

/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                   (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return false;
}

/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
    if (arm_is_el3_or_mon(env)) {
        return true;
    }
    return arm_is_secure_below_el3(env);
}

#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}
#endif

/**
 * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
 * E.g. when in secure state, fields in HCR_EL2 are suppressed,
 * "for all purposes other than a direct read or write access of HCR_EL2."
 * Not included here is HCR_RW.
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env);

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /* The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                    \
        if (_secure) {                                      \
            (_env)->cp15._regname##_s = (_val);             \
        } else {                                            \
            (_env)->cp15._regname##_ns = (_val);            \
        }                                                   \
    } while (0)

/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system. These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)                          \
    A32_BANKED_REG_GET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                    \
    A32_BANKED_REG_SET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))

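/*
 * Illustrative sketch (not part of the original header): reading and
 * writing one bank of a banked cp register. "ttbr0" is used on the
 * assumption that CPUARMState's cp15 struct declares ttbr0_s/ttbr0_ns
 * fields; any banked register named that way works the same.
 */
#if 0
static void example_banked_access(CPUARMState *env)
{
    /* Explicitly pick the secure bank... */
    A32_BANKED_REG_SET(env, ttbr0, true, 0x1000);
    /* ...or let the current security state choose the bank. */
    uint64_t ttbr0 = A32_BANKED_CURRENT_REG_GET(env, ttbr0);
    (void)ttbr0;
}
#endif
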
void arm_cpu_list(void);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure);

/* Interface between CPU and Interrupt controller.  */
#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_can_take_pending_exception(void *opaque);
#else
static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    return true;
}
#endif
/**
 * armv7m_nvic_set_pending: mark the specified exception as pending
 * @opaque: the NVIC
 * @irq: the exception number to mark pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Marks the specified exception as pending. Note that we will assert()
 * if @secure is true and @irq does not specify one of the fixed set
 * of architecturally banked exceptions.
 */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_set_pending_derived: mark this derived exception as pending
 * @opaque: the NVIC
 * @irq: the exception number to mark pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Similar to armv7m_nvic_set_pending(), but specifically for derived
 * exceptions (exceptions generated in the course of trying to take
 * a different exception).
 */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending
 * @opaque: the NVIC
 * @irq: the exception number to mark pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Similar to armv7m_nvic_set_pending(), but specifically for exceptions
 * generated in the course of lazy stacking of FP registers.
 */
void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_get_pending_irq_info: return highest priority pending
 *    exception, and whether it targets Secure state
 * @opaque: the NVIC
 * @pirq: set to pending exception number
 * @ptargets_secure: set to whether pending exception targets Secure
 *
 * This function writes the number of the highest priority pending
 * exception (the one which would be made active by
 * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
 * to true if the current highest priority pending exception should
 * be taken to Secure state, false for NS.
 */
void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
                                      bool *ptargets_secure);
/**
 * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
 * @opaque: the NVIC
 *
 * Move the current highest priority pending exception from the pending
 * state to the active state, and update v7m.exception to indicate that
 * it is the exception currently being handled.
 */
void armv7m_nvic_acknowledge_irq(void *opaque);
/**
 * armv7m_nvic_complete_irq: complete specified interrupt or exception
 * @opaque: the NVIC
 * @irq: the exception number to complete
 * @secure: true if this exception was secure
 *
 * Returns: -1 if the irq was not active
 *           1 if completing this irq brought us back to base (no active irqs)
 *           0 if there is still an irq active after this one was completed
 * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
 */
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
 * @opaque: the NVIC
 * @irq: the exception number to mark pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Return whether an exception is "ready", i.e. whether the exception is
 * enabled and is configured at a priority which would allow it to
 * interrupt the current execution priority. This controls whether the
 * RDY bit for it in the FPCCR is set.
 */
bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_raw_execution_priority: return the raw execution priority
 * @opaque: the NVIC
 *
 * Returns: the raw execution priority as defined by the v8M architecture.
 * This is the execution priority minus the effects of AIRCR.PRIS,
 * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
 * (v8M ARM ARM I_PKLD.)
 */
int armv7m_nvic_raw_execution_priority(void *opaque);
/**
 * armv7m_nvic_neg_prio_requested: return true if the requested execution
 * priority is negative for the specified security state.
 * @opaque: the NVIC
 * @secure: the security state to test
 * This corresponds to the pseudocode IsReqExecPriNeg().
 */
#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure);
#else
static inline bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    return false;
}
#endif
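
/*
 * Illustrative sketch (not part of the original header): the usual
 * exception lifecycle as seen through this interface. The helper and
 * the chosen exception number are invented for illustration.
 */
#if 0
static void example_take_and_complete_exception(void *nvic)
{
    int irq;
    bool targets_secure;

    /* 1. Something marks an exception pending. */
    armv7m_nvic_set_pending(nvic, ARMV7M_EXCP_SYSTICK, false);

    /* 2. Exception entry asks which pending exception wins... */
    armv7m_nvic_get_pending_irq_info(nvic, &irq, &targets_secure);
    /* ...and makes it active. */
    armv7m_nvic_acknowledge_irq(nvic);

    /* 3. Exception return deactivates it again. */
    armv7m_nvic_complete_irq(nvic, irq, targets_secure);
}
#endif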

/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/* To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)

#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)       \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |  \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))

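/*
 * Illustrative sketch (not part of the original header): building lookup
 * keys for two well-known registers. The register choices are just
 * examples; 0x13 is the guest-visible "sysreg" coprocessor number
 * mentioned in the ARMCPRegInfo comments below.
 */
#if 0
static void example_encode_keys(void)
{
    /* AArch32 SCTLR: cp15, 32-bit, NS bank, crn=1, crm=0, opc1=0, opc2=0 */
    uint32_t key32 = ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0);
    /* AArch64 SCTLR_EL1: op0=3, op1=0, crn=1, crm=0, op2=0 */
    uint32_t key64 = ENCODE_AA64_CP_REG(0x13, 1, 0, 3, 0, 0);
    (void)key32;
    (void)key64;
}
#endif
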
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /* KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}

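/*
 * Illustrative sketch (not part of the original header): for an AArch64
 * sysreg ID the two conversions above round-trip exactly, because the
 * only information dropped by the truncation (the size and architecture
 * fields) is implied by the CP_REG_AA64_MASK bit and restored on the way
 * back. The encoding below is an assumed SCTLR_EL1-style example built
 * from the kvm-consts.h shift macros.
 */
#if 0
static void example_kvm_id_round_trip(void)
{
    uint64_t kvmid = CP_REG_ARM64 | CP_REG_SIZE_U64 |
                     (0x13ULL << CP_REG_ARM_COPROC_SHIFT) |
                     (3ULL << CP_REG_ARM64_SYSREG_OP0_SHIFT) |
                     (1ULL << CP_REG_ARM64_SYSREG_CRN_SHIFT);

    assert(cpreg_to_kvm_id(kvm_to_cpreg_id(kvmid)) == kvmid);
}
#endif
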
/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [11..8] indicate what behaviour
 * it has. Otherwise it is a simple cp reg, where CONST indicates that
 * TCG can assume the value to be constant (ie load at translate time)
 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
 * indicates that the TB should not be ended after a write to this register
 * (the default is that the TB ends after cp writes). OVERRIDE permits
 * a register definition to override a previous definition for the
 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
 * old must have the OVERRIDE bit set.
 * ALIAS indicates that this register is an alias view of some underlying
 * state which is also visible via another register, and that the other
 * register is handling migration and reset; registers marked ALIAS will not be
 * migrated but may have their state set by syncing of register state from KVM.
 * NO_RAW indicates that this register has no underlying state and does not
 * support raw access for state saving/loading; it will not be used for either
 * migration or KVM state synchronization. (Typically this is for "registers"
 * which are actually used as instructions for cache maintenance and so on.)
 * IO indicates that this register does I/O and therefore its accesses
 * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
 * registers which implement clocks or timers require this.
 * RAISES_EXC is for when the read or write hook might raise an exception;
 * the generated code will synchronize the CPU state before calling the hook
 * so that it is safe for the hook to call raise_exception().
 * NEWEL is for writes to registers that might change the exception
 * level - typically on older ARM chips. For those cases we need to
 * re-read the new el when recomputing the translation flags.
 */
#define ARM_CP_SPECIAL 0x0001
#define ARM_CP_CONST 0x0002
#define ARM_CP_64BIT 0x0004
#define ARM_CP_SUPPRESS_TB_END 0x0008
#define ARM_CP_OVERRIDE 0x0010
#define ARM_CP_ALIAS 0x0020
#define ARM_CP_IO 0x0040
#define ARM_CP_NO_RAW 0x0080
#define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100)
#define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200)
#define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300)
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400)
#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500)
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
#define ARM_CP_FPU 0x1000
#define ARM_CP_SVE 0x2000
#define ARM_CP_NO_GDB 0x4000
#define ARM_CP_RAISES_EXC 0x8000
#define ARM_CP_NEWEL 0x10000
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xfffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0x1f0ff

/* Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
};

/* ARM CP register secure state flags. These flags identify security state
 * attributes for a given CP register entry.
 * The existence of both or neither secure and non-secure flags indicates that
 * the register has both a secure and non-secure hash entry. A single one of
 * these flags causes the register to only be hashed for the specified
 * security state.
 * Although definitions may have any combination of the S/NS bits, each
 * registered entry will only have one to identify whether the entry is secure
 * or non-secure.
 */
enum {
    ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
    ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
};

/* Return true if cptype is a valid type field. This is used to try to
 * catch errors where the sentinel has been accidentally left off the end
 * of a list of registers.
 */
static inline bool cptype_valid(int cptype)
{
    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
        || ((cptype & ARM_CP_SPECIAL) &&
            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
}

/* Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
#define PL3_R 0x80
#define PL3_W 0x40
#define PL2_R (0x20 | PL3_R)
#define PL2_W (0x10 | PL3_W)
#define PL1_R (0x08 | PL2_R)
#define PL1_W (0x04 | PL2_W)
#define PL0_R (0x02 | PL1_R)
#define PL0_W (0x01 | PL1_W)

/*
 * For user-mode some registers are accessible to EL0 via a kernel
 * trap-and-emulate ABI. In this case we define the read permissions
 * as actually being PL0_R. However some bits of any given register
 * may still be masked.
 */
#ifdef CONFIG_USER_ONLY
#define PL0U_R PL0_R
#else
#define PL0U_R PL1_R
#endif

#define PL3_RW (PL3_R | PL3_W)
#define PL2_RW (PL2_R | PL2_W)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)

/* Return the highest implemented Exception Level */
static inline int arm_highest_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return 3;
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        return 2;
    }
    return 1;
}

/* Return true if a v7M CPU is in Handler mode */
static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
{
    return env->v7m.exception != 0;
}

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}

typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
     * PL1 if in EL0, otherwise to the current EL).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_EL2 = 3,
    CP_ACCESS_TRAP_EL3 = 4,
    /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
    /* Access fails and results in an exception syndrome for an FP access,
     * trapped directly to EL2 or EL3
     */
    CP_ACCESS_TRAP_FP_EL2 = 7,
    CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env,
                                  const ARMCPRegInfo *opaque,
                                  bool isread);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);

#define CP_ANY 0xff

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
    /* Execution state in which this register is visible: ARM_CP_STATE_* */
    int state;
    /* Register type: ARM_CP_* bits/values */
    int type;
    /* Access rights: PL*_[RW] */
    int access;
    /* Security state: ARM_CP_SECSTATE_* bits/values */
    int secure;
    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
     * this register was defined: can be used to hand data through to the
     * register read/write functions, since they are passed the ARMCPRegInfo*.
     */
    void *opaque;
    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
     * fieldoffset is non-zero, the reset value of the register.
     */
    uint64_t resetvalue;
    /* Offset of the field in CPUARMState for this register.
     *
     * This is not needed if either:
     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
     *  2. both readfn and writefn are specified
     */
    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */

    /* Offsets of the secure and non-secure fields in CPUARMState for the
     * register if it is banked. These fields are only used during the static
     * registration of a register. During hashing the bank associated
     * with a given security state is copied to fieldoffset which is used from
     * there on out.
     *
     * It is expected that register definitions use either fieldoffset or
     * bank_fieldoffsets in the definition but not both. It is also expected
     * that both bank offsets are set when defining a banked register. This
     * use indicates that a register is banked.
     */
    ptrdiff_t bank_fieldoffsets[2];

    /* Function for making any access checks for this register in addition to
     * those specified by the 'access' permissions bits. If NULL, no extra
     * checks required. The access check is performed at runtime, not at
     * translate time.
     */
    CPAccessFn *accessfn;
    /* Function for handling reads of this register. If NULL, then reads
     * will be done by loading from the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPReadFn *readfn;
    /* Function for handling writes of this register. If NULL, then writes
     * will be done by writing to the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPWriteFn *writefn;
    /* Function for doing a "raw" read; used when we need to copy
     * coprocessor state to the kernel for KVM or out for
     * migration. This only needs to be provided if there is also a
     * readfn and it has side effects (for instance clear-on-read bits).
     */
    CPReadFn *raw_readfn;
    /* Function for doing a "raw" write; used when we need to copy KVM
     * kernel coprocessor state into userspace, or for inbound
     * migration. This only needs to be provided if there is also a
     * writefn and it masks out "unwritable" bits or has write-one-to-clear
     * or similar behaviour.
     */
    CPWriteFn *raw_writefn;
    /* Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;

    /*
     * "Original" writefn and readfn.
     * For ARMv8.1-VHE register aliases, we overwrite the read/write
     * accessor functions of various EL1/EL0 to perform the runtime
     * check for which sysreg should actually be modified, and then
     * forwards the operation. Before overwriting the accessors,
     * the original function is copied here, so that accesses that
     * really do go to the EL1/EL0 version proceed normally.
     * (The corresponding EL2 register is linked via opaque.)
     */
    CPReadFn *orig_readfn;
    CPWriteFn *orig_writefn;
};

/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, 0);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);

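/*
 * Illustrative sketch (not part of the original header): a minimal
 * reginfo table in the style used throughout target/arm, defining one
 * invented constant register and terminating with the sentinel.
 */
#if 0
static const ARMCPRegInfo example_cp_reginfo[] = {
    { .name = "EXAMPLEREG", .cp = 15, .crn = 9, .crm = 0,
      .opc1 = 0, .opc2 = 0, .access = PL1_R,
      .type = ARM_CP_CONST, .resetvalue = 0x12345678 },
    REGINFO_SENTINEL
};

/* Registered from a CPU init function as:
 *     define_arm_cp_regs(cpu, example_cp_reginfo);
 */
#endif
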
/*
 * Definition of an ARM co-processor register as viewed from
 * userspace. This is used for presenting sanitised versions of
 * registers to userspace when emulating the Linux AArch64 CPU
 * ID/feature ABI (advertised as HWCAP_CPUID).
 */
typedef struct ARMCPRegUserSpaceInfo {
    /* Name of register */
    const char *name;

    /* Is the name actually a glob pattern */
    bool is_glob;

    /* Only some bits are exported to user space */
    uint64_t exported_bits;

    /* Fixed bits are applied after the mask */
    uint64_t fixed_bits;
} ARMCPRegUserSpaceInfo;

#define REGUSERINFO_SENTINEL { .name = NULL }

void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods);

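/*
 * Illustrative sketch (not part of the original header): a mods table
 * that hides every register matching a glob, then re-exposes some bits
 * of one of them. The table name and the exported_bits value are
 * arbitrary examples, not the values QEMU actually uses.
 */
#if 0
static const ARMCPRegUserSpaceInfo example_user_idregs[] = {
    { .name = "ID_*", .is_glob = true }, /* RAZ unless overridden below */
    { .name = "ID_AA64ISAR0_EL1", .exported_bits = 0x00000000000ffff0 },
    REGUSERINFO_SENTINEL
};

/* Applied to a reginfo table as:
 *     modify_arm_cp_regs(some_reginfo_table, example_user_idregs);
 */
#endif
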
/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}

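/*
 * Illustrative sketch (not part of the original header): how the PL*_R/W
 * bit layout interacts with cp_access_ok(). A PL1_RW register is
 * accessible from EL1 and above, but not from EL0.
 */
#if 0
static void example_access_check(void)
{
    ARMCPRegInfo ri = { .access = PL1_RW };

    assert(cp_access_ok(1, &ri, 1));  /* EL1 read: OK */
    assert(cp_access_ok(1, &ri, 0));  /* EL1 write: OK */
    assert(!cp_access_ok(0, &ri, 1)); /* EL0 read: denied */
}
#endif
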
/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);

/**
 * write_list_to_cpustate
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the ARMCPUState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 * @kvm_sync: true if this is for syncing back to KVM
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the ARMCPUState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * @kvm_sync is true if we are doing this in order to sync the
 * register state back to KVM. In this case we will only update
 * values in the list if the previous list->cpustate sync actually
 * successfully wrote the CPU state. Otherwise we will keep the value
 * that is in the list.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);

#define ARM_CPUID_TI915T 0x54029152
#define ARM_CPUID_TI925T 0x54029252

#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU

#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

/* ARM has the following "translation regimes" (as the ARM ARM calls them):
 *
 * If EL3 is 64-bit:
 *  + NonSecure EL1 & 0 stage 1
 *  + NonSecure EL1 & 0 stage 2
 *  + NonSecure EL2
 *  + NonSecure EL2 & 0 (ARMv8.1-VHE)
 *  + Secure EL1 & 0
 *  + Secure EL3
 * If EL3 is 32-bit:
 *  + NonSecure PL1 & 0 stage 1
 *  + NonSecure PL1 & 0 stage 2
 *  + NonSecure PL2
 *  + Secure PL0
 *  + Secure PL1
 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
 *  1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
 *     because they may differ in access permissions even if the VA->PA map is
 *     the same
 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *     translation, which means that we have one mmu_idx that deals with two
 *     concatenated translation regimes [this sort of combined s1+2 TLB is
 *     architecturally permitted]
 *  3. we don't need to allocate an mmu_idx to translations that we won't be
 *     handling via the TLB. The only way to do a stage 1 translation without
 *     the immediate stage 2 translation is via the ATS or AT system insns,
 *     which can be slow-pathed and always do a page table walk.
 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *     translation regimes, because they map reasonably well to each other
 *     and they can't both be active at the same time.
 *  5. we want to be able to use the TLB for accesses done as part of a
 *     stage1 page table walk, rather than having to walk the stage2 page
 *     table over and over.
 *
 * This gives us the following list of cases:
 *
 * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
 * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
 * NS EL0 EL2&0
 * NS EL2 EL2&0
 * NS EL2 (aka NS PL2)
 * S EL0 EL1&0 (aka S PL0)
 * S EL1 EL1&0 (not used if EL3 is 32 bit)
 * S EL3 (aka S PL1)
 * NS EL1&0 stage 2
 *
 * for a total of 9 different mmu_idx.
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
 * NS EL2 if we ever model a Cortex-R52).
 *
 * M profile CPUs are rather different as they do not have a true MMU.
 * They have the following different MMU indexes:
 *  User
 *  Privileged
 *  User, execution priority negative (ie the MPU HFNMIENA bit may apply)
 *  Privileged, execution priority negative (ditto)
 * If the CPU supports the v8M Security Extension then there are also:
 *  Secure User
 *  Secure Privileged
 *  Secure User, execution priority negative
 *  Secure Privileged, execution priority negative
 *
 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
 * are not quite the same -- different CPU types (most notably M profile
 * vs A/R profile) would like to use MMU indexes with different semantics,
 * but since we don't ever need to use all of those in a single CPU we
 * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
 * the same for any particular CPU.
 * Variables of type ARMMMUIdx are always full values, and the core
 * index values are in variables of type 'int'.
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 * For M profile we arrange them to have a bit for priv, a bit for negpri
 * and a bit for secure.
 */
#define ARM_MMU_IDX_A 0x10     /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40     /* M profile */

/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
#define ARM_MMU_IDX_M_S 0x4    /* Secure */

#define ARM_MMU_IDX_TYPE_MASK \
    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
#define ARM_MMU_IDX_COREIDX_MASK 0xf

typedef enum ARMMMUIdx {
    /*
     * A-profile.
     */
    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,

    ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,

    ARMMMUIdx_E2 = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2 = 4 | ARM_MMU_IDX_A,

    ARMMMUIdx_SE10_0 = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE10_1 = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,

    ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,

    /*
     * These are not allocated TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,

    /*
     * M-profile.
     */
    ARMMMUIdx_MUser = ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
    ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
} ARMMMUIdx;

/*
 * Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
#define TO_CORE_BIT(NAME) \
    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

typedef enum ARMMMUIdxBit {
    TO_CORE_BIT(E10_0),
    TO_CORE_BIT(E20_0),
    TO_CORE_BIT(E10_1),
    TO_CORE_BIT(E2),
    TO_CORE_BIT(E20_2),
    TO_CORE_BIT(SE10_0),
    TO_CORE_BIT(SE10_1),
    TO_CORE_BIT(SE3),
    TO_CORE_BIT(Stage2),

    TO_CORE_BIT(MUser),
    TO_CORE_BIT(MPriv),
    TO_CORE_BIT(MUserNegPri),
    TO_CORE_BIT(MPrivNegPri),
    TO_CORE_BIT(MSUser),
    TO_CORE_BIT(MSPriv),
    TO_CORE_BIT(MSUserNegPri),
    TO_CORE_BIT(MSPrivNegPri),
} ARMMMUIdxBit;

#undef TO_CORE_BIT

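/*
 * Illustrative sketch (not part of the original header): how a full
 * ARMMMUIdx value relates to the core TLB index and to the flush bit.
 */
#if 0
static void example_mmu_idx_bits(void)
{
    /* Strip the type bits to get the core TLB index... */
    int core_idx = ARMMMUIdx_E10_1 & ARM_MMU_IDX_COREIDX_MASK;

    /* ...which is what the ARMMMUIdxBit_* flush masks are built from. */
    assert(ARMMMUIdxBit_E10_1 == (1 << core_idx));
}
#endif
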
#define MMU_USER_IDX 0

/**
 * cpu_mmu_index:
 * @env: The cpu environment
 * @ifetch: True for code access, false for data access.
 *
 * Return the core mmu index for the current translation regime.
 * This function is used by generic TCG code paths.
 */
int cpu_mmu_index(CPUARMState *env, bool ifetch);

/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
    ARMASIdx_NS = 0,
    ARMASIdx_S = 1,
} ARMASIdx;

533e93f1 2904/* Return the Exception Level targeted by debug exceptions. */
3a298203
PM
2905static inline int arm_debug_target_el(CPUARMState *env)
2906{
81669b8b
SF
2907 bool secure = arm_is_secure(env);
2908 bool route_to_el2 = false;
2909
2910 if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
2911 route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
b281ba42 2912 env->cp15.mdcr_el2 & MDCR_TDE;
81669b8b
SF
2913 }
2914
2915 if (route_to_el2) {
2916 return 2;
2917 } else if (arm_feature(env, ARM_FEATURE_EL3) &&
2918 !arm_el_is_aa64(env, 3) && secure) {
2919 return 3;
2920 } else {
2921 return 1;
2922 }
3a298203
PM
2923}

static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
{
    /* If all the CLIDR.Ctype<m> bits are 0 there are no caches, and
     * CSSELR is RAZ/WI.
     */
    return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
}

/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    int debug_el;

    if (cur_el == 3) {
        return false;
    }

    /* MDCR_EL3.SDD disables debug events from Secure state */
    if (arm_is_secure_below_el3(env)
        && extract32(env->cp15.mdcr_el3, 16, 1)) {
        return false;
    }

    /*
     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
     * while not masking the (D)ebug bit in DAIF.
     */
    debug_el = arm_debug_target_el(env);

    if (cur_el == debug_el) {
        return extract32(env->cp15.mdscr_el1, 13, 1)
            && !(env->daif & PSTATE_D);
    }

    /* Otherwise the debug target needs to be a higher EL */
    return debug_el > cur_el;
}

static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /* SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /* For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}

/* Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *       return AArch32.GenerateDebugExceptions()
 *   else
 *       return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 */
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (env->aarch64) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false for pre-v8 CPUs.)
 */
static inline bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

static inline bool arm_sctlr_b(CPUARMState *env)
{
    return
        /* We need not implement SCTLR.ITD in user-mode emulation, so
         * let linux-user ignore the fact that it conflicts with SCTLR_B.
         * This lets people run BE32 binaries with "-cpu any".
         */
#ifndef CONFIG_USER_ONLY
        !arm_feature(env, ARM_FEATURE_V7) &&
#endif
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}

uint64_t arm_sctlr(CPUARMState *env, int el);

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32-bit mode, endianness is determined by the CPSR E bit. */
    return env->uncached_cpsr & CPSR_E;
}
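
/*
 * Worked illustration (an explanatory note, not from the upstream header):
 * under word-invariant big-endianness the XOR constant is sized by the
 * access, so a BE32 byte load from address A is performed as a
 * little-endian byte load from A ^ 3, and a halfword load from A ^ 2,
 * which reverses the bytes within each aligned 32-bit word.
 */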

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

typedef CPUARMState CPUArchState;
typedef ARMCPU ArchCPU;

#include "exec/cpu-all.h"

/*
 * Bit usage in the TB flags field: bit 31 indicates whether we are
 * in 32 or 64 bit mode. The meaning of the other bits depends on that.
 * We put flags which are shared between 32 and 64 bit mode at the top
 * of the word, and flags which apply to only one mode at the bottom.
 *
 *  31          20    18    14          9              0
 * +--------------+-----+-----+----------+--------------+
 * |              |     |   TBFLAG_A32   |              |
 * |              |     +-----+----------+  TBFLAG_AM32 |
 * |  TBFLAG_ANY  |           |TBFLAG_M32|              |
 * |              |         +-+----------+--------------|
 * |              |         |         TBFLAG_A64        |
 * +--------------+---------+---------------------------+
 *  31          20        15                           0
 *
 * Unless otherwise noted, these bits are cached in env->hflags.
 */
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1)     /* Not cached. */
FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
/* Target EL if we take a floating-point-disabled exception */
FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
/* For A-profile only, target EL for debug exceptions. */
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)

/*
 * Bit usage when in AArch32 state, both A- and M-profile.
 */
FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */

/*
 * Bit usage when in AArch32 state, for A-profile only.
 */
FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
/*
 * We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime. This shares the same bits as
 * VECSTRIDE, which is OK as no XScale CPU has VFP.
 * Not cached, because VECLEN+VECSTRIDE are not cached.
 */
FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
/*
 * Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
FIELD(TBFLAG_A32, NS, 17, 1)

/*
 * Bit usage when in AArch32 state, for M-profile only.
 */
/* Handler (ie not Thread) mode */
FIELD(TBFLAG_M32, HANDLER, 9, 1)
/* Whether we should generate stack-limit checks */
FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
/* Set if FPCCR.LSPACT is set */
FIELD(TBFLAG_M32, LSPACT, 11, 1)                 /* Not cached. */
/* Set if we must create a new FP context */
FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1)     /* Not cached. */
/* Set if FPCCR.S does not match current security state */
FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1)          /* Not cached. */

/*
 * Bit usage when in AArch64 state
 */
FIELD(TBFLAG_A64, TBII, 0, 2)
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
FIELD(TBFLAG_A64, BT, 9, 1)
FIELD(TBFLAG_A64, BTYPE, 10, 2)         /* Not cached. */
FIELD(TBFLAG_A64, TBID, 12, 2)
FIELD(TBFLAG_A64, UNPRIV, 14, 1)
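
/*
 * Illustrative sketch (not part of the upstream header): the FIELD()
 * definitions above generate extract/deposit helpers via
 * hw/registerfields.h, so translate-time code can unpack tb->flags
 * without open-coded shifts and masks:
 *
 *     uint32_t mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
 *     tb_flags = FIELD_DP32(tb_flags, TBFLAG_ANY, FPEXC_EL, 1);
 */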

static inline bool bswap_code(bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
     * would also end up as a mixed-endian mode with BE code, LE data.
     */
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        sctlr_b;
#else
    /* All code access in ARM is little endian, and there are no loaders
     * doing swaps that need to be reversed
     */
    return 0;
#endif
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        arm_cpu_data_is_big_endian(env);
}
#endif

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *flags);

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};

#ifndef CONFIG_USER_ONLY
/* Return the address space index to use for a memory access */
static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
}

/* Return the AddressSpace to use for a memory access
 * (which depends on whether the access is S or NS, and whether
 * the board gave us a separate AddressSpace for S accesses).
 */
static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
}
#endif
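
/*
 * Illustrative sketch (not part of the upstream header): a system-mode
 * caller holding a CPUState and MemTxAttrs can route a load through the
 * correct address space like this:
 *
 *     MemTxResult res;
 *     uint32_t val = address_space_ldl(arm_addressspace(cs, attrs),
 *                                      addr, attrs, &res);
 */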

/**
 * arm_register_pre_el_change_hook:
 * Register a hook function which will be called immediately before this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that if a pre-change hook is called, any registered post-change hooks
 * are guaranteed to subsequently be called.
 */
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque);
/**
 * arm_register_el_change_hook:
 * Register a hook function which will be called immediately after this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that any hooks registered here are guaranteed to be called if
 * pre-change hooks have been called.
 */
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque);

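/*
 * Illustrative sketch (not part of the upstream header; it assumes the
 * ARMELChangeHookFn typedef declared earlier in this file): a device or
 * accelerator can observe EL changes by registering a hook:
 *
 *     static void my_el_change_hook(ARMCPU *cpu, void *opaque)
 *     {
 *         // e.g. re-evaluate interrupt routing for the new EL
 *     }
 *
 *     arm_register_el_change_hook(cpu, my_el_change_hook, NULL);
 */
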
/**
 * arm_rebuild_hflags:
 * Rebuild the cached TBFLAGS for arbitrary changed processor state.
 */
void arm_rebuild_hflags(CPUARMState *env);

/**
 * aa32_vfp_dreg:
 * Return a pointer to the Dn register within env in 32-bit mode.
 */
static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno >> 1].d[regno & 1];
}

/**
 * aa32_vfp_qreg:
 * Return a pointer to the Qn register within env in 32-bit mode.
 */
static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno].d[0];
}

/**
 * aa64_vfp_qreg:
 * Return a pointer to the Qn register within env in 64-bit mode.
 */
static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno].d[0];
}

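/*
 * Worked example (for illustration; it follows directly from the
 * accessors above): the 32-bit D registers overlay the Q registers
 * pairwise, so D5 is the high half of Q2:
 *
 *     aa32_vfp_dreg(env, 5) == &env->vfp.zregs[2].d[1]
 *     aa32_vfp_qreg(env, 2) == &env->vfp.zregs[2].d[0]
 */
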
/* Shared between translate-sve.c and sve_helper.c. */
extern const uint64_t pred_esz_masks[4];

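/*
 * Illustrative note (an assumption about the definitions in
 * sve_helper.c, which are not restated in this header): these masks
 * select the single active predicate bit per element, indexed by
 * element size:
 *
 *     pred_esz_masks[0]    all bits set         (byte elements)
 *     pred_esz_masks[1] == 0x5555555555555555   (halfword elements)
 *     pred_esz_masks[2] == 0x1111111111111111   (word elements)
 *     pred_esz_masks[3] == 0x0101010101010101   (doubleword elements)
 */
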
/*
 * 32-bit feature tests via id registers.
 */
static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
}

static inline bool isar_feature_arm_div(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
}

static inline bool isar_feature_jazelle(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
}

static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
}

static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
}

static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
}

static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
}

static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
}

static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
}

static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
}

static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0;
}

static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
}

static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0;
}

static inline bool isar_feature_aa32_sb(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0;
}

static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
}

static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
{
    /*
     * This is a placeholder for use by VCMA until the rest of
     * the ARMv8.2-FP16 extension is implemented for aa32 mode.
     * At which point we can properly set and check MVFR1.FPHP.
     */
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
}

static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
{
    /* Return true if D16-D31 are implemented */
    return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;
}

static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
}

static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
{
    /* Return true if CPU supports double precision floating point */
    return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0;
}

/*
 * We always set the FP and SIMD FP16 fields to indicate identical
 * levels of support (assuming SIMD is implemented at all), so
 * we only need one set of accessors.
 */
static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 0;
}

static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 1;
}

static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 1;
}

static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 2;
}

static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 3;
}

static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
{
    return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 4;
}

/*
 * 64-bit feature tests via id registers.
 */
static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
}

static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
}

static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
}

static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
}

static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
}

static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
}

static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
}

static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
}

static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
}

static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
}

static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
}

static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
}

static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0;
}

static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0;
}

static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2;
}

static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0;
}

static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0;
}

static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
}

static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
{
    /*
     * Note that while QEMU will only implement the architected algorithm
     * QARMA, and thus APA+GPA, the host cpu for kvm may use implementation
     * defined algorithms, and thus API+GPI, and this predicate controls
     * migration of the 128-bit keys.
     */
    return (id->id_aa64isar1 &
            (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
             FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) |
             FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) |
             FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
}

static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
}

static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0;
}

static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
}

static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
}

static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
}

static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
{
    /* We always set the AdvSIMD and FP fields identically wrt FP16. */
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
}

static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
}

static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}

static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
}

static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
}

static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
}

/*
 * Forward to the above feature tests given an ARMCPU pointer.
 */
#define cpu_isar_feature(name, cpu) \
    ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })

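/*
 * Usage sketch (matching how these predicates are used throughout
 * target/arm): test a feature from an ARMCPU pointer rather than
 * calling the isar_feature_* helper directly:
 *
 *     if (cpu_isar_feature(aa64_sve, cpu)) {
 *         ... enable SVE-specific behaviour ...
 *     }
 */
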
#endif