/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ARM_H
#define CPU_ARM_H

#include "kvm-consts.h"

#if defined(TARGET_AARCH64)
  /* AArch64 definitions */
# define TARGET_LONG_BITS 64
#else
# define TARGET_LONG_BITS 32
#endif

#define CPUArchState struct CPUARMState

#include "qemu-common.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
#define EXCP_DATA_ABORT 4
#define EXCP_IRQ 5
#define EXCP_FIQ 6
#define EXCP_BKPT 7
#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
#define EXCP_STREX 10
#define EXCP_HVC 11 /* HyperVisor Call */
#define EXCP_HYP_TRAP 12
#define EXCP_SMC 13 /* Secure Monitor Call */
#define EXCP_VIRQ 14
#define EXCP_VFIQ 15
#define EXCP_SEMIHOST 16 /* semihosting call (A64 only) */

#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
#define ARMV7M_EXCP_HARD 3
#define ARMV7M_EXCP_MEM 4
#define ARMV7M_EXCP_BUS 5
#define ARMV7M_EXCP_USAGE 6
#define ARMV7M_EXCP_SVC 11
#define ARMV7M_EXCP_DEBUG 12
#define ARMV7M_EXCP_PENDSV 14
#define ARMV7M_EXCP_SYSTICK 15

/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif

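/* Illustrative use (a sketch, not a definition from this file): to hand TCG
 * the AArch32 view of the 64-bit ttbr0_ns field declared below, pass it
 * offsetoflow32(CPUARMState, cp15.ttbr0_ns); on either host endianness this
 * is the offset of the 32 least significant bits of that field.
 */
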
/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

#define NB_MMU_MODES 7
/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 14

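/* Worked example (a sketch of the intended packing): a syndrome word whose
 * meaningful bits live in [25:14] is stored as
 *   word2 = (syndrome & ARM_INSN_START_WORD2_MASK) >> ARM_INSN_START_WORD2_SHIFT;
 * and recovered when restoring CPU state as
 *   syndrome = word2 << ARM_INSN_START_WORD2_SHIFT;
 */
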
/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of a FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
#define GTIMER_HYP 2
#define GTIMER_SEC 3
#define NUM_GTIMERS 4

typedef struct {
    uint64_t raw_tcr;
    uint32_t mask;
    uint32_t base_mask;
} TCR;

typedef struct CPUARMState {
    /* Regs for current mode. */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode. */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits. Use cpsr_{read,write} to access
       the whole CPSR. */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers. */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12. */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is the bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined. */
    uint32_t ZF; /* Z set if zero. */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
        /* MMU translation table base control. */
        TCR tcr_el[4];
        TCR vtcr_el2; /* Virtualization Translation Control. */
        uint32_t c2_data; /* MPU data cacheable bits. */
        uint32_t c2_insn; /* MPU instruction cacheable bits. */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register. */
        union { /* Fault status registers. */
            struct {
                uint64_t ifsr_ns;
                uint64_t ifsr_s;
            };
            struct {
                uint64_t ifsr32_el2;
            };
        };
        union {
            struct {
                uint64_t _unused_dfsr;
                uint64_t dfsr_ns;
                uint64_t hsr;
                uint64_t dfsr_s;
            };
            uint64_t esr_el[4];
        };
        uint32_t c6_region[8]; /* MPU base/size registers. */
        union { /* Fault address registers. */
            struct {
                uint64_t _unused_far0;
#ifdef HOST_WORDS_BIGENDIAN
                uint32_t ifar_ns;
                uint32_t dfar_ns;
                uint32_t ifar_s;
                uint32_t dfar_s;
#else
                uint32_t dfar_ns;
                uint32_t ifar_ns;
                uint32_t dfar_s;
                uint32_t ifar_s;
#endif
                uint64_t _unused_far3;
            };
            uint64_t far_el[4];
        };
        uint64_t hpfar_el2;
        uint64_t hstr_el2;
        union { /* Translation result. */
            struct {
                uint64_t _unused_par_0;
                uint64_t par_ns;
                uint64_t _unused_par_1;
                uint64_t par_s;
            };
            uint64_t par_el[4];
        };

        uint32_t c6_rgnr;

        uint32_t c9_insn; /* Cache lockdown registers. */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint32_t c9_pmovsr; /* perf monitor overflow status */
        uint32_t c9_pmxevtyper; /* perf monitor event type */
        uint32_t c9_pmuserenr; /* perf monitor user enable */
        uint32_t c9_pminten; /* perf monitor interrupt enables */
        union { /* Memory attribute redirection */
            struct {
#ifdef HOST_WORDS_BIGENDIAN
                uint64_t _unused_mair_0;
                uint32_t mair1_ns;
                uint32_t mair0_ns;
                uint64_t _unused_mair_1;
                uint32_t mair1_s;
                uint32_t mair0_s;
#else
                uint64_t _unused_mair_0;
                uint32_t mair0_ns;
                uint32_t mair1_ns;
                uint64_t _unused_mair_1;
                uint32_t mair0_s;
                uint32_t mair1_s;
#endif
            };
            uint64_t mair_el[4];
        };
        union { /* vector base address register */
            struct {
                uint64_t _unused_vbar;
                uint64_t vbar_ns;
                uint64_t hvbar;
                uint64_t vbar_s;
            };
            uint64_t vbar_el[4];
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        struct { /* FCSE PID. */
            uint32_t fcseidr_ns;
            uint32_t fcseidr_s;
        };
        union { /* Context ID. */
            struct {
                uint64_t _unused_contextidr_0;
                uint64_t contextidr_ns;
                uint64_t _unused_contextidr_1;
                uint64_t contextidr_s;
            };
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            struct {
                uint64_t tpidrurw_ns;
                uint64_t tpidrprw_ns;
                uint64_t htpidr;
                uint64_t _tpidr_el3;
            };
            uint64_t tpidr_el[4];
        };
        /* The secure banks of these registers don't map anywhere */
        uint64_t tpidrurw_s;
        uint64_t tpidrprw_s;
        uint64_t tpidruro_s;

        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte. */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
        uint32_t c15_threadid; /* TI debugger thread-ID. */
        uint32_t c15_config_base_address; /* SCU base address. */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        uint64_t oslsr_el1; /* OS Lock Status */
        uint64_t mdcr_el2;
        uint64_t mdcr_el3;
        /* If the counter is enabled, this stores the last time the counter
         * was reset. Otherwise it stores the counter value
         */
        uint64_t c15_ccnt;
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
    } cp15;

    struct {
        uint32_t other_sp;
        uint32_t vecbase;
        uint32_t basepri;
        uint32_t control;
        int current_sp;
        int exception;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Thumb-2 EE state. */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state. */
    struct {
        /* VFP/Neon register state. Note that the mapping between S, D and Q
         * views of the register bank differs between AArch64 and AArch32:
         * In AArch32:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[n]
         *  Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
         *  (and regs[32] to regs[63] are inaccessible)
         * In AArch64:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[2n]
         *  Sn = regs[2n] bits 31..0
         * This corresponds to the architecturally defined mapping between
         * the two execution states, and means we do not need to explicitly
         * map these registers when changing states.
         */
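        /* Worked example of the mapping above (illustrative): in AArch32,
         * D1 is regs[1], whose halves are S2 (bits 31..0) and S3
         * (bits 63..32), and which is also the top half of Q0; in AArch64,
         * D1 is instead regs[2], and S1 is regs[2] bits 31..0.
         */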
        float64 regs[64];

        uint32_t xregs[16];
        /* We store these fpcsr fields separately for convenience. */
        int vec_len;
        int vec_stride;

        /* scratch space when Tn are not sufficient. */
        uint32_t scratch[8];

        /* fp_status is the "normal" fp status. standard_fp_status retains
         * values corresponding to the ARM "Standard FPSCR Value", ie
         * default-NaN, flush-to-zero, round-to-nearest and is used by
         * any operations (generally Neon) which the architecture defines
         * as controlled by the standard FPSCR value rather than the FPSCR.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the two fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status standard_fp_status;
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
    uint64_t exclusive_test;
    uint32_t exclusive_info;
#endif

    /* iwMMXt coprocessor state. */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation. */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    CPU_COMMON

    /* These fields are after the common ones so they are preserved on reset. */

    /* Internal CPU feature flags. */
    uint64_t features;

    /* PMSAv7 MPU */
    struct {
        uint32_t *drbar;
        uint32_t *drsr;
        uint32_t *dracr;
    } pmsav7;

    void *nvic;
    const struct arm_boot_info *boot_info;
} CPUARMState;

/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ARMCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;

    /* Should CPU start in PSCI powered-off state? */
    bool start_powered_off;
    /* CPU currently in PSCI powered-off state */
    bool powered_off;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     */
    uint32_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint32_t mvfr0;
    uint32_t mvfr1;
    uint32_t mvfr2;
    uint32_t ctr;
    uint32_t reset_sctlr;
    uint32_t id_pfr0;
    uint32_t id_pfr1;
    uint32_t id_dfr0;
    uint32_t pmceid0;
    uint32_t pmceid1;
    uint32_t id_afr0;
    uint32_t id_mmfr0;
    uint32_t id_mmfr1;
    uint32_t id_mmfr2;
    uint32_t id_mmfr3;
    uint32_t id_mmfr4;
    uint32_t id_isar0;
    uint32_t id_isar1;
    uint32_t id_isar2;
    uint32_t id_isar3;
    uint32_t id_isar4;
    uint32_t id_isar5;
    uint64_t id_aa64pfr0;
    uint64_t id_aa64pfr1;
    uint64_t id_aa64dfr0;
    uint64_t id_aa64dfr1;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t id_aa64isar0;
    uint64_t id_aa64isar1;
    uint64_t id_aa64mmfr0;
    uint64_t id_aa64mmfr1;
    uint32_t dbgdidr;
    uint32_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint32_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;
    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint32_t dcz_blocksize;
    uint64_t rvbar;
};

static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
{
    return container_of(env, ARMCPU, env);
}

#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))

#define ENV_OFFSET offsetof(ARMCPU, env)

#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);

#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
#endif

ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUState *cpu);
target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/**
 * pmccntr_sync
 * @env: CPUARMState
 *
 * Synchronises the counter in the PMCCNTR. This must always be called twice,
 * once before any action that might affect the timer and again afterwards.
 * The function is used to swap the state of the register if required.
 * This only happens when not in user mode (!CONFIG_USER_ONLY)
 */
void pmccntr_sync(CPUARMState *env);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M (1U << 0)
#define SCTLR_A (1U << 1)
#define SCTLR_C (1U << 2)
#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA (1U << 3)
#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD (1U << 7) /* v8 onward */
#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED (1U << 8) /* v8 onward */
#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F (1U << 10) /* up to v6 */
#define SCTLR_SW (1U << 10) /* v7 onward */
#define SCTLR_Z (1U << 11)
#define SCTLR_I (1U << 12)
#define SCTLR_V (1U << 13)
#define SCTLR_RR (1U << 14) /* up to v7 */
#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI (1U << 16) /* v8 onward */
#define SCTLR_HA (1U << 17)
#define SCTLR_BR (1U << 17) /* PMSA only */
#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE (1U << 18) /* v8 onward */
#define SCTLR_WXN (1U << 19)
#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN (1U << 20) /* v7 onward */
#define SCTLR_FI (1U << 21)
#define SCTLR_U (1U << 22)
#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_VE (1U << 24) /* up to v7 */
#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE (1U << 25)
#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI (1U << 27)
#define SCTLR_TRE (1U << 28)
#define SCTLR_AFE (1U << 29)
#define SCTLR_TE (1U << 30)

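/* Illustrative check (a sketch; 'sctlr' here is whatever SCTLR value the
 * caller has to hand): because bit meanings are version-dependent, guard
 * uses with a feature test rather than assuming the newest meaning, e.g.
 *     if (arm_feature(env, ARM_FEATURE_V8) && (sctlr & SCTLR_SED)) { ... }
 */
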
#define CPTR_TCPAC (1U << 31)
#define CPTR_TTA (1U << 20)
#define CPTR_TFP (1U << 10)

#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
#define MDCR_SPME (1U << 17)
#define MDCR_SDD (1U << 16)
#define MDCR_SPD (3U << 14)
#define MDCR_TDRA (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA (1U << 9)
#define MDCR_TDE (1U << 8)
#define MDCR_HPME (1U << 7)
#define MDCR_TPM (1U << 6)
#define MDCR_TPMCR (1U << 5)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
                          | CPSR_NZCV)
/* Bits writable in user mode. */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits. MRS read as zero, MSR writes ignored. */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)

#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0 (1U << 4)
#define TTBCR_PD1 (1U << 5)
#define TTBCR_EPD0 (1U << 7)
#define TTBCR_IRGN0 (3U << 8)
#define TTBCR_ORGN0 (3U << 10)
#define TTBCR_SH0 (3U << 12)
#define TTBCR_T1SZ (3U << 16)
#define TTBCR_A1 (1U << 22)
#define TTBCR_EPD1 (1U << 23)
#define TTBCR_IRGN1 (3U << 24)
#define TTBCR_ORGN1 (3U << 26)
#define TTBCR_SH1 (1U << 28)
#define TTBCR_EAE (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Map EL and handler into a PSTATE_MODE. */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}

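/* For example: aarch64_pstate_mode(1, true) == PSTATE_MODE_EL1h (5), and
 * aarch64_pstate_mode(3, false) == PSTATE_MODE_EL3t (12).
 */
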
/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif;
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}

/* Return the current CPSR value. */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,             /* trust values, do not switch reg banks */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);

/* Return the current xPSR value. */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | env->v7m.exception;
}

/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & (1 << 24))
        env->thumb = ((val & (1 << 24)) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & 0x1ff) {
        env->v7m.exception = val & 0x1ff;
    }
}

#define HCR_VM (1ULL << 0)
#define HCR_SWIO (1ULL << 1)
#define HCR_PTW (1ULL << 2)
#define HCR_FMO (1ULL << 3)
#define HCR_IMO (1ULL << 4)
#define HCR_AMO (1ULL << 5)
#define HCR_VF (1ULL << 6)
#define HCR_VI (1ULL << 7)
#define HCR_VSE (1ULL << 8)
#define HCR_FB (1ULL << 9)
#define HCR_BSU_MASK (3ULL << 10)
#define HCR_DC (1ULL << 12)
#define HCR_TWI (1ULL << 13)
#define HCR_TWE (1ULL << 14)
#define HCR_TID0 (1ULL << 15)
#define HCR_TID1 (1ULL << 16)
#define HCR_TID2 (1ULL << 17)
#define HCR_TID3 (1ULL << 18)
#define HCR_TSC (1ULL << 19)
#define HCR_TIDCP (1ULL << 20)
#define HCR_TACR (1ULL << 21)
#define HCR_TSW (1ULL << 22)
#define HCR_TPC (1ULL << 23)
#define HCR_TPU (1ULL << 24)
#define HCR_TTLB (1ULL << 25)
#define HCR_TVM (1ULL << 26)
#define HCR_TGE (1ULL << 27)
#define HCR_TDZ (1ULL << 28)
#define HCR_HCD (1ULL << 29)
#define HCR_TRVM (1ULL << 30)
#define HCR_RW (1ULL << 31)
#define HCR_CD (1ULL << 32)
#define HCR_ID (1ULL << 33)
#define HCR_MASK ((1ULL << 34) - 1)

#define SCR_NS (1U << 0)
#define SCR_IRQ (1U << 1)
#define SCR_FIQ (1U << 2)
#define SCR_EA (1U << 3)
#define SCR_FW (1U << 4)
#define SCR_AW (1U << 5)
#define SCR_NET (1U << 6)
#define SCR_SMD (1U << 7)
#define SCR_HCE (1U << 8)
#define SCR_SIF (1U << 9)
#define SCR_RW (1U << 10)
#define SCR_ST (1U << 11)
#define SCR_TWI (1U << 12)
#define SCR_TWE (1U << 13)
#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)

/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07f79f00
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

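/* Note (easily checked from the masks above): FPSR_MASK and FPCR_MASK are
 * disjoint, (FPSR_MASK & FPCR_MASK) == 0, so vfp_set_fpsr() never disturbs
 * FPCR bits and vfp_set_fpcr() never disturbs FPSR bits, even though both
 * views share the single underlying fpscr value.
 */
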
enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers. */
#define ARM_VFP_FPSID 0
#define ARM_VFP_FPSCR 1
#define ARM_VFP_MVFR2 5
#define ARM_VFP_MVFR1 6
#define ARM_VFP_MVFR0 7
#define ARM_VFP_FPEXC 8
#define ARM_VFP_FPINST 9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers. */
#define ARM_IWMMXT_wCID 0
#define ARM_IWMMXT_wCon 1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_VFP,
    ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
    ARM_FEATURE_VFP3,
    ARM_FEATURE_VFP_FP16,
    ARM_FEATURE_NEON,
    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
    ARM_FEATURE_M, /* Microcontroller profile. */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to that level.
 * Unlike arm_is_secure() (which is always a question about the
 * _current_ state of the CPU) this doesn't care about the current
 * EL or mode.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return !(env->cp15.scr_el3 & SCR_NS);
    } else {
        /* If EL3 is not supported then the secure state is implementation
         * defined, in which case QEMU defaults to non-secure.
         */
        return false;
    }
}

/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                   (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return arm_is_secure_below_el3(env);
}

#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}
#endif

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /* The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure) \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
    do { \
        if (_secure) { \
            (_env)->cp15._regname##_s = (_val); \
        } else { \
            (_env)->cp15._regname##_ns = (_val); \
        } \
    } while (0)

/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system. These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
    A32_BANKED_REG_GET((_env), _regname, \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
    A32_BANKED_REG_SET((_env), _regname, \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))

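/* Example use (a sketch): with the banked TTBR0 fields declared in
 * CPUARMState above,
 *     A32_BANKED_CURRENT_REG_GET(env, ttbr0)
 * expands to read env->cp15.ttbr0_s when the CPU is secure with an AArch32
 * EL3, and env->cp15.ttbr0_ns otherwise.
 */
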
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure);

/* Interface between CPU and Interrupt controller. */
void armv7m_nvic_set_pending(void *opaque, int irq);
int armv7m_nvic_acknowledge_irq(void *opaque);
void armv7m_nvic_complete_irq(void *opaque, int irq);

/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/* To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)

#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

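/* For instance (illustrative): the 32-bit non-secure view of SCTLR
 * (cp15, crn=1, crm=0, opc1=0, opc2=0) gets the hashtable key
 *     ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0) == 0x200f0800
 * i.e. the NS bit, cp in bits [19:16] and crn in bits [14:11].
 */
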
#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK | \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) | \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))

/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /* KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}

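/* Illustrative round trip (a sketch): for an AArch64 ID of the form
 * CP_REG_ARM64 | CP_REG_SIZE_U64 | x (x in the low bits, as for sysregs),
 * kvm_to_cpreg_id() yields CP_REG_AA64_MASK | x, and cpreg_to_kvm_id()
 * reconstructs the original 64 bit ID from that key.
 */
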
/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [15..8] indicate what behaviour
 * it has. Otherwise it is a simple cp reg, where CONST indicates that
 * TCG can assume the value to be constant (ie load at translate time)
 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
 * indicates that the TB should not be ended after a write to this register
 * (the default is that the TB ends after cp writes). OVERRIDE permits
 * a register definition to override a previous definition for the
 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
 * old must have the OVERRIDE bit set.
 * ALIAS indicates that this register is an alias view of some underlying
 * state which is also visible via another register, and that the other
 * register is handling migration and reset; registers marked ALIAS will not be
 * migrated but may have their state set by syncing of register state from KVM.
 * NO_RAW indicates that this register has no underlying state and does not
 * support raw access for state saving/loading; it will not be used for either
 * migration or KVM state synchronization. (Typically this is for "registers"
 * which are actually used as instructions for cache maintenance and so on.)
 * IO indicates that this register does I/O and therefore its accesses
 * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
 * registers which implement clocks or timers require this.
 */
#define ARM_CP_SPECIAL 1
#define ARM_CP_CONST 2
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
#define ARM_CP_ALIAS 32
#define ARM_CP_IO 64
#define ARM_CP_NO_RAW 128
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0xff

/* Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
};

/* ARM CP register secure state flags. These flags identify security state
 * attributes for a given CP register entry.
 * The existence of both or neither secure and non-secure flags indicates that
 * the register has both a secure and non-secure hash entry. A single one of
 * these flags causes the register to only be hashed for the specified
 * security state.
 * Although definitions may have any combination of the S/NS bits, each
 * registered entry will only have one to identify whether the entry is secure
 * or non-secure.
 */
enum {
    ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
    ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
};

/* Return true if cptype is a valid type field. This is used to try to
 * catch errors where the sentinel has been accidentally left off the end
 * of a list of registers.
 */
static inline bool cptype_valid(int cptype)
{
    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
        || ((cptype & ARM_CP_SPECIAL) &&
            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
}

/* Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
#define PL3_R 0x80
#define PL3_W 0x40
#define PL2_R (0x20 | PL3_R)
#define PL2_W (0x10 | PL3_W)
#define PL1_R (0x08 | PL2_R)
#define PL1_W (0x04 | PL2_W)
#define PL0_R (0x02 | PL1_R)
#define PL0_W (0x01 | PL1_W)

#define PL3_RW (PL3_R | PL3_W)
#define PL2_RW (PL2_R | PL2_W)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)

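/* By construction each level's bits include all higher levels' bits:
 * e.g. PL0_R == 0xaa, so a register readable at PL0 is automatically
 * readable at PL1, PL2 and PL3 as well.
 */
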
/* Return the highest implemented Exception Level */
static inline int arm_highest_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return 3;
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        return 2;
    }
    return 1;
}

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return !((env->v7m.exception == 0) && (env->v7m.control & 1));
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}

typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
     * PL1 if in EL0, otherwise to the current EL).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_EL2 = 3,
    CP_ACCESS_TRAP_EL3 = 4,
e7615726
PM
1529 /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
1530 CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
1531 CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
f2cae609
PM
1532 /* Access fails and results in an exception syndrome for an FP access,
1533 * trapped directly to EL2 or EL3
1534 */
1535 CP_ACCESS_TRAP_FP_EL2 = 7,
1536 CP_ACCESS_TRAP_FP_EL3 = 8,
f59df3f2
PM
1537} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env,
                                  const ARMCPRegInfo *opaque,
                                  bool isread);
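
/* A minimal sketch of an accessfn (hypothetical example, not an actual
 * QEMU register hook): deny all EL0 accesses with a trap to the usual
 * target EL, and permit everything else.
 *
 *   static CPAccessResult demo_el0_deny(CPUARMState *env,
 *                                       const ARMCPRegInfo *ri,
 *                                       bool isread)
 *   {
 *       if (arm_current_el(env) == 0) {
 *           return CP_ACCESS_TRAP;
 *       }
 *       return CP_ACCESS_OK;
 *   }
 *
 * Such a function would be wired up via the .accessfn field of the
 * ARMCPRegInfo definition below.
 */
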
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);

#define CP_ANY 0xff

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
    /* Execution state in which this register is visible: ARM_CP_STATE_* */
    int state;
    /* Register type: ARM_CP_* bits/values */
    int type;
    /* Access rights: PL*_[RW] */
    int access;
    /* Security state: ARM_CP_SECSTATE_* bits/values */
    int secure;
    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
     * this register was defined: can be used to hand data through to the
     * register read/write functions, since they are passed the ARMCPRegInfo*.
     */
    void *opaque;
    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
     * fieldoffset is non-zero, the reset value of the register.
     */
    uint64_t resetvalue;
    /* Offset of the field in CPUARMState for this register.
     *
     * This is not needed if either:
     * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
     * 2. both readfn and writefn are specified
     */
    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */

    /* Offsets of the secure and non-secure fields in CPUARMState for the
     * register if it is banked. These fields are only used during the static
     * registration of a register. During hashing the bank associated
     * with a given security state is copied to fieldoffset, which is used
     * from there on out.
     *
     * Register definitions should use either fieldoffset or
     * bank_fieldoffsets, but not both. Setting both bank offsets is what
     * marks a register as banked.
     */
    ptrdiff_t bank_fieldoffsets[2];

    /* Function for making any access checks for this register in addition to
     * those specified by the 'access' permissions bits. If NULL, no extra
     * checks are required. The access check is performed at runtime, not at
     * translate time.
     */
    CPAccessFn *accessfn;
    /* Function for handling reads of this register. If NULL, then reads
     * will be done by loading from the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPReadFn *readfn;
    /* Function for handling writes of this register. If NULL, then writes
     * will be done by writing to the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPWriteFn *writefn;
    /* Function for doing a "raw" read; used when we need to copy
     * coprocessor state to the kernel for KVM or out for
     * migration. This only needs to be provided if there is also a
     * readfn and it has side effects (for instance clear-on-read bits).
     */
    CPReadFn *raw_readfn;
    /* Function for doing a "raw" write; used when we need to copy KVM
     * kernel coprocessor state into userspace, or for inbound
     * migration. This only needs to be provided if there is also a
     * writefn and it masks out "unwritable" bits or has write-one-to-clear
     * or similar behaviour.
     */
    CPWriteFn *raw_writefn;
    /* Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;
};

/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

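/* For instance, a custom writefn for a 32-bit register could update the
 * backing storage with (illustrative use only):
 *   CPREG_FIELD32(env, ri) = value;
 * which mirrors what the default write path does when writefn is NULL.
 */
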
#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, 0);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
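
/* A registration sketch (hypothetical register, for illustration only):
 * a constant cp15 register readable and writable from PL1, defined with
 * the sentinel that terminates every list passed to define_arm_cp_regs():
 *
 *   static const ARMCPRegInfo demo_cp_reginfo[] = {
 *       { .name = "DEMO_CONST", .cp = 15, .crn = 9, .crm = 0,
 *         .opc1 = 0, .opc2 = 0, .access = PL1_RW,
 *         .type = ARM_CP_CONST, .resetvalue = 0 },
 *       REGINFO_SENTINEL
 *   };
 *
 *   define_arm_cp_regs(cpu, demo_cp_reginfo);
 */
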
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);

/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non-zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}

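/* Worked example: for a register with .access = PL1_RW (0xfc), an EL0
 * read tests bit (0 * 2) + 1 = bit 1, which is clear, so the access is
 * denied; an EL1 write tests bit (1 * 2) + 0 = bit 2, which is set, so
 * the access is allowed.
 */
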
/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);

/**
 * write_list_to_cpustate:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the CPUARMState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the CPUARMState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu);

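/* A minimal usage sketch (outbound migration direction, assuming the
 * cpreg_indexes/cpreg_values lists have already been populated):
 *
 *   if (!write_cpustate_to_list(cpu)) {
 *       error_report("unknown cp register; values list incomplete");
 *   }
 */
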
/* Does the core conform to the "MicroController" profile (e.g. Cortex-M3)?
   Note the M in older cores (eg. ARM7TDMI) stands for Multiply. Those are
   conventional cores (ie. Application or Realtime profile). */

#define IS_M(env) arm_feature(env, ARM_FEATURE_M)

#define ARM_CPUID_TI915T 0x54029152
#define ARM_CPUID_TI925T 0x54029252

#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* The ARM MMU allows 1k pages. */
/* ??? Linux doesn't actually use these, and they're deprecated in recent
   architecture revisions. Maybe a configure option to disable them. */
#define TARGET_PAGE_BITS 10
#endif

#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool pstate_unmasked;
    int8_t unmasked = 0;

    /* Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken but left
     * pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
            /* VFIQs are only taken when virtualized (EL2 enabled) and
             * non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
            /* VIRQs are only taken when virtualized (EL2 enabled) and
             * non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /* Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure) {
                unmasked = 1;
            }
        } else {
            /* The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /* If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting; otherwise we further assess the state
                 * below.
                 */
                hcr = (env->cp15.hcr_el2 & HCR_FMO);
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /* When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /* When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = (env->cp15.hcr_el2 & HCR_IMO);
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = 1;
            }
        }
    }

    /* The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}

#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))

#define cpu_exec cpu_arm_exec
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

/* ARM has the following "translation regimes" (as the ARM ARM calls them):
 *
 * If EL3 is 64-bit:
 *  + NonSecure EL1 & 0 stage 1
 *  + NonSecure EL1 & 0 stage 2
 *  + NonSecure EL2
 *  + Secure EL1 & EL0
 *  + Secure EL3
 * If EL3 is 32-bit:
 *  + NonSecure PL1 & 0 stage 1
 *  + NonSecure PL1 & 0 stage 2
 *  + NonSecure PL2
 *  + Secure PL0 & PL1
 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
 *  1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
 *     may differ in access permissions even if the VA->PA map is the same
 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *     translation, which means that we have one mmu_idx that deals with two
 *     concatenated translation regimes [this sort of combined s1+2 TLB is
 *     architecturally permitted]
 *  3. we don't need to allocate an mmu_idx to translations that we won't be
 *     handling via the TLB. The only way to do a stage 1 translation without
 *     the immediate stage 2 translation is via the ATS or AT system insns,
 *     which can be slow-pathed and always do a page table walk.
 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *     translation regimes, because they map reasonably well to each other
 *     and they can't both be active at the same time.
 * This gives us the following list of mmu_idx values:
 *
 * NS EL0 (aka NS PL0) stage 1+2
 * NS EL1 (aka NS PL1) stage 1+2
 * NS EL2 (aka NS PL2)
 * S EL3 (aka S PL1)
 * S EL0 (aka S PL0)
 * S EL1 (not used if EL3 is 32 bit)
 * NS EL0+1 stage 2
 *
 * (The last of these is an mmu_idx because we want to be able to use the TLB
 * for the accesses done as part of a stage 1 page table walk, rather than
 * having to walk the stage 2 page table over and over.)
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 */
typedef enum ARMMMUIdx {
    ARMMMUIdx_S12NSE0 = 0,
    ARMMMUIdx_S12NSE1 = 1,
    ARMMMUIdx_S1E2 = 2,
    ARMMMUIdx_S1E3 = 3,
    ARMMMUIdx_S1SE0 = 4,
    ARMMMUIdx_S1SE1 = 5,
    ARMMMUIdx_S2NS = 6,
    /* Indexes below here don't have TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_S1NSE0 = 7,
    ARMMMUIdx_S1NSE1 = 8,
} ARMMMUIdx;

#define MMU_USER_IDX 0

/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    assert(mmu_idx < ARMMMUIdx_S2NS);
    return mmu_idx & 3;
}

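/* The "mmu_idx & 3" trick works because of the careful value assignment
 * in the enum above: e.g. ARMMMUIdx_S1SE1 (5) & 3 == 1 (Secure EL1), and
 * ARMMMUIdx_S1E2 (2) & 3 == 2 (EL2).
 */
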
/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return el;
}

/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
    ARMASIdx_NS = 0,
    ARMASIdx_S = 1,
} ARMASIdx;

/* Return the Exception Level targeted by debug exceptions. */
static inline int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & (1 << 8); /* MDCR_EL2.TDE */
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    if (arm_is_secure(env)) {
        /* MDCR_EL3.SDD disables debug events from Secure state */
        if (extract32(env->cp15.mdcr_el3, 16, 1) != 0
            || arm_current_el(env) == 3) {
            return false;
        }
    }

    if (arm_current_el(env) == arm_debug_target_el(env)) {
        if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
            || (env->daif & PSTATE_D)) {
            return false;
        }
    }
    return true;
}

static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /* SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /* For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}

/* Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *       return AArch32.GenerateDebugExceptions()
 *   else
 *       return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 *
 * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
 * don't yet implement those exception levels or their associated trap bits.
 */
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (env->aarch64) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
static inline bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

static inline bool arm_sctlr_b(CPUARMState *env)
{
    return
        /* We need not implement SCTLR.ITD in user-mode emulation, so
         * let linux-user ignore the fact that it conflicts with SCTLR_B.
         * This lets people run BE32 binaries with "-cpu any".
         */
#ifndef CONFIG_USER_ONLY
        !arm_feature(env, ARM_FEATURE_V7) &&
#endif
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    int cur_el;

    /* In 32-bit mode, endianness is determined by the CPSR's E bit */
    if (!is_a64(env)) {
        return
#ifdef CONFIG_USER_ONLY
            /* In system mode, BE32 is modelled in line with the
             * architecture (as word-invariant big-endianness), where loads
             * and stores are done little endian but from addresses which
             * are adjusted by XORing with the appropriate constant. So the
             * endianness to use for the raw data access is not affected by
             * SCTLR.B.
             * In user mode, however, we model BE32 as byte-invariant
             * big-endianness (because user-only code cannot tell the
             * difference), and so we need to use a data access endianness
             * that depends on SCTLR.B.
             */
            arm_sctlr_b(env) ||
#endif
            ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
    }

    cur_el = arm_current_el(env);

    if (cur_el == 0) {
        return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
    }

    return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
}

#include "exec/cpu-all.h"

/* Bit usage in the TB flags field: bit 31 indicates whether we are
 * in 32 or 64 bit mode. The meaning of the other bits depends on that.
 * We put flags which are shared between 32 and 64 bit mode at the top
 * of the word, and flags which apply to only one mode at the bottom.
 */
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX_SHIFT 28
#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE_SHIFT 27
#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS_SHIFT 26
#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
/* Target EL if we take a floating-point-disabled exception */
#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)

/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN_SHIFT 1
#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT 7
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_SCTLR_B_SHIFT 16
#define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime
 */
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 17
#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
/* Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
#define ARM_TBFLAG_NS_SHIFT 19
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
#define ARM_TBFLAG_BE_DATA_SHIFT 20
#define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)

/* Bit usage when in AArch64 state: currently we have no A64 specific bits */

/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
    (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX(F) \
    (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_FPEXC_EL(F) \
    (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
    (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
    (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
    (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
    (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
    (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_SCTLR_B(F) \
    (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_NS(F) \
    (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
#define ARM_TBFLAG_BE_DATA(F) \
    (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)

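/* Usage sketch: after cpu_get_tb_cpu_state() below fills in the flags
 * word, the translator recovers individual fields with the accessors,
 * e.g.:
 *
 *   int mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
 *   bool thumb = !ARM_TBFLAG_AARCH64_STATE(tb->flags) &&
 *                ARM_TBFLAG_THUMB(tb->flags);
 */
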
static inline bool bswap_code(bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
     * would also end up as a mixed-endian mode with BE code, LE data.
     */
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        sctlr_b;
#else
    /* All code access in ARM is little endian, and there are no loaders
     * doing swaps that need to be reversed
     */
    return 0;
#endif
}

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }

    return 0;
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        arm_cpu_data_is_big_endian(env);
}
#endif

static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    if (is_a64(env)) {
        *pc = env->pc;
        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
    } else {
        *pc = env->regs[15];
        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            *flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            *flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        *flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;

    *cs_base = 0;
}

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};

#ifndef CONFIG_USER_ONLY
/* Return the address space index to use for a memory access */
static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
}

/* Return the AddressSpace to use for a memory access
 * (which depends on whether the access is S or NS, and whether
 * the board gave us a separate AddressSpace for S accesses).
 */
static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
}
#endif

#endif