/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ARM_H
#define CPU_ARM_H

#include "config.h"

#include "kvm-consts.h"

#if defined(TARGET_AARCH64)
  /* AArch64 definitions */
# define TARGET_LONG_BITS 64
# define ELF_MACHINE EM_AARCH64
#else
# define TARGET_LONG_BITS 32
# define ELF_MACHINE EM_ARM
#endif

#define CPUArchState struct CPUARMState

#include "qemu-common.h"
#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define TARGET_HAS_ICE 1

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_STREX          10
#define EXCP_HVC            11   /* Hypervisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15

#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
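
/* As an illustration (this helper is not part of this header), TCG register
 * definitions use these macros so that the AArch32 view of a 64 bit system
 * register touches only its least significant half; 'example_reg' below is a
 * hypothetical uint64_t cp15 field:
 */
#if 0 /* illustrative sketch, not compiled */
static ptrdiff_t example_aa32_view_offset(void)
{
    /* An AArch32 access reads/writes only bits [31:0] of example_reg,
     * wherever those live on this host's endianness.
     */
    return offsetoflow32(CPUARMState, cp15.example_reg);
}
#endif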

/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

typedef void ARMWriteCPFunc(void *opaque, int cp_info,
                            int srcreg, int operand, uint32_t value);
typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
                               int dstreg, int operand);

struct arm_boot_info;

#define NB_MMU_MODES 4

/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of a FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
#define NUM_GTIMERS 2

typedef struct {
    uint64_t raw_tcr;
    uint32_t mask;
    uint32_t base_mask;
} TCR;

typedef struct CPUARMState {
    /* Regs for current mode.  */
    uint32_t regs[16];

    /* The 32/64 switch only happens when taking or returning from an
     * exception, so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split-out env->CF/VF/NF/ZF (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits.  Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is bit 31. All other bits are undefined.  */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint64_t daif; /* exception masks, in the positions they occupy in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
        uint64_t c1_coproc; /* Coprocessor access register.  */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        /* MMU translation table base control. */
        TCR tcr_el[4];
        uint32_t c2_data; /* MPU data cacheable bits.  */
        uint32_t c2_insn; /* MPU instruction cacheable bits.  */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        uint32_t ifsr_el2; /* Fault status registers.  */
        uint64_t esr_el[4];
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        uint64_t far_el[4]; /* Fault address registers.  */
        uint64_t par_el1;  /* Translation result. */
        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint32_t c9_pmovsr; /* perf monitor overflow status */
        uint32_t c9_pmxevtyper; /* perf monitor event type */
        uint32_t c9_pmuserenr; /* perf monitor user enable */
        uint32_t c9_pminten; /* perf monitor interrupt enables */
        uint64_t mair_el1;
        uint64_t vbar_el[4]; /* vector base address register */
        uint32_t mvbar; /* (monitor) vector base address register */
        uint32_t c13_fcse; /* FCSE PID.  */
        uint64_t contextidr_el1; /* Context ID.  */
        uint64_t tpidr_el0; /* User RW Thread register.  */
        uint64_t tpidrro_el0; /* User RO Thread register.  */
        uint64_t tpidr_el1; /* Privileged Thread register.  */
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        /* If the counter is enabled, this stores the last time the counter
         * was reset. Otherwise it stores the counter value.
         */
        uint64_t c15_ccnt;
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
    } cp15;

    struct {
        uint32_t other_sp;
        uint32_t vecbase;
        uint32_t basepri;
        uint32_t control;
        int current_sp;
        int exception;
        int pending_exception;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Thumb-2 EE state.  */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state.  */
    struct {
        /* VFP/Neon register state. Note that the mapping between S, D and Q
         * views of the register bank differs between AArch64 and AArch32:
         * In AArch32:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[n]
         *  Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
         *  (and regs[32] to regs[63] are inaccessible)
         * In AArch64:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[2n]
         *  Sn = regs[2n] bits 31..0
         * This corresponds to the architecturally defined mapping between
         * the two execution states, and means we do not need to explicitly
         * map these registers when changing states.
         */
        float64 regs[64];

        uint32_t xregs[16];
        /* We store these fpscr fields separately for convenience.  */
        int vec_len;
        int vec_stride;

        /* scratch space when Tn are not sufficient.  */
        uint32_t scratch[8];

        /* fp_status is the "normal" fp status. standard_fp_status retains
         * values corresponding to the ARM "Standard FPSCR Value", ie
         * default-NaN, flush-to-zero, round-to-nearest and is used by
         * any operations (generally Neon) which the architecture defines
         * as controlled by the standard FPSCR value rather than the FPSCR.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the two fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status standard_fp_status;
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
    uint64_t exclusive_test;
    uint32_t exclusive_info;
#endif

    /* iwMMXt coprocessor state.  */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

    /* For mixed endian mode.  */
    bool bswap_code;

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation.  */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    CPU_COMMON

    /* These fields are after the common ones so they are preserved on reset. */

    /* Internal CPU feature flags.  */
    uint64_t features;

    void *nvic;
    const struct arm_boot_info *boot_info;
} CPUARMState;

#include "cpu-qom.h"

ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUARMState *s);
uint32_t do_arm_semihosting(CPUARMState *env);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* You can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);
int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                             int mmu_idx);

/**
 * pmccntr_sync
 * @env: CPUARMState
 *
 * Synchronises the counter in the PMCCNTR. This must always be called twice,
 * once before any action that might affect the timer and again afterwards.
 * The function is used to swap the state of the register if required.
 * This only happens when not in user mode (!CONFIG_USER_ONLY).
 */
void pmccntr_sync(CPUARMState *env);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M       (1U << 0)
#define SCTLR_A       (1U << 1)
#define SCTLR_C       (1U << 2)
#define SCTLR_W       (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA      (1U << 3)
#define SCTLR_P       (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_SA0     (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D       (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L       (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_B       (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD     (1U << 7) /* v8 onward */
#define SCTLR_S       (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED     (1U << 8) /* v8 onward */
#define SCTLR_R       (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA     (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F       (1U << 10) /* up to v6 */
#define SCTLR_SW      (1U << 10) /* v7 onward */
#define SCTLR_Z       (1U << 11)
#define SCTLR_I       (1U << 12)
#define SCTLR_V       (1U << 13)
#define SCTLR_RR      (1U << 14) /* up to v7 */
#define SCTLR_DZE     (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4      (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT     (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT      (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI    (1U << 16) /* v8 onward */
#define SCTLR_HA      (1U << 17)
#define SCTLR_IT      (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE    (1U << 18) /* v8 onward */
#define SCTLR_WXN     (1U << 19)
#define SCTLR_ST      (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN    (1U << 20) /* v7 onward */
#define SCTLR_FI      (1U << 21)
#define SCTLR_U       (1U << 22)
#define SCTLR_XP      (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_VE      (1U << 24) /* up to v7 */
#define SCTLR_E0E     (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE      (1U << 25)
#define SCTLR_L2      (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI     (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI    (1U << 27)
#define SCTLR_TRE     (1U << 28)
#define SCTLR_AFE     (1U << 29)
#define SCTLR_TE      (1U << 30)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
    | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits.  MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}

/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif;
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}
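
/* A minimal sketch (not part of this header) of how the cached NZCV bits
 * round-trip through pstate_write()/pstate_read(): writing PSTATE_Z stores
 * ZF == 0, and reading turns ZF == 0 back into bit 30, and so on.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_pstate_roundtrip(CPUARMState *env)
{
    uint32_t val = PSTATE_Z | PSTATE_C | PSTATE_MODE_EL1h;

    pstate_write(env, val);            /* splits NZCV/DAIF out of val */
    assert(pstate_read(env) == val);   /* reassembles them losslessly */
}
#endif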

/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);
/* Set the CPSR.  Note that some bits of mask must be all-set or all-clear.  */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask);

/* Return the current xPSR value.  */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | env->v7m.exception;
}

/* Set the xPSR.  Note that some bits of mask must be all-set or all-clear.  */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & (1 << 24)) {
        env->thumb = ((val & (1 << 24)) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & 0x1ff) {
        env->v7m.exception = val & 0x1ff;
    }
}

#define HCR_VM        (1ULL << 0)
#define HCR_SWIO      (1ULL << 1)
#define HCR_PTW       (1ULL << 2)
#define HCR_FMO       (1ULL << 3)
#define HCR_IMO       (1ULL << 4)
#define HCR_AMO       (1ULL << 5)
#define HCR_VF        (1ULL << 6)
#define HCR_VI        (1ULL << 7)
#define HCR_VSE       (1ULL << 8)
#define HCR_FB        (1ULL << 9)
#define HCR_BSU_MASK  (3ULL << 10)
#define HCR_DC        (1ULL << 12)
#define HCR_TWI       (1ULL << 13)
#define HCR_TWE       (1ULL << 14)
#define HCR_TID0      (1ULL << 15)
#define HCR_TID1      (1ULL << 16)
#define HCR_TID2      (1ULL << 17)
#define HCR_TID3      (1ULL << 18)
#define HCR_TSC       (1ULL << 19)
#define HCR_TIDCP     (1ULL << 20)
#define HCR_TACR      (1ULL << 21)
#define HCR_TSW       (1ULL << 22)
#define HCR_TPC       (1ULL << 23)
#define HCR_TPU       (1ULL << 24)
#define HCR_TTLB      (1ULL << 25)
#define HCR_TVM       (1ULL << 26)
#define HCR_TGE       (1ULL << 27)
#define HCR_TDZ       (1ULL << 28)
#define HCR_HCD       (1ULL << 29)
#define HCR_TRVM      (1ULL << 30)
#define HCR_RW        (1ULL << 31)
#define HCR_CD        (1ULL << 32)
#define HCR_ID        (1ULL << 33)
#define HCR_MASK      ((1ULL << 34) - 1)

#define SCR_NS            (1U << 0)
#define SCR_IRQ           (1U << 1)
#define SCR_FIQ           (1U << 2)
#define SCR_EA            (1U << 3)
#define SCR_FW            (1U << 4)
#define SCR_AW            (1U << 5)
#define SCR_NET           (1U << 6)
#define SCR_SMD           (1U << 7)
#define SCR_HCE           (1U << 8)
#define SCR_SIF           (1U << 9)
#define SCR_RW            (1U << 10)
#define SCR_ST            (1U << 11)
#define SCR_TWI           (1U << 12)
#define SCR_TWE           (1U << 13)
#define SCR_AARCH32_MASK  (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK  (0x3fff & ~SCR_NET)

/* Return the current FPSCR value.  */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However, since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07f79f00
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}
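
/* A sketch (not part of this header) of the FPCR/FPSR split: because the two
 * masks are disjoint, a read-modify-write of one register leaves the other's
 * bits in the underlying FPSCR untouched.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_fpcr_write_preserves_fpsr(CPUARMState *env)
{
    uint32_t fpsr = vfp_get_fpsr(env);

    vfp_set_fpcr(env, vfp_get_fpcr(env) | (3 << 22)); /* RMode = round-to-zero */
    assert(vfp_get_fpsr(env) == fpsr);                /* FPSR is unchanged */
}
#endif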

enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers.  */
#define ARM_VFP_FPSID   0
#define ARM_VFP_FPSCR   1
#define ARM_VFP_MVFR2   5
#define ARM_VFP_MVFR1   6
#define ARM_VFP_MVFR0   7
#define ARM_VFP_FPEXC   8
#define ARM_VFP_FPINST  9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers.  */
#define ARM_IWMMXT_wCID  0
#define ARM_IWMMXT_wCon  1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_VFP,
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_MPU,    /* Only has Memory Protection Unit, not full MMU.  */
    ARM_FEATURE_VFP3,
    ARM_FEATURE_VFP_FP16,
    ARM_FEATURE_NEON,
    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
    ARM_FEATURE_M, /* Microcontroller profile.  */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP,   /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to that level.
 * Unlike arm_is_secure() (which is always a question about the
 * _current_ state of the CPU) this doesn't care about the current
 * EL or mode.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return !(env->cp15.scr_el3 & SCR_NS);
    } else {
        /* If EL3 is not supported then the secure state is implementation
         * defined, in which case QEMU defaults to non-secure.
         */
        return false;
    }
}

/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                   (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return arm_is_secure_below_el3(env);
}

#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}
#endif

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* We don't currently support EL2, and this isn't valid for EL0
     * (if we're in EL0, is_a64() is what you want, and if we're not in EL0
     * then the state of EL0 isn't well defined.)
     */
    assert(el == 1 || el == 3);

    /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This
     * is a QEMU-imposed simplification which we may wish to change later.
     * If we in future support EL2 and/or EL3, then the state of lower
     * exception levels is controlled by the HCR.RW and SCR.RW bits.
     */
    return arm_feature(env, ARM_FEATURE_AARCH64);
}

/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                \
        if (_secure) {                                  \
            (_env)->cp15._regname##_s = (_val);         \
        } else {                                        \
            (_env)->cp15._regname##_ns = (_val);        \
        }                                               \
    } while (0)

/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system.  These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)                          \
    A32_BANKED_REG_GET((_env), _regname,                                    \
                       ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                      \
    A32_BANKED_REG_SET((_env), _regname,                                      \
                       ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))), \
                       (_val))
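
/* A sketch (not part of this header) of these macros in use, picking the
 * DACR banks defined in the cp15 struct above:
 */
#if 0 /* illustrative sketch, not compiled */
static void example_banked_dacr_access(CPUARMState *env)
{
    uint64_t val;

    /* Explicitly select a bank ... */
    A32_BANKED_REG_SET(env, dacr, true, 0x55555555);  /* secure bank */
    val = A32_BANKED_REG_GET(env, dacr, false);       /* non-secure bank */

    /* ... or let the current secure state (outside translation) decide: */
    A32_BANKED_CURRENT_REG_SET(env, dacr, val);
}
#endif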

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);

/* Interface between CPU and Interrupt controller.  */
void armv7m_nvic_set_pending(void *opaque, int irq);
int armv7m_nvic_acknowledge_irq(void *opaque);
void armv7m_nvic_complete_irq(void *opaque, int irq);

/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/* To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)

#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)       \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |  \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
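
/* For example (an illustrative sketch, not part of this header), the AArch32
 * DACR lives at cp15, crn=3, crm=0, opc1=0, opc2=0 as a 32 bit register, and
 * its two security banks get distinct hashtable keys:
 */
#if 0 /* illustrative sketch, not compiled */
static const uint32_t example_dacr_ns_key = ENCODE_CP_REG(15, 0, 1, 3, 0, 0, 0);
static const uint32_t example_dacr_s_key  = ENCODE_CP_REG(15, 0, 0, 3, 0, 0, 0);
#endif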

/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /* KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}
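
/* A sketch (not part of this header) showing the conversion in both
 * directions for an AArch64 sysreg ID; CP_REG_ARM64_SYSREG is assumed to
 * come from kvm-consts.h alongside the other CP_REG_* constants used here.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_cpreg_id_roundtrip(void)
{
    uint64_t kvmid = CP_REG_ARM64 | CP_REG_SIZE_U64 | CP_REG_ARM64_SYSREG |
                     (3 << CP_REG_ARM64_SYSREG_OP1_SHIFT);
    uint32_t key = kvm_to_cpreg_id(kvmid);

    assert(key & CP_REG_AA64_MASK);          /* marked as an AArch64 reg */
    assert(cpreg_to_kvm_id(key) == kvmid);   /* and converts back exactly */
}
#endif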

/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [15..8] indicate what behaviour
 * it has. Otherwise it is a simple cp reg, where CONST indicates that
 * TCG can assume the value to be constant (ie load at translate time)
 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
 * indicates that the TB should not be ended after a write to this register
 * (the default is that the TB ends after cp writes). OVERRIDE permits
 * a register definition to override a previous definition for the
 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
 * old must have the OVERRIDE bit set.
 * NO_MIGRATE indicates that this register should be ignored for migration
 * (eg because any state is accessed via some other coprocessor register).
 * IO indicates that this register does I/O and therefore its accesses
 * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
 * registers which implement clocks or timers require this.
 */
#define ARM_CP_SPECIAL 1
#define ARM_CP_CONST 2
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
#define ARM_CP_NO_MIGRATE 32
#define ARM_CP_IO 64
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0x7f

/* Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
};

/* ARM CP register secure state flags.  These flags identify security state
 * attributes for a given CP register entry.
 * The existence of both or neither secure and non-secure flags indicates that
 * the register has both a secure and non-secure hash entry.  A single one of
 * these flags causes the register to only be hashed for the specified
 * security state.
 * Although definitions may have any combination of the S/NS bits, each
 * registered entry will only have one to identify whether the entry is secure
 * or non-secure.
 */
enum {
    ARM_CP_SECSTATE_S =  (1 << 0), /* bit[0]: Secure state register */
    ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
};

/* Return true if cptype is a valid type field. This is used to try to
 * catch errors where the sentinel has been accidentally left off the end
 * of a list of registers.
 */
static inline bool cptype_valid(int cptype)
{
    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
        || ((cptype & ARM_CP_SPECIAL) &&
            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
}

/* Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
#define PL3_R 0x80
#define PL3_W 0x40
#define PL2_R (0x20 | PL3_R)
#define PL2_W (0x10 | PL3_W)
#define PL1_R (0x08 | PL2_R)
#define PL1_W (0x04 | PL2_W)
#define PL0_R (0x02 | PL1_R)
#define PL0_W (0x01 | PL1_W)

#define PL3_RW (PL3_R | PL3_W)
#define PL2_RW (PL2_R | PL2_W)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}

typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);

#define CP_ANY 0xff

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
    /* Execution state in which this register is visible: ARM_CP_STATE_* */
    int state;
    /* Register type: ARM_CP_* bits/values */
    int type;
    /* Access rights: PL*_[RW] */
    int access;
    /* Security state: ARM_CP_SECSTATE_* bits/values */
    int secure;
    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
     * this register was defined: can be used to hand data through to the
     * register read/write functions, since they are passed the ARMCPRegInfo*.
     */
    void *opaque;
    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
     * fieldoffset is non-zero, the reset value of the register.
     */
    uint64_t resetvalue;
    /* Offset of the field in CPUARMState for this register.
     *
     * This is not needed if either:
     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
     *  2. both readfn and writefn are specified
     */
    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */

    /* Offsets of the secure and non-secure fields in CPUARMState for the
     * register if it is banked.  These fields are only used during the static
     * registration of a register.  During hashing the bank associated
     * with a given security state is copied to fieldoffset which is used from
     * there on out.
     *
     * It is expected that register definitions use either fieldoffset or
     * bank_fieldoffsets in the definition but not both.  It is also expected
     * that both bank offsets are set when defining a banked register; setting
     * both offsets is what marks a register as banked.
     */
    ptrdiff_t bank_fieldoffsets[2];

    /* Function for making any access checks for this register in addition to
     * those specified by the 'access' permissions bits. If NULL, no extra
     * checks required. The access check is performed at runtime, not at
     * translate time.
     */
    CPAccessFn *accessfn;
    /* Function for handling reads of this register. If NULL, then reads
     * will be done by loading from the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPReadFn *readfn;
    /* Function for handling writes of this register. If NULL, then writes
     * will be done by writing to the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPWriteFn *writefn;
    /* Function for doing a "raw" read; used when we need to copy
     * coprocessor state to the kernel for KVM or out for
     * migration. This only needs to be provided if there is also a
     * readfn and it has side effects (for instance clear-on-read bits).
     */
    CPReadFn *raw_readfn;
    /* Function for doing a "raw" write; used when we need to copy KVM
     * kernel coprocessor state into userspace, or for inbound
     * migration. This only needs to be provided if there is also a
     * writefn and it masks out "unwritable" bits or has write-one-to-clear
     * or similar behaviour.
     */
    CPWriteFn *raw_writefn;
    /* Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;
};

/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
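
/* A sketch (not part of this header; the in-tree DACR definition lives in
 * helper.c and also wires up a writefn) of how a banked AArch32 register is
 * described with bank_fieldoffsets, assuming the secure bank goes in slot 0:
 */
#if 0 /* illustrative sketch, not compiled */
static const ARMCPRegInfo example_banked_dacr[] = {
    { .name = "DACR", .cp = 15, .crn = 3, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    REGINFO_SENTINEL
};
#endif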

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, 0);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);

/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}
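
/* A sketch (not part of this header) of how the nested PL bits behave:
 * PL0_R implies readability at every higher level, so a PL0_R|PL1_W register
 * is readable from EL0 but writable only from EL1 and up.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_access_rights(void)
{
    ARMCPRegInfo ri = { .access = PL0_R | PL1_W };

    assert(cp_access_ok(0, &ri, 1));   /* EL0 read: allowed */
    assert(!cp_access_ok(0, &ri, 0));  /* EL0 write: denied */
    assert(cp_access_ok(1, &ri, 0));   /* EL1 write: allowed */
}
#endif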

/**
 * write_list_to_cpustate
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the CPUARMState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the CPUARMState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu);

/* Does the core conform to the "MicroController" profile, e.g. Cortex-M3?
   Note the M in older cores (eg. ARM7TDMI) stands for Multiply; these are
   conventional cores (ie. Application or Realtime profile).  */

#define IS_M(env) arm_feature(env, ARM_FEATURE_M)

#define ARM_CPUID_TI915T      0x54029152
#define ARM_CPUID_TI925T      0x54029252

#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* The ARM MMU allows 1k pages.  */
/* ??? Linux doesn't actually use these, and they're deprecated in recent
   architecture revisions.  Maybe a configure option to disable them.  */
#define TARGET_PAGE_BITS 10
#endif

#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_el(env);
    unsigned int target_el = arm_excp_target_el(cs, excp_idx);
    bool secure = arm_is_secure(env);
    uint32_t scr;
    uint32_t hcr;
    bool pstate_unmasked;
    int8_t unmasked = 0;

    /* Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken but left
     * pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        /* If FIQs are routed to EL3 or EL2 then there are cases where we
         * override the CPSR.F in determining if the exception is masked or
         * not. If neither of these is set then we fall back to the CPSR.F
         * setting; otherwise we further assess the state below.
         */
        hcr = (env->cp15.hcr_el2 & HCR_FMO);
        scr = (env->cp15.scr_el3 & SCR_FIQ);

        /* When EL3 is 32-bit, the SCR.FW bit controls whether the CPSR.F bit
         * masks FIQ interrupts when taken in non-secure state. If SCR.FW is
         * set then FIQs can be masked by CPSR.F when non-secure, but only
         * when FIQs are routed solely to EL3.
         */
        scr &= !((env->cp15.scr_el3 & SCR_FW) && !hcr);
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        /* When EL3 execution state is 32-bit, if HCR.IMO is set then we may
         * override the CPSR.I masking when in non-secure state. The SCR.IRQ
         * setting has already been taken into consideration when setting the
         * target EL, so it does not have a further effect here.
         */
        hcr = (env->cp15.hcr_el2 & HCR_IMO);
        scr = false;
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
            /* VFIQs are only taken when virtualized and non-secure.  */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
            /* VIRQs are only taken when virtualized and non-secure.  */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /* Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        if (arm_el_is_aa64(env, 3) || ((scr || hcr) && (!secure))) {
            unmasked = 1;
        }
    }

    /* The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}
1464
1465 static inline CPUARMState *cpu_init(const char *cpu_model)
1466 {
1467 ARMCPU *cpu = cpu_arm_init(cpu_model);
1468 if (cpu) {
1469 return &cpu->env;
1470 }
1471 return NULL;
1472 }
1473
1474 #define cpu_exec cpu_arm_exec
1475 #define cpu_gen_code cpu_arm_gen_code
1476 #define cpu_signal_handler cpu_arm_signal_handler
1477 #define cpu_list arm_cpu_list
1478
1479 /* MMU modes definitions */
1480 #define MMU_MODE0_SUFFIX _user
1481 #define MMU_MODE1_SUFFIX _kernel
1482 #define MMU_USER_IDX 0
1483 static inline int cpu_mmu_index (CPUARMState *env)
1484 {
1485 return arm_current_el(env);
1486 }
1487
1488 /* Return the Exception Level targeted by debug exceptions;
1489 * currently always EL1 since we don't implement EL2 or EL3.
1490 */
1491 static inline int arm_debug_target_el(CPUARMState *env)
1492 {
1493 return 1;
1494 }
1495
1496 static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
1497 {
1498 if (arm_current_el(env) == arm_debug_target_el(env)) {
1499 if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
1500 || (env->daif & PSTATE_D)) {
1501 return false;
1502 }
1503 }
1504 return true;
1505 }
1506
1507 static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
1508 {
1509 if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) {
1510 return aa64_generate_debug_exceptions(env);
1511 }
1512 return arm_current_el(env) != 2;
1513 }

/* Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *    if UsingAArch32() then
 *        return AArch32.GenerateDebugExceptions()
 *    else
 *        return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 *
 * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
 * don't yet implement those exception levels or their associated trap bits.
 */
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (env->aarch64) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
static inline bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1) /* MDSCR_EL1.SS */
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

#include "exec/cpu-all.h"

/* Bit usage in the TB flags field: bit 31 indicates whether we are
 * in 32 or 64 bit mode. The meaning of the other bits depends on that.
 */
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK  (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)

/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT      0
#define ARM_TBFLAG_THUMB_MASK       (1 << ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN_SHIFT     1
#define ARM_TBFLAG_VECLEN_MASK      (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT  4
#define ARM_TBFLAG_VECSTRIDE_MASK   (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_PRIV_SHIFT       6
#define ARM_TBFLAG_PRIV_MASK        (1 << ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT      7
#define ARM_TBFLAG_VFPEN_MASK       (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT   8
#define ARM_TBFLAG_CONDEXEC_MASK    (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
#define ARM_TBFLAG_BSWAP_CODE_MASK  (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
#define ARM_TBFLAG_CPACR_FPEN_MASK  (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE_SHIFT  18
#define ARM_TBFLAG_SS_ACTIVE_MASK   (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS_SHIFT  19
#define ARM_TBFLAG_PSTATE_SS_MASK   (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime.
 */
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
#define ARM_TBFLAG_XSCALE_CPAR_MASK  (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
/* Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
#define ARM_TBFLAG_NS_SHIFT         22
#define ARM_TBFLAG_NS_MASK          (1 << ARM_TBFLAG_NS_SHIFT)

/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_AA64_EL_SHIFT    0
#define ARM_TBFLAG_AA64_EL_MASK     (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN_SHIFT  2
#define ARM_TBFLAG_AA64_FPEN_MASK   (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK  (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 4
#define ARM_TBFLAG_AA64_PSTATE_SS_MASK  (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)

/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
    (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
    (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
    (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
    (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_PRIV(F) \
    (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
    (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
    (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE(F) \
    (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
#define ARM_TBFLAG_CPACR_FPEN(F) \
    (((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_AA64_EL(F) \
    (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN(F) \
    (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_AA64_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_NS(F) \
    (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)

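/* A small round-trip sketch for the accessors above (the function name is
 * illustrative, not part of the original header): build an AArch32 flags
 * word by hand and confirm each field decodes to what was encoded.
 */
static inline bool arm_tbflag_roundtrip_ok(void)
{
    int flags = (1 << ARM_TBFLAG_THUMB_SHIFT)        /* Thumb state */
                | (0xe << ARM_TBFLAG_CONDEXEC_SHIFT) /* IT-block bits */
                | (1 << ARM_TBFLAG_NS_SHIFT);        /* non-secure bank */

    return ARM_TBFLAG_AARCH64_STATE(flags) == 0 /* AArch32 encoding */
        && ARM_TBFLAG_THUMB(flags) == 1
        && ARM_TBFLAG_CONDEXEC(flags) == 0xe
        && ARM_TBFLAG_NS(flags) == 1;
}
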
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    int fpen;

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* CPACR[21:20] controls FP/Neon (cp10/cp11) access */
        fpen = extract32(env->cp15.c1_coproc, 20, 2);
    } else {
        /* CPACR doesn't exist before v6, so VFP is always accessible */
        fpen = 3;
    }

    if (is_a64(env)) {
        *pc = env->pc;
        *flags = ARM_TBFLAG_AARCH64_STATE_MASK
            | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
        /* FPEN == 3: no traps; FPEN == 1: trap EL0 accesses only */
        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
            *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
        }
        /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
         * states defined in the ARM ARM for software singlestep:
         *  SS_ACTIVE   PSTATE.SS   State
         *     0            x       Inactive (the TB flag for SS is always 0)
         *     1            0       Active-pending
         *     1            1       Active-not-pending
         */
        if (arm_singlestep_active(env)) {
            *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK;
            if (env->pstate & PSTATE_SS) {
                *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK;
            }
        }
    } else {
        int privmode;
        *pc = env->regs[15];
        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
        if (arm_feature(env, ARM_FEATURE_M)) {
            privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
        } else {
            privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
        }
        if (privmode) {
            *flags |= ARM_TBFLAG_PRIV_MASK;
        }
        if (!(access_secure_reg(env))) {
            *flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)   /* FPEXC.EN */
            || arm_el_is_aa64(env, 1)) {
            *flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
            *flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
        }
        /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
         * states defined in the ARM ARM for software singlestep:
         *  SS_ACTIVE   PSTATE.SS   State
         *     0            x       Inactive (the TB flag for SS is always 0)
         *     1            0       Active-pending
         *     1            1       Active-not-pending
         */
        if (arm_singlestep_active(env)) {
            *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
            if (env->uncached_cpsr & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    *cs_base = 0;
}
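
/* Usage sketch (illustrative): the translator snapshots the (pc, cs_base,
 * flags) triple filled in above and uses it as the lookup key for cached
 * translation blocks, so any state folded into *flags forces a separate TB:
 *
 *     target_ulong pc, cs_base;
 *     int flags;
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     // look up (or translate) the TB keyed on (pc, cs_base, flags)
 */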

#include "exec/exec-all.h"

static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
{
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        env->pc = tb->pc;
    } else {
        env->regs[15] = tb->pc;
    }
}

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};
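
/* An illustrative helper (hypothetical name, not part of QEMU's API)
 * mapping a PSCI conduit to the exception a guest raises for PSCI calls;
 * EXCP_SMC and EXCP_HVC are the exception indexes defined earlier in this
 * header.
 */
static inline int qemu_psci_conduit_to_excp(int conduit)
{
    switch (conduit) {
    case QEMU_PSCI_CONDUIT_SMC:
        return EXCP_SMC;
    case QEMU_PSCI_CONDUIT_HVC:
        return EXCP_HVC;
    default:
        return -1; /* QEMU_PSCI_CONDUIT_DISABLED: no conduit configured */
    }
}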

#endif