/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ARM_H
#define CPU_ARM_H

#include "config.h"

#include "kvm-consts.h"

#if defined(TARGET_AARCH64)
 /* AArch64 definitions */
# define TARGET_LONG_BITS 64
# define ELF_MACHINE EM_AARCH64
#else
# define TARGET_LONG_BITS 32
# define ELF_MACHINE EM_ARM
#endif

#define CPUArchState struct CPUARMState

#include "qemu-common.h"
#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define TARGET_HAS_ICE 1

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_STREX          10
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15

#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif

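/* Illustrative example (added commentary, not part of the original header):
 * for a 64 bit state field such as cp15.ttbr0_el1, the AArch32 view of the
 * register can be described with something like
 *     offsetoflow32(CPUARMState, cp15.ttbr0_el1)
 * which always names the least significant 32 bits of the field, regardless
 * of whether the host is big- or little-endian.
 */
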
/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

typedef void ARMWriteCPFunc(void *opaque, int cp_info,
                            int srcreg, int operand, uint32_t value);
typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
                               int dstreg, int operand);

struct arm_boot_info;

#define NB_MMU_MODES 2

/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of a FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
#define NUM_GTIMERS 2

typedef struct CPUARMState {
    /* Regs for current mode.  */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits.  Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[6];
    uint32_t banked_r14[6];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is the bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint64_t daif; /* exception masks, in the bit positions they occupy in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        uint64_t c0_cssel; /* Cache size selection.  */
        uint64_t c1_sys; /* System control register.  */
        uint64_t c1_coproc; /* Coprocessor access register.  */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t ttbr0_el1; /* MMU translation table base 0. */
        uint64_t ttbr1_el1; /* MMU translation table base 1. */
        uint64_t c2_control; /* MMU translation table base control. */
        uint32_t c2_mask; /* MMU translation table base selection mask. */
        uint32_t c2_base_mask; /* MMU translation table base 0 mask. */
        uint32_t c2_data; /* MPU data cachable bits.  */
        uint32_t c2_insn; /* MPU instruction cachable bits.  */
        uint32_t c3; /* MMU domain access control register
                        MPU write buffer control.  */
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        uint32_t ifsr_el2; /* Fault status registers.  */
        uint64_t esr_el[4];
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        uint64_t far_el[4]; /* Fault address registers.  */
        uint64_t par_el1;  /* Translation result. */
        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint32_t c9_pmovsr; /* perf monitor overflow status */
        uint32_t c9_pmxevtyper; /* perf monitor event type */
        uint32_t c9_pmuserenr; /* perf monitor user enable */
        uint32_t c9_pminten; /* perf monitor interrupt enables */
        uint64_t mair_el1;
        uint64_t vbar_el[4]; /* vector base address register */
        uint32_t c13_fcse; /* FCSE PID.  */
        uint64_t contextidr_el1; /* Context ID.  */
        uint64_t tpidr_el0; /* User RW Thread register.  */
        uint64_t tpidrro_el0; /* User RO Thread register.  */
        uint64_t tpidr_el1; /* Privileged Thread register.  */
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        /* If the counter is enabled, this stores the last time the counter
         * was reset. Otherwise it stores the counter value
         */
        uint64_t c15_ccnt;
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
    } cp15;

    struct {
        uint32_t other_sp;
        uint32_t vecbase;
        uint32_t basepri;
        uint32_t control;
        int current_sp;
        int exception;
        int pending_exception;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Thumb-2 EE state.  */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state.  */
    struct {
        /* VFP/Neon register state. Note that the mapping between S, D and Q
         * views of the register bank differs between AArch64 and AArch32:
         * In AArch32:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[n]
         *  Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
         *  (and regs[32] to regs[63] are inaccessible)
         * In AArch64:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[2n]
         *  Sn = regs[2n] bits 31..0
         * This corresponds to the architecturally defined mapping between
         * the two execution states, and means we do not need to explicitly
         * map these registers when changing states.
         */
        float64 regs[64];
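        /* Worked example of the mapping above (added commentary, not part of
         * the original source): in AArch32, S1 is bits [63:32] of regs[0]
         * and D1 is regs[1]; in AArch64, D1 is regs[2] and S1 is bits [31:0]
         * of regs[2]. Q0 occupies regs[1]:regs[0] in both execution states.
         */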

        uint32_t xregs[16];
        /* We store these fpcsr fields separately for convenience.  */
        int vec_len;
        int vec_stride;

        /* scratch space when Tn are not sufficient.  */
        uint32_t scratch[8];

        /* fp_status is the "normal" fp status. standard_fp_status retains
         * values corresponding to the ARM "Standard FPSCR Value", ie
         * default-NaN, flush-to-zero, round-to-nearest and is used by
         * any operations (generally Neon) which the architecture defines
         * as controlled by the standard FPSCR value rather than the FPSCR.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the two fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status standard_fp_status;
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
    uint64_t exclusive_test;
    uint32_t exclusive_info;
#endif

    /* iwMMXt coprocessor state.  */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

    /* For mixed endian mode.  */
    bool bswap_code;

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation.  */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    CPU_COMMON

    /* These fields come after the common ones so they are preserved on reset.  */

    /* Internal CPU feature flags.  */
    uint64_t features;

    void *nvic;
    const struct arm_boot_info *boot_info;
} CPUARMState;

#include "cpu-qom.h"

ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUARMState *s);
uint32_t do_arm_semihosting(CPUARMState *env);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);
int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                             int mmu_idx);

/**
 * pmccntr_sync
 * @env: CPUARMState
 *
 * Synchronises the counter in the PMCCNTR. This must always be called twice,
 * once before any action that might affect the timer and again afterwards.
 * The function is used to swap the state of the register if required.
 * This only happens when not in user mode (!CONFIG_USER_ONLY)
 */
void pmccntr_sync(CPUARMState *env);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M       (1U << 0)
#define SCTLR_A       (1U << 1)
#define SCTLR_C       (1U << 2)
#define SCTLR_W       (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA      (1U << 3)
#define SCTLR_P       (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_SA0     (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D       (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L       (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_B       (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD     (1U << 7) /* v8 onward */
#define SCTLR_S       (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED     (1U << 8) /* v8 onward */
#define SCTLR_R       (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA     (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F       (1U << 10) /* up to v6 */
#define SCTLR_SW      (1U << 10) /* v7 onward */
#define SCTLR_Z       (1U << 11)
#define SCTLR_I       (1U << 12)
#define SCTLR_V       (1U << 13)
#define SCTLR_RR      (1U << 14) /* up to v7 */
#define SCTLR_DZE     (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4      (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT     (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT      (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI    (1U << 16) /* v8 onward */
#define SCTLR_HA      (1U << 17)
#define SCTLR_IT      (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE    (1U << 18) /* v8 onward */
#define SCTLR_WXN     (1U << 19)
#define SCTLR_ST      (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN    (1U << 20) /* v7 onward */
#define SCTLR_FI      (1U << 21)
#define SCTLR_U       (1U << 22)
#define SCTLR_XP      (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_VE      (1U << 24) /* up to v7 */
#define SCTLR_E0E     (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE      (1U << 25)
#define SCTLR_L2      (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI     (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI    (1U << 27)
#define SCTLR_TRE     (1U << 28)
#define SCTLR_AFE     (1U << 29)
#define SCTLR_TE      (1U << 30)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
    | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits.  MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}

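/* Illustrative check (added commentary, not part of the original header):
 * with the encodings above, aarch64_pstate_mode(1, true) is (1 << 2) | 1 == 5,
 * i.e. PSTATE_MODE_EL1h, and aarch64_pstate_mode(0, false) is PSTATE_MODE_EL0t.
 */
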
/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif;
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}

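/* Sketch of the caching convention above (added commentary, not from the
 * original source): the N, Z, C and V bits never live in env->pstate; they
 * are redistributed into the split-out NF/ZF/CF/VF fields on write and
 * reassembled on read, so for a value containing only architecturally
 * defined bits, pstate_read(env) after pstate_write(env, val) returns val.
 */
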
/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);
/* Set the CPSR.  Note that some bits of mask must be all-set or all-clear.  */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask);

/* Return the current xPSR value.  */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | env->v7m.exception;
}

/* Set the xPSR.  Note that some bits of mask must be all-set or all-clear.  */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & (1 << 24))
        env->thumb = ((val & (1 << 24)) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & 0x1ff) {
        env->v7m.exception = val & 0x1ff;
    }
}

#define HCR_VM        (1ULL << 0)
#define HCR_SWIO      (1ULL << 1)
#define HCR_PTW       (1ULL << 2)
#define HCR_FMO       (1ULL << 3)
#define HCR_IMO       (1ULL << 4)
#define HCR_AMO       (1ULL << 5)
#define HCR_VF        (1ULL << 6)
#define HCR_VI        (1ULL << 7)
#define HCR_VSE       (1ULL << 8)
#define HCR_FB        (1ULL << 9)
#define HCR_BSU_MASK  (3ULL << 10)
#define HCR_DC        (1ULL << 12)
#define HCR_TWI       (1ULL << 13)
#define HCR_TWE       (1ULL << 14)
#define HCR_TID0      (1ULL << 15)
#define HCR_TID1      (1ULL << 16)
#define HCR_TID2      (1ULL << 17)
#define HCR_TID3      (1ULL << 18)
#define HCR_TSC       (1ULL << 19)
#define HCR_TIDCP     (1ULL << 20)
#define HCR_TACR      (1ULL << 21)
#define HCR_TSW       (1ULL << 22)
#define HCR_TPC       (1ULL << 23)
#define HCR_TPU       (1ULL << 24)
#define HCR_TTLB      (1ULL << 25)
#define HCR_TVM       (1ULL << 26)
#define HCR_TGE       (1ULL << 27)
#define HCR_TDZ       (1ULL << 28)
#define HCR_HCD       (1ULL << 29)
#define HCR_TRVM      (1ULL << 30)
#define HCR_RW        (1ULL << 31)
#define HCR_CD        (1ULL << 32)
#define HCR_ID        (1ULL << 33)
#define HCR_MASK      ((1ULL << 34) - 1)

#define SCR_NS           (1U << 0)
#define SCR_IRQ          (1U << 1)
#define SCR_FIQ          (1U << 2)
#define SCR_EA           (1U << 3)
#define SCR_FW           (1U << 4)
#define SCR_AW           (1U << 5)
#define SCR_NET          (1U << 6)
#define SCR_SMD          (1U << 7)
#define SCR_HCE          (1U << 8)
#define SCR_SIF          (1U << 9)
#define SCR_RW           (1U << 10)
#define SCR_ST           (1U << 11)
#define SCR_TWI          (1U << 12)
#define SCR_TWE          (1U << 13)
#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)

/* Return the current FPSCR value.  */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07f79f00
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

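/* Added note (not from the original source): because FPSR_MASK and FPCR_MASK
 * do not overlap, vfp_set_fpcr() cannot disturb any FPSR bit and vice versa.
 * For example, changing the rounding mode via vfp_set_fpcr() leaves the
 * cumulative exception flags returned by vfp_get_fpsr() unchanged.
 */
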
enum arm_cpu_mode {
  ARM_CPU_MODE_USR = 0x10,
  ARM_CPU_MODE_FIQ = 0x11,
  ARM_CPU_MODE_IRQ = 0x12,
  ARM_CPU_MODE_SVC = 0x13,
  ARM_CPU_MODE_MON = 0x16,
  ARM_CPU_MODE_ABT = 0x17,
  ARM_CPU_MODE_HYP = 0x1a,
  ARM_CPU_MODE_UND = 0x1b,
  ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers.  */
#define ARM_VFP_FPSID   0
#define ARM_VFP_FPSCR   1
#define ARM_VFP_MVFR2   5
#define ARM_VFP_MVFR1   6
#define ARM_VFP_MVFR0   7
#define ARM_VFP_FPEXC   8
#define ARM_VFP_FPINST  9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers.  */
#define ARM_IWMMXT_wCID  0
#define ARM_IWMMXT_wCon  1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_VFP,
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_MPU,    /* Only has Memory Protection Unit, not full MMU.  */
    ARM_FEATURE_VFP3,
    ARM_FEATURE_VFP_FP16,
    ARM_FEATURE_NEON,
    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
    ARM_FEATURE_M, /* Microcontroller profile.  */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* We don't currently support EL2 or EL3, and this isn't valid for EL0
     * (if we're in EL0, is_a64() is what you want, and if we're not in EL0
     * then the state of EL0 isn't well defined.)
     */
    assert(el == 1);
    /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This
     * is a QEMU-imposed simplification which we may wish to change later.
     * If we in future support EL2 and/or EL3, then the state of lower
     * exception levels is controlled by the HCR.RW and SCR.RW bits.
     */
    return arm_feature(env, ARM_FEATURE_AARCH64);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);

/* Interface between CPU and Interrupt controller.  */
void armv7m_nvic_set_pending(void *opaque, int irq);
int armv7m_nvic_acknowledge_irq(void *opaque);
void armv7m_nvic_complete_irq(void *opaque, int irq);

/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2)   \
    (((cp) << 16) | ((is64) << 15) | ((crn) << 11) |    \
     ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))

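/* Illustrative example (added commentary, not part of the original header):
 * the AArch32 SCTLR is cp15, crn=1, crm=0, opc1=0, opc2=0 and is accessed
 * via MRC/MCR, so its hashtable key would be built as
 *     ENCODE_CP_REG(15, 0, 1, 0, 0, 0)
 * while a 64 bit (MRRC/MCRR) register such as the LPAE TTBR0 (cp15, crm=2,
 * opc1=0) would use ENCODE_CP_REG(15, 1, 0, 2, 0, 0) with crn and opc2 zero.
 */
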
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
        cpregid |= (1 << 15);
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}

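/* Added note (not from the original source): the two conversions above are
 * intended to be inverses for well-formed IDs. The architecture and size
 * fields live in the upper half of the 64 bit KVM ID, and the AArch64 marker
 * and 64-bit flag live in bit 28 / bit 15 of the 32 bit key, so
 *     kvm_to_cpreg_id(cpreg_to_kvm_id(key)) == key
 * for any key produced by ENCODE_CP_REG or ENCODE_AA64_CP_REG.
 */
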
/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [15..8] indicate what behaviour
 * it has. Otherwise it is a simple cp reg, where CONST indicates that
 * TCG can assume the value to be constant (ie load at translate time)
 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
 * indicates that the TB should not be ended after a write to this register
 * (the default is that the TB ends after cp writes). OVERRIDE permits
 * a register definition to override a previous definition for the
 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
 * old must have the OVERRIDE bit set.
 * NO_MIGRATE indicates that this register should be ignored for migration;
 * (eg because any state is accessed via some other coprocessor register).
 * IO indicates that this register does I/O and therefore its accesses
 * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
 * registers which implement clocks or timers require this.
 */
#define ARM_CP_SPECIAL 1
#define ARM_CP_CONST 2
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
#define ARM_CP_NO_MIGRATE 32
#define ARM_CP_IO 64
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0x7f

/* Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
};

/* Return true if cptype is a valid type field. This is used to try to
 * catch errors where the sentinel has been accidentally left off the end
 * of a list of registers.
 */
static inline bool cptype_valid(int cptype)
{
    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
        || ((cptype & ARM_CP_SPECIAL) &&
            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
}

/* Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
#define PL3_R 0x80
#define PL3_W 0x40
#define PL2_R (0x20 | PL3_R)
#define PL2_W (0x10 | PL3_W)
#define PL1_R (0x08 | PL2_R)
#define PL1_W (0x04 | PL2_W)
#define PL0_R (0x02 | PL1_R)
#define PL0_W (0x01 | PL1_W)

#define PL3_RW (PL3_R | PL3_W)
#define PL2_RW (PL2_R | PL2_W)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)

static inline int arm_current_pl(CPUARMState *env)
{
    if (env->aarch64) {
        return extract32(env->pstate, 2, 2);
    }

    if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) {
        return 0;
    }
    /* We don't currently implement the Virtualization or TrustZone
     * extensions, so PL2 and PL3 don't exist for us.
     */
    return 1;
}

typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);

#define CP_ANY 0xff

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
    /* Execution state in which this register is visible: ARM_CP_STATE_* */
    int state;
    /* Register type: ARM_CP_* bits/values */
    int type;
    /* Access rights: PL*_[RW] */
    int access;
    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
     * this register was defined: can be used to hand data through to the
     * register read/write functions, since they are passed the ARMCPRegInfo*.
     */
    void *opaque;
    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
     * fieldoffset is non-zero, the reset value of the register.
     */
    uint64_t resetvalue;
    /* Offset of the field in CPUARMState for this register. This is not
     * needed if either:
     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
     *  2. both readfn and writefn are specified
     */
    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
    /* Function for making any access checks for this register in addition to
     * those specified by the 'access' permissions bits. If NULL, no extra
     * checks required. The access check is performed at runtime, not at
     * translate time.
     */
    CPAccessFn *accessfn;
    /* Function for handling reads of this register. If NULL, then reads
     * will be done by loading from the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPReadFn *readfn;
    /* Function for handling writes of this register. If NULL, then writes
     * will be done by writing to the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPWriteFn *writefn;
    /* Function for doing a "raw" read; used when we need to copy
     * coprocessor state to the kernel for KVM or out for
     * migration. This only needs to be provided if there is also a
     * readfn and it has side effects (for instance clear-on-read bits).
     */
    CPReadFn *raw_readfn;
    /* Function for doing a "raw" write; used when we need to copy KVM
     * kernel coprocessor state into userspace, or for inbound
     * migration. This only needs to be provided if there is also a
     * writefn and it masks out "unwritable" bits or has write-one-to-clear
     * or similar behaviour.
     */
    CPWriteFn *raw_writefn;
    /* Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;
};

/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, 0);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);

/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_pl,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_pl * 2) + isread)) & 1;
}

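/* Worked example (added commentary, not from the original source): the
 * PL*_R/W values above pack two bits per privilege level, write in the even
 * bit and read in the odd bit, starting with PL0 at the bottom. So for a
 * register declared with .access = PL1_RW (0xfc), cp_access_ok(0, ri, 1)
 * tests bit 1 and returns false (no EL0 access), while cp_access_ok(1, ri, 1)
 * tests bit 3 and returns true.
 */
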
/**
 * write_list_to_cpustate
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the ARMCPUState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the ARMCPUState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu);

/* Does the core conform to the "MicroController" profile, e.g. Cortex-M3?
   Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are
   conventional cores (ie. Application or Realtime profile).  */

#define IS_M(env) arm_feature(env, ARM_FEATURE_M)

#define ARM_CPUID_TI915T      0x54029152
#define ARM_CPUID_TI925T      0x54029252

#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* The ARM MMU allows 1k pages.  */
/* ??? Linux doesn't actually use these, and they're deprecated in recent
   architecture revisions.  Maybe a configure option to disable them.  */
#define TARGET_PAGE_BITS 10
#endif

#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_pl(env);
    unsigned int target_el = arm_excp_target_el(cs, excp_idx);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state.  */
    bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2;
    /* ARMv7-M interrupt return works by loading a magic value
     * into the PC.  On real hardware the load causes the
     * return to occur.  The qemu implementation performs the
     * jump normally, then does the exception return when the
     * CPU tries to execute code at the magic address.
     * This will cause the magic PC value to be pushed to
     * the stack if an interrupt occurred at the wrong time.
     * We avoid this by disabling interrupts when
     * pc contains a magic address.
     */
    bool irq_unmasked = !(env->daif & PSTATE_I)
                        && (!IS_M(env) || env->regs[15] < 0xfffffff0);

    /* Don't take exceptions if they target a lower EL.  */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) {
            return true;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_IRQ:
        if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) {
            return true;
        }
        return irq_unmasked;
    case EXCP_VFIQ:
        if (!secure && !(env->cp15.hcr_el2 & HCR_FMO)) {
            /* VFIQs are only taken when hypervized and non-secure.  */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!secure && !(env->cp15.hcr_el2 & HCR_IMO)) {
            /* VIRQs are only taken when hypervized and non-secure.  */
            return false;
        }
        return irq_unmasked;
    default:
        g_assert_not_reached();
    }
}

static inline CPUARMState *cpu_init(const char *cpu_model)
{
    ARMCPU *cpu = cpu_arm_init(cpu_model);
    if (cpu) {
        return &cpu->env;
    }
    return NULL;
}

#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _user
#define MMU_MODE1_SUFFIX _kernel
#define MMU_USER_IDX 0
static inline int cpu_mmu_index (CPUARMState *env)
{
    return arm_current_pl(env);
}

/* Return the Exception Level targeted by debug exceptions;
 * currently always EL1 since we don't implement EL2 or EL3.
 */
static inline int arm_debug_target_el(CPUARMState *env)
{
    return 1;
}

static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    if (arm_current_pl(env) == arm_debug_target_el(env)) {
        if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
            || (env->daif & PSTATE_D)) {
            return false;
        }
    }
    return true;
}

static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    if (arm_current_pl(env) == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }
    return arm_current_pl(env) != 2;
}

/* Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *       return AArch32.GenerateDebugExceptions()
 *   else
 *       return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 *
 * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
 * don't yet implement those exception levels or their associated trap bits.
 */
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (env->aarch64) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
static inline bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

#include "exec/cpu-all.h"

/* Bit usage in the TB flags field: bit 31 indicates whether we are
 * in 32 or 64 bit mode. The meaning of the other bits depends on that.
 */
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK  (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)

/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT      0
#define ARM_TBFLAG_THUMB_MASK       (1 << ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN_SHIFT     1
#define ARM_TBFLAG_VECLEN_MASK      (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT  4
#define ARM_TBFLAG_VECSTRIDE_MASK   (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_PRIV_SHIFT       6
#define ARM_TBFLAG_PRIV_MASK        (1 << ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT      7
#define ARM_TBFLAG_VFPEN_MASK       (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT   8
#define ARM_TBFLAG_CONDEXEC_MASK    (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
#define ARM_TBFLAG_BSWAP_CODE_MASK  (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
#define ARM_TBFLAG_CPACR_FPEN_MASK  (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE_SHIFT  18
#define ARM_TBFLAG_SS_ACTIVE_MASK   (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS_SHIFT  19
#define ARM_TBFLAG_PSTATE_SS_MASK   (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime
 */
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)

/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_AA64_EL_SHIFT    0
#define ARM_TBFLAG_AA64_EL_MASK     (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN_SHIFT  2
#define ARM_TBFLAG_AA64_FPEN_MASK   (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 4
#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)

/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
    (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
    (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
    (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
    (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_PRIV(F) \
    (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
    (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
    (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE(F) \
    (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
#define ARM_TBFLAG_CPACR_FPEN(F) \
    (((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_AA64_EL(F) \
    (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN(F) \
    (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_AA64_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)

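/* Illustrative decode of a flags word (added commentary, not from the
 * original source): a consumer first checks ARM_TBFLAG_AARCH64_STATE(flags);
 * if it is set, the AArch32-only accessors are meaningless and e.g.
 * ARM_TBFLAG_AA64_EL(flags) gives the exception level the TB was built for,
 * otherwise ARM_TBFLAG_THUMB(flags) and friends describe the AArch32 state.
 */
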
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    int fpen;

    if (arm_feature(env, ARM_FEATURE_V6)) {
        fpen = extract32(env->cp15.c1_coproc, 20, 2);
    } else {
        /* CPACR doesn't exist before v6, so VFP is always accessible */
        fpen = 3;
    }

    if (is_a64(env)) {
        *pc = env->pc;
        *flags = ARM_TBFLAG_AARCH64_STATE_MASK
            | (arm_current_pl(env) << ARM_TBFLAG_AA64_EL_SHIFT);
        if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
            *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
        }
        /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
         * states defined in the ARM ARM for software singlestep:
         *  SS_ACTIVE   PSTATE.SS   State
         *     0            x       Inactive (the TB flag for SS is always 0)
         *     1            0       Active-pending
         *     1            1       Active-not-pending
         */
        if (arm_singlestep_active(env)) {
            *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK;
            if (env->pstate & PSTATE_SS) {
                *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK;
            }
        }
    } else {
        int privmode;
        *pc = env->regs[15];
        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
        if (arm_feature(env, ARM_FEATURE_M)) {
            privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
        } else {
            privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
        }
        if (privmode) {
            *flags |= ARM_TBFLAG_PRIV_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            *flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
            *flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
        }
        /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
         * states defined in the ARM ARM for software singlestep:
         *  SS_ACTIVE   PSTATE.SS   State
         *     0            x       Inactive (the TB flag for SS is always 0)
         *     1            0       Active-pending
         *     1            1       Active-not-pending
         */
        if (arm_singlestep_active(env)) {
            *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
            if (env->uncached_cpsr & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    *cs_base = 0;
}

#include "exec/exec-all.h"

static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
{
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        env->pc = tb->pc;
    } else {
        env->regs[15] = tb->pc;
    }
}

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};

#endif