target/arm/helper.c (mirror_qemu.git)
Commit: target/arm: Add ignore_stackfaults argument to v7m_exception_taken()
#include "qemu/osdep.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRE 0x1
#endif

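/* GDB stub accessors for the AArch32 VFP/Neon register view. Each
 * function returns the number of bytes read or written for register
 * 'reg' (8 for a D register, 16 for a Q register alias, 4 for the
 * FPSID/FPSCR/FPEXC control registers), or 0 if it does not handle
 * the register.
 */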
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

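/* GDB stub accessors for the AArch64 FP/SIMD register view: V0-V31 as
 * 128-bit values (two little-endian 64-bit halves), followed by FPSR
 * and FPCR as 32-bit values.
 */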
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

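/* Raw accessors for the underlying register state, used for migration
 * and similar "behind the back" accesses: they move data directly to
 * and from the backing store, bypassing any side effects the register's
 * normal readfn/writefn would have.
 */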
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

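/* Copy the (index,value) list back into the CPU state, e.g. on the
 * destination end of a migration. Returns false if a register is
 * unknown or a value does not read back as written.
 */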
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

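/* Callbacks for g_list_foreach() in init_cpreg_list(). count_cpreg()
 * and add_cpreg_to_list() must skip exactly the same registers (the
 * NO_RAW and ALIAS ones); init_cpreg_list() asserts that the counted
 * length matches the number of entries actually collected.
 */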
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

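/* Writes to several of the cp15 registers below must flush the QEMU TLB:
 * QEMU's TLB is keyed only by virtual address and MMU index, so state
 * such as the DACR domains, the FCSE PID or the ASID is not represented
 * in TLB entries and a change can only be honoured by discarding them.
 */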
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the QEMU TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use the arm_cp_write_ignore() function instead of the ARM_CP_NOP
     * flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* User accessibility of the performance monitor registers is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

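/* PMUSERENR bits used by the access-check helpers below: bit 0 is EN
 * (enable EL0 access in general), bit 1 is SW (software increment),
 * bit 2 is CR (cycle counter read) and bit 3 is ER (event counter read).
 */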
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

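/* While the cycle counter is enabled, c15_ccnt holds the difference
 * (reference ticks - guest counter value), so pmccntr_read() can return
 * current ticks minus c15_ccnt. pmccntr_sync() converts between that
 * delta form and an absolute snapshot (it is its own inverse); callers
 * such as pmcr_write() bracket changes that affect the tick rate with
 * a pair of sync calls.
 */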
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when the
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so the A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

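/* Generic timer support. The guest-visible counter is derived from
 * QEMU_CLOCK_VIRTUAL, scaled down by GTIMER_SCALE; each timer is backed
 * by a QEMUTimer which is (re)programmed to fire at the point where
 * ISTATUS would next change state.
 */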
1535 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1536 bool isread)
1537 {
1538 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1539 * Writable only at the highest implemented exception level.
1540 */
1541 int el = arm_current_el(env);
1542
1543 switch (el) {
1544 case 0:
1545 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1546 return CP_ACCESS_TRAP;
1547 }
1548 break;
1549 case 1:
1550 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1551 arm_is_secure_below_el3(env)) {
1552 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1553 return CP_ACCESS_TRAP_UNCATEGORIZED;
1554 }
1555 break;
1556 case 2:
1557 case 3:
1558 break;
1559 }
1560
1561 if (!isread && el < arm_highest_el(env)) {
1562 return CP_ACCESS_TRAP_UNCATEGORIZED;
1563 }
1564
1565 return CP_ACCESS_OK;
1566 }
1567
1568 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1569 bool isread)
1570 {
1571 unsigned int cur_el = arm_current_el(env);
1572 bool secure = arm_is_secure(env);
1573
1574 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1575 if (cur_el == 0 &&
1576 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1577 return CP_ACCESS_TRAP;
1578 }
1579
1580 if (arm_feature(env, ARM_FEATURE_EL2) &&
1581 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1582 !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1583 return CP_ACCESS_TRAP_EL2;
1584 }
1585 return CP_ACCESS_OK;
1586 }
1587
1588 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1589 bool isread)
1590 {
1591 unsigned int cur_el = arm_current_el(env);
1592 bool secure = arm_is_secure(env);
1593
1594 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1595 * EL0[PV]TEN is zero.
1596 */
1597 if (cur_el == 0 &&
1598 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1599 return CP_ACCESS_TRAP;
1600 }
1601
1602 if (arm_feature(env, ARM_FEATURE_EL2) &&
1603 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1604 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1605 return CP_ACCESS_TRAP_EL2;
1606 }
1607 return CP_ACCESS_OK;
1608 }
1609
1610 static CPAccessResult gt_pct_access(CPUARMState *env,
1611 const ARMCPRegInfo *ri,
1612 bool isread)
1613 {
1614 return gt_counter_access(env, GTIMER_PHYS, isread);
1615 }
1616
1617 static CPAccessResult gt_vct_access(CPUARMState *env,
1618 const ARMCPRegInfo *ri,
1619 bool isread)
1620 {
1621 return gt_counter_access(env, GTIMER_VIRT, isread);
1622 }
1623
1624 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1625 bool isread)
1626 {
1627 return gt_timer_access(env, GTIMER_PHYS, isread);
1628 }
1629
1630 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1631 bool isread)
1632 {
1633 return gt_timer_access(env, GTIMER_VIRT, isread);
1634 }
1635
1636 static CPAccessResult gt_stimer_access(CPUARMState *env,
1637 const ARMCPRegInfo *ri,
1638 bool isread)
1639 {
1640 /* The AArch64 register view of the secure physical timer is
1641 * always accessible from EL3, and configurably accessible from
1642 * Secure EL1.
1643 */
1644 switch (arm_current_el(env)) {
1645 case 1:
1646 if (!arm_is_secure(env)) {
1647 return CP_ACCESS_TRAP;
1648 }
1649 if (!(env->cp15.scr_el3 & SCR_ST)) {
1650 return CP_ACCESS_TRAP_EL3;
1651 }
1652 return CP_ACCESS_OK;
1653 case 0:
1654 case 2:
1655 return CP_ACCESS_TRAP;
1656 case 3:
1657 return CP_ACCESS_OK;
1658 default:
1659 g_assert_not_reached();
1660 }
1661 }
1662
1663 static uint64_t gt_get_countervalue(CPUARMState *env)
1664 {
1665 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1666 }
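/*
 * Worked example (illustrative, not part of the original file): assuming
 * GTIMER_SCALE is 16 (its definition in this tree's internals.h), the
 * counter ticks at 1e9 / 16 = 62.5 MHz, which matches the CNTFRQ_EL0
 * reset value of (1000 * 1000 * 1000) / GTIMER_SCALE registered below.
 */
#if 0 /* example only, not compiled */
uint64_t ticks_per_sec = 1000000000ULL / 16;    /* == 62500000 */
#endif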
1667
1668 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1669 {
1670 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1671
1672 if (gt->ctl & 1) {
1673 /* Timer enabled: calculate and set current ISTATUS, irq, and
1674 * reset timer to when ISTATUS next has to change
1675 */
1676 uint64_t offset = timeridx == GTIMER_VIRT ?
1677 cpu->env.cp15.cntvoff_el2 : 0;
1678 uint64_t count = gt_get_countervalue(&cpu->env);
1679 /* Note that this must be unsigned 64 bit arithmetic: */
1680 int istatus = count - offset >= gt->cval;
1681 uint64_t nexttick;
1682 int irqstate;
1683
1684 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1685
1686 irqstate = (istatus && !(gt->ctl & 2));
1687 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1688
1689 if (istatus) {
1690 /* Next transition is when count rolls back over to zero */
1691 nexttick = UINT64_MAX;
1692 } else {
1693 /* Next transition is when we hit cval */
1694 nexttick = gt->cval + offset;
1695 }
1696 /* Note that the desired next expiry time might be beyond the
1697 * signed-64-bit range of a QEMUTimer -- in this case we just
1698 * set the timer for as far in the future as possible. When the
1699 * timer expires we will reset the timer for any remaining period.
1700 */
1701 if (nexttick > INT64_MAX / GTIMER_SCALE) {
1702 nexttick = INT64_MAX / GTIMER_SCALE;
1703 }
1704 timer_mod(cpu->gt_timer[timeridx], nexttick);
1705 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
1706 } else {
1707 /* Timer disabled: ISTATUS and timer output always clear */
1708 gt->ctl &= ~4;
1709 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1710 timer_del(cpu->gt_timer[timeridx]);
1711 trace_arm_gt_recalc_disabled(timeridx);
1712 }
1713 }
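/*
 * Worked example (illustrative, not part of the original file): suppose the
 * virtual timer has cval = 1000, cntvoff_el2 = 0 and the counter reads 600.
 * ISTATUS is then 0 and the QEMUTimer is armed for tick 1000; once the
 * counter passes cval, ISTATUS becomes 1 and the only remaining transition
 * is the (unreachable in practice) 64-bit counter wrap, hence UINT64_MAX
 * clamped to INT64_MAX / GTIMER_SCALE. The unsigned subtraction also gives
 * the right answer when cntvoff_el2 exceeds the raw count, because
 * count - offset wraps modulo 2^64.
 */
#if 0 /* example only, not compiled */
uint64_t count = 600, offset = 0, cval = 1000;
int istatus = count - offset >= cval;                      /* 0 */
uint64_t nexttick = istatus ? UINT64_MAX : cval + offset;  /* 1000 */
#endif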
1714
1715 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1716 int timeridx)
1717 {
1718 ARMCPU *cpu = arm_env_get_cpu(env);
1719
1720 timer_del(cpu->gt_timer[timeridx]);
1721 }
1722
1723 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1724 {
1725 return gt_get_countervalue(env);
1726 }
1727
1728 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1729 {
1730 return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1731 }
1732
1733 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1734 int timeridx,
1735 uint64_t value)
1736 {
1737 trace_arm_gt_cval_write(timeridx, value);
1738 env->cp15.c14_timer[timeridx].cval = value;
1739 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1740 }
1741
1742 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1743 int timeridx)
1744 {
1745 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1746
1747 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1748 (gt_get_countervalue(env) - offset));
1749 }
1750
1751 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1752 int timeridx,
1753 uint64_t value)
1754 {
1755 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1756
1757 trace_arm_gt_tval_write(timeridx, value);
1758 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1759 sextract64(value, 0, 32);
1760 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1761 }
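/*
 * Worked example (illustrative, not part of the original file): TVAL is a
 * signed 32-bit downcounter derived from CVAL. Writing TVAL = 100 when
 * count - offset = 5000 sets cval = 5100, and an immediate read-back gives
 * (uint32_t)(5100 - 5000) = 100. Negative writes work via sextract64():
 * TVAL = -1 sets cval one tick in the past, so ISTATUS fires at once.
 */
#if 0 /* example only, not compiled */
uint64_t now  = 5000;                            /* count - offset */
uint64_t cval = now + sextract64(100, 0, 32);    /* TVAL write of 100 */
uint32_t tval = (uint32_t)(cval - now);          /* reads back as 100 */
#endif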
1762
1763 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1764 int timeridx,
1765 uint64_t value)
1766 {
1767 ARMCPU *cpu = arm_env_get_cpu(env);
1768 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1769
1770 trace_arm_gt_ctl_write(timeridx, value);
1771 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1772 if ((oldval ^ value) & 1) {
1773 /* Enable toggled */
1774 gt_recalc_timer(cpu, timeridx);
1775 } else if ((oldval ^ value) & 2) {
1776 /* IMASK toggled: don't need to recalculate,
1777 * just set the interrupt line based on ISTATUS
1778 */
1779 int irqstate = (oldval & 4) && !(value & 2);
1780
1781 trace_arm_gt_imask_toggle(timeridx, irqstate);
1782 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1783 }
1784 }
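/*
 * Worked example (illustrative, not part of the original file): CNT*_CTL
 * bit 0 is ENABLE, bit 1 IMASK, bit 2 the read-only ISTATUS maintained by
 * gt_recalc_timer(). With oldval = 0x5 (enabled, ISTATUS set), writing 0x3
 * sets IMASK without toggling ENABLE, so the IMASK branch above runs and
 * the interrupt line drops without a full recalculation.
 */
#if 0 /* example only, not compiled */
uint32_t oldval = 0x5;                          /* ENABLE | ISTATUS */
uint64_t value  = 0x3;                          /* ENABLE | IMASK */
int irqstate = (oldval & 4) && !(value & 2);    /* 0: line deasserted */
#endif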
1785
1786 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1787 {
1788 gt_timer_reset(env, ri, GTIMER_PHYS);
1789 }
1790
1791 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1792 uint64_t value)
1793 {
1794 gt_cval_write(env, ri, GTIMER_PHYS, value);
1795 }
1796
1797 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1798 {
1799 return gt_tval_read(env, ri, GTIMER_PHYS);
1800 }
1801
1802 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1803 uint64_t value)
1804 {
1805 gt_tval_write(env, ri, GTIMER_PHYS, value);
1806 }
1807
1808 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1809 uint64_t value)
1810 {
1811 gt_ctl_write(env, ri, GTIMER_PHYS, value);
1812 }
1813
1814 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1815 {
1816 gt_timer_reset(env, ri, GTIMER_VIRT);
1817 }
1818
1819 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1820 uint64_t value)
1821 {
1822 gt_cval_write(env, ri, GTIMER_VIRT, value);
1823 }
1824
1825 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1826 {
1827 return gt_tval_read(env, ri, GTIMER_VIRT);
1828 }
1829
1830 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1831 uint64_t value)
1832 {
1833 gt_tval_write(env, ri, GTIMER_VIRT, value);
1834 }
1835
1836 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1837 uint64_t value)
1838 {
1839 gt_ctl_write(env, ri, GTIMER_VIRT, value);
1840 }
1841
1842 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1843 uint64_t value)
1844 {
1845 ARMCPU *cpu = arm_env_get_cpu(env);
1846
1847 trace_arm_gt_cntvoff_write(value);
1848 raw_write(env, ri, value);
1849 gt_recalc_timer(cpu, GTIMER_VIRT);
1850 }
1851
1852 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1853 {
1854 gt_timer_reset(env, ri, GTIMER_HYP);
1855 }
1856
1857 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1858 uint64_t value)
1859 {
1860 gt_cval_write(env, ri, GTIMER_HYP, value);
1861 }
1862
1863 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1864 {
1865 return gt_tval_read(env, ri, GTIMER_HYP);
1866 }
1867
1868 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1869 uint64_t value)
1870 {
1871 gt_tval_write(env, ri, GTIMER_HYP, value);
1872 }
1873
1874 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1875 uint64_t value)
1876 {
1877 gt_ctl_write(env, ri, GTIMER_HYP, value);
1878 }
1879
1880 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1881 {
1882 gt_timer_reset(env, ri, GTIMER_SEC);
1883 }
1884
1885 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1886 uint64_t value)
1887 {
1888 gt_cval_write(env, ri, GTIMER_SEC, value);
1889 }
1890
1891 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1892 {
1893 return gt_tval_read(env, ri, GTIMER_SEC);
1894 }
1895
1896 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1897 uint64_t value)
1898 {
1899 gt_tval_write(env, ri, GTIMER_SEC, value);
1900 }
1901
1902 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1903 uint64_t value)
1904 {
1905 gt_ctl_write(env, ri, GTIMER_SEC, value);
1906 }
1907
1908 void arm_gt_ptimer_cb(void *opaque)
1909 {
1910 ARMCPU *cpu = opaque;
1911
1912 gt_recalc_timer(cpu, GTIMER_PHYS);
1913 }
1914
1915 void arm_gt_vtimer_cb(void *opaque)
1916 {
1917 ARMCPU *cpu = opaque;
1918
1919 gt_recalc_timer(cpu, GTIMER_VIRT);
1920 }
1921
1922 void arm_gt_htimer_cb(void *opaque)
1923 {
1924 ARMCPU *cpu = opaque;
1925
1926 gt_recalc_timer(cpu, GTIMER_HYP);
1927 }
1928
1929 void arm_gt_stimer_cb(void *opaque)
1930 {
1931 ARMCPU *cpu = opaque;
1932
1933 gt_recalc_timer(cpu, GTIMER_SEC);
1934 }
1935
1936 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1937 /* Note that CNTFRQ is purely reads-as-written for the benefit
1938 * of software; writing it doesn't actually change the timer frequency.
1939 * Our reset value matches the fixed frequency we implement the timer at.
1940 */
1941 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1942 .type = ARM_CP_ALIAS,
1943 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1944 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1945 },
1946 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1947 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1948 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1949 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1950 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1951 },
1952 /* overall control: mostly access permissions */
1953 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1954 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1955 .access = PL1_RW,
1956 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1957 .resetvalue = 0,
1958 },
1959 /* per-timer control */
1960 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1961 .secure = ARM_CP_SECSTATE_NS,
1962 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1963 .accessfn = gt_ptimer_access,
1964 .fieldoffset = offsetoflow32(CPUARMState,
1965 cp15.c14_timer[GTIMER_PHYS].ctl),
1966 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1967 },
1968 { .name = "CNTP_CTL(S)",
1969 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1970 .secure = ARM_CP_SECSTATE_S,
1971 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1972 .accessfn = gt_ptimer_access,
1973 .fieldoffset = offsetoflow32(CPUARMState,
1974 cp15.c14_timer[GTIMER_SEC].ctl),
1975 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1976 },
1977 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1978 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1979 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1980 .accessfn = gt_ptimer_access,
1981 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1982 .resetvalue = 0,
1983 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1984 },
1985 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1986 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1987 .accessfn = gt_vtimer_access,
1988 .fieldoffset = offsetoflow32(CPUARMState,
1989 cp15.c14_timer[GTIMER_VIRT].ctl),
1990 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1991 },
1992 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1993 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1994 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1995 .accessfn = gt_vtimer_access,
1996 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1997 .resetvalue = 0,
1998 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1999 },
2000 /* TimerValue views: a 32 bit downcounting view of the underlying state */
2001 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2002 .secure = ARM_CP_SECSTATE_NS,
2003 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2004 .accessfn = gt_ptimer_access,
2005 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2006 },
2007 { .name = "CNTP_TVAL(S)",
2008 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2009 .secure = ARM_CP_SECSTATE_S,
2010 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2011 .accessfn = gt_ptimer_access,
2012 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2013 },
2014 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2015 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2016 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2017 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2018 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2019 },
2020 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2021 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2022 .accessfn = gt_vtimer_access,
2023 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2024 },
2025 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2026 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2027 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2028 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2029 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2030 },
2031 /* The counter itself */
2032 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2033 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2034 .accessfn = gt_pct_access,
2035 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2036 },
2037 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2038 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2039 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2040 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2041 },
2042 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2043 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2044 .accessfn = gt_vct_access,
2045 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2046 },
2047 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2048 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2049 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2050 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2051 },
2052 /* Comparison value, indicating when the timer goes off */
2053 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2054 .secure = ARM_CP_SECSTATE_NS,
2055 .access = PL1_RW | PL0_R,
2056 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2057 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2058 .accessfn = gt_ptimer_access,
2059 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2060 },
2061 { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
2062 .secure = ARM_CP_SECSTATE_S,
2063 .access = PL1_RW | PL0_R,
2064 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2065 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2066 .accessfn = gt_ptimer_access,
2067 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2068 },
2069 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2070 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2071 .access = PL1_RW | PL0_R,
2072 .type = ARM_CP_IO,
2073 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2074 .resetvalue = 0, .accessfn = gt_ptimer_access,
2075 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2076 },
2077 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2078 .access = PL1_RW | PL0_R,
2079 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2080 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2081 .accessfn = gt_vtimer_access,
2082 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2083 },
2084 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2085 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2086 .access = PL1_RW | PL0_R,
2087 .type = ARM_CP_IO,
2088 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2089 .resetvalue = 0, .accessfn = gt_vtimer_access,
2090 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2091 },
2092 /* Secure timer -- this is actually restricted to only EL3
2093 * and configurably Secure-EL1 via the accessfn.
2094 */
2095 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2096 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2097 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2098 .accessfn = gt_stimer_access,
2099 .readfn = gt_sec_tval_read,
2100 .writefn = gt_sec_tval_write,
2101 .resetfn = gt_sec_timer_reset,
2102 },
2103 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2104 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2105 .type = ARM_CP_IO, .access = PL1_RW,
2106 .accessfn = gt_stimer_access,
2107 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2108 .resetvalue = 0,
2109 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2110 },
2111 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2112 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2113 .type = ARM_CP_IO, .access = PL1_RW,
2114 .accessfn = gt_stimer_access,
2115 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2116 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2117 },
2118 REGINFO_SENTINEL
2119 };
2120
2121 #else
2122 /* In user-mode none of the generic timer registers are accessible,
2123 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2124 * so instead just don't register any of them.
2125 */
2126 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2127 REGINFO_SENTINEL
2128 };
2129
2130 #endif
2131
2132 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2133 {
2134 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2135 raw_write(env, ri, value);
2136 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2137 raw_write(env, ri, value & 0xfffff6ff);
2138 } else {
2139 raw_write(env, ri, value & 0xfffff1ff);
2140 }
2141 }
2142
2143 #ifndef CONFIG_USER_ONLY
2144 /* get_phys_addr() isn't present for user-mode-only targets */
2145
2146 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2147 bool isread)
2148 {
2149 if (ri->opc2 & 4) {
2150 /* The ATS12NSO* operations must trap to EL3 if executed in
2151 * Secure EL1 (which can only happen if EL3 is AArch64).
2152 * They are simply UNDEF if executed from NS EL1.
2153 * They function normally from EL2 or EL3.
2154 */
2155 if (arm_current_el(env) == 1) {
2156 if (arm_is_secure_below_el3(env)) {
2157 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2158 }
2159 return CP_ACCESS_TRAP_UNCATEGORIZED;
2160 }
2161 }
2162 return CP_ACCESS_OK;
2163 }
2164
2165 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2166 MMUAccessType access_type, ARMMMUIdx mmu_idx)
2167 {
2168 hwaddr phys_addr;
2169 target_ulong page_size;
2170 int prot;
2171 bool ret;
2172 uint64_t par64;
2173 bool format64 = false;
2174 MemTxAttrs attrs = {};
2175 ARMMMUFaultInfo fi = {};
2176 ARMCacheAttrs cacheattrs = {};
2177
2178 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
2179 &prot, &page_size, &fi, &cacheattrs);
2180
2181 if (is_a64(env)) {
2182 format64 = true;
2183 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
2184 /*
2185 * ATS1Cxx:
2186 * * TTBCR.EAE determines whether the result is returned using the
2187 * 32-bit or the 64-bit PAR format
2188 * * Instructions executed in Hyp mode always use the 64-bit format
2189 *
2190 * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
2191 * * The Non-secure TTBCR.EAE bit is set to 1
2192 * * The implementation includes EL2, and the value of HCR.VM is 1
2193 *
2194 * ATS1Hx always uses the 64-bit format (not supported yet).
2195 */
2196 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
2197
2198 if (arm_feature(env, ARM_FEATURE_EL2)) {
2199 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
2200 format64 |= env->cp15.hcr_el2 & HCR_VM;
2201 } else {
2202 format64 |= arm_current_el(env) == 2;
2203 }
2204 }
2205 }
2206
2207 if (format64) {
2208 /* Create a 64-bit PAR */
2209 par64 = (1 << 11); /* LPAE bit always set */
2210 if (!ret) {
2211 par64 |= phys_addr & ~0xfffULL;
2212 if (!attrs.secure) {
2213 par64 |= (1 << 9); /* NS */
2214 }
2215 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
2216 par64 |= cacheattrs.shareability << 7; /* SH */
2217 } else {
2218 uint32_t fsr = arm_fi_to_lfsc(&fi);
2219
2220 par64 |= 1; /* F */
2221 par64 |= (fsr & 0x3f) << 1; /* FS */
2222 /* Note that S2WLK and FSTAGE are always zero, because we don't
2223 * implement virtualization and therefore there can't be a stage 2
2224 * fault.
2225 */
2226 }
2227 } else {
2228 /* fsr is a DFSR/IFSR value for the short descriptor
2229 * translation table format (with WnR always clear).
2230 * Convert it to a 32-bit PAR.
2231 */
2232 if (!ret) {
2233 /* We do not set any attribute bits in the PAR */
2234 if (page_size == (1 << 24)
2235 && arm_feature(env, ARM_FEATURE_V7)) {
2236 par64 = (phys_addr & 0xff000000) | (1 << 1);
2237 } else {
2238 par64 = phys_addr & 0xfffff000;
2239 }
2240 if (!attrs.secure) {
2241 par64 |= (1 << 9); /* NS */
2242 }
2243 } else {
2244 uint32_t fsr = arm_fi_to_sfsc(&fi);
2245
2246 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2247 ((fsr & 0xf) << 1) | 1;
2248 }
2249 }
2250 return par64;
2251 }
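/*
 * Worked example (illustrative, not part of the original file): per the
 * code above, a successful 64-bit PAR packs PA[47:12] into bits [47:12],
 * NS into bit 9, SH into bits [8:7], the LPAE format bit into bit 11 and
 * the MAIR-style attributes into bits [63:56]; on a fault, F (bit 0) is
 * set and the long-format FSC sits in bits [6:1]. The address and
 * attribute values below are made up for the example.
 */
#if 0 /* example only, not compiled */
uint64_t par64 = (1 << 11);                 /* LPAE-format result */
par64 |= 0x40001000ULL & ~0xfffULL;         /* PA of the translated page */
par64 |= (1 << 9);                          /* NS: non-secure PA space */
par64 |= (uint64_t)0xff << 56;              /* ATTR 0xff: Normal WB memory */
/* par64 == 0xff00000040001a00 */
#endif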
2252
2253 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2254 {
2255 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2256 uint64_t par64;
2257 ARMMMUIdx mmu_idx;
2258 int el = arm_current_el(env);
2259 bool secure = arm_is_secure_below_el3(env);
2260
2261 switch (ri->opc2 & 6) {
2262 case 0:
2263 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2264 switch (el) {
2265 case 3:
2266 mmu_idx = ARMMMUIdx_S1E3;
2267 break;
2268 case 2:
2269 mmu_idx = ARMMMUIdx_S1NSE1;
2270 break;
2271 case 1:
2272 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2273 break;
2274 default:
2275 g_assert_not_reached();
2276 }
2277 break;
2278 case 2:
2279 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2280 switch (el) {
2281 case 3:
2282 mmu_idx = ARMMMUIdx_S1SE0;
2283 break;
2284 case 2:
2285 mmu_idx = ARMMMUIdx_S1NSE0;
2286 break;
2287 case 1:
2288 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2289 break;
2290 default:
2291 g_assert_not_reached();
2292 }
2293 break;
2294 case 4:
2295 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2296 mmu_idx = ARMMMUIdx_S12NSE1;
2297 break;
2298 case 6:
2299 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2300 mmu_idx = ARMMMUIdx_S12NSE0;
2301 break;
2302 default:
2303 g_assert_not_reached();
2304 }
2305
2306 par64 = do_ats_write(env, value, access_type, mmu_idx);
2307
2308 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2309 }
2310
2311 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2312 uint64_t value)
2313 {
2314 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2315 uint64_t par64;
2316
2317 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2318
2319 A32_BANKED_CURRENT_REG_SET(env, par, par64);
2320 }
2321
2322 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2323 bool isread)
2324 {
2325 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2326 return CP_ACCESS_TRAP;
2327 }
2328 return CP_ACCESS_OK;
2329 }
2330
2331 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2332 uint64_t value)
2333 {
2334 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2335 ARMMMUIdx mmu_idx;
2336 int secure = arm_is_secure_below_el3(env);
2337
2338 switch (ri->opc2 & 6) {
2339 case 0:
2340 switch (ri->opc1) {
2341 case 0: /* AT S1E1R, AT S1E1W */
2342 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2343 break;
2344 case 4: /* AT S1E2R, AT S1E2W */
2345 mmu_idx = ARMMMUIdx_S1E2;
2346 break;
2347 case 6: /* AT S1E3R, AT S1E3W */
2348 mmu_idx = ARMMMUIdx_S1E3;
2349 break;
2350 default:
2351 g_assert_not_reached();
2352 }
2353 break;
2354 case 2: /* AT S1E0R, AT S1E0W */
2355 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2356 break;
2357 case 4: /* AT S12E1R, AT S12E1W */
2358 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2359 break;
2360 case 6: /* AT S12E0R, AT S12E0W */
2361 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2362 break;
2363 default:
2364 g_assert_not_reached();
2365 }
2366
2367 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2368 }
2369 #endif
2370
2371 static const ARMCPRegInfo vapa_cp_reginfo[] = {
2372 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2373 .access = PL1_RW, .resetvalue = 0,
2374 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2375 offsetoflow32(CPUARMState, cp15.par_ns) },
2376 .writefn = par_write },
2377 #ifndef CONFIG_USER_ONLY
2378 /* This underdecoding is safe because the reginfo is NO_RAW. */
2379 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2380 .access = PL1_W, .accessfn = ats_access,
2381 .writefn = ats_write, .type = ARM_CP_NO_RAW },
2382 #endif
2383 REGINFO_SENTINEL
2384 };
2385
2386 /* Return basic MPU access permission bits. */
2387 static uint32_t simple_mpu_ap_bits(uint32_t val)
2388 {
2389 uint32_t ret;
2390 uint32_t mask;
2391 int i;
2392 ret = 0;
2393 mask = 3;
2394 for (i = 0; i < 16; i += 2) {
2395 ret |= (val >> i) & mask;
2396 mask <<= 2;
2397 }
2398 return ret;
2399 }
2400
2401 /* Pad basic MPU access permission bits to extended format. */
2402 static uint32_t extended_mpu_ap_bits(uint32_t val)
2403 {
2404 uint32_t ret;
2405 uint32_t mask;
2406 int i;
2407 ret = 0;
2408 mask = 3;
2409 for (i = 0; i < 16; i += 2) {
2410 ret |= (val & mask) << i;
2411 mask <<= 2;
2412 }
2413 return ret;
2414 }
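/*
 * Worked example (illustrative, not part of the original file): the two
 * helpers above convert between the "simple" AP layout (2 bits per region,
 * eight regions packed into the low 16 bits) and the "extended" layout
 * (the same 2-bit fields spread onto 4-bit boundaries), and are inverses
 * of each other.
 */
#if 0 /* example only, not compiled */
uint32_t simple = 0x6;                          /* region 1 = 01, region 0 = 10 */
uint32_t ext = extended_mpu_ap_bits(simple);    /* == 0x12 */
g_assert(simple_mpu_ap_bits(ext) == simple);
#endif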
2415
2416 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2417 uint64_t value)
2418 {
2419 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2420 }
2421
2422 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2423 {
2424 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2425 }
2426
2427 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2428 uint64_t value)
2429 {
2430 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2431 }
2432
2433 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2434 {
2435 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2436 }
2437
2438 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2439 {
2440 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2441
2442 if (!u32p) {
2443 return 0;
2444 }
2445
2446 u32p += env->pmsav7.rnr[M_REG_NS];
2447 return *u32p;
2448 }
2449
2450 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2451 uint64_t value)
2452 {
2453 ARMCPU *cpu = arm_env_get_cpu(env);
2454 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2455
2456 if (!u32p) {
2457 return;
2458 }
2459
2460 u32p += env->pmsav7.rnr[M_REG_NS];
2461 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2462 *u32p = value;
2463 }
2464
2465 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2466 uint64_t value)
2467 {
2468 ARMCPU *cpu = arm_env_get_cpu(env);
2469 uint32_t nrgs = cpu->pmsav7_dregion;
2470
2471 if (value >= nrgs) {
2472 qemu_log_mask(LOG_GUEST_ERROR,
2473 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2474 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2475 return;
2476 }
2477
2478 raw_write(env, ri, value);
2479 }
2480
2481 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2482 /* Reset for all these registers is handled in arm_cpu_reset(),
2483 * because the PMSAv7 is also used by M-profile CPUs, which do
2484 * not register cpregs but still need the state to be reset.
2485 */
2486 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2487 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2488 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2489 .readfn = pmsav7_read, .writefn = pmsav7_write,
2490 .resetfn = arm_cp_reset_ignore },
2491 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2492 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2493 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2494 .readfn = pmsav7_read, .writefn = pmsav7_write,
2495 .resetfn = arm_cp_reset_ignore },
2496 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2497 .access = PL1_RW, .type = ARM_CP_NO_RAW,
2498 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2499 .readfn = pmsav7_read, .writefn = pmsav7_write,
2500 .resetfn = arm_cp_reset_ignore },
2501 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2502 .access = PL1_RW,
2503 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
2504 .writefn = pmsav7_rgnr_write,
2505 .resetfn = arm_cp_reset_ignore },
2506 REGINFO_SENTINEL
2507 };
2508
2509 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2510 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2511 .access = PL1_RW, .type = ARM_CP_ALIAS,
2512 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2513 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2514 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2515 .access = PL1_RW, .type = ARM_CP_ALIAS,
2516 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2517 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2518 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2519 .access = PL1_RW,
2520 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2521 .resetvalue = 0, },
2522 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2523 .access = PL1_RW,
2524 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2525 .resetvalue = 0, },
2526 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2527 .access = PL1_RW,
2528 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2529 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2530 .access = PL1_RW,
2531 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2532 /* Protection region base and size registers */
2533 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2534 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2535 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2536 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2537 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2538 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2539 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2540 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2541 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2542 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2543 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2544 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2545 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2546 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2547 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2548 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2549 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2550 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2551 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2552 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2553 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2554 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2555 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2556 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2557 REGINFO_SENTINEL
2558 };
2559
2560 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2561 uint64_t value)
2562 {
2563 TCR *tcr = raw_ptr(env, ri);
2564 int maskshift = extract32(value, 0, 3);
2565
2566 if (!arm_feature(env, ARM_FEATURE_V8)) {
2567 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2568 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2569 * using Long-descriptor translation table format */
2570 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2571 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2572 /* In an implementation that includes the Security Extensions
2573 * TTBCR has additional fields PD0 [4] and PD1 [5] for
2574 * Short-descriptor translation table format.
2575 */
2576 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2577 } else {
2578 value &= TTBCR_N;
2579 }
2580 }
2581
2582 /* Update the masks corresponding to the TCR bank being written
2583 * Note that we always calculate mask and base_mask, but
2584 * they are only used for short-descriptor tables (ie if EAE is 0);
2585 * for long-descriptor tables the TCR fields are used differently
2586 * and the mask and base_mask values are meaningless.
2587 */
2588 tcr->raw_tcr = value;
2589 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2590 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2591 }
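/*
 * Worked example (illustrative, not part of the original file): for a
 * short-descriptor TTBCR write with N = 2 (maskshift = 2) the code above
 * yields mask = 0xc0000000 and base_mask = 0xfffff000: VAs with either of
 * the top two bits set translate via TTBR1, and the TTBR0 table base only
 * needs 4K alignment instead of the 16K required when N = 0.
 */
#if 0 /* example only, not compiled */
int maskshift = 2;                                            /* TTBCR.N */
uint32_t mask      = ~(((uint32_t)0xffffffffu) >> maskshift); /* 0xc0000000 */
uint32_t base_mask = ~((uint32_t)0x3fffu >> maskshift);       /* 0xfffff000 */
#endif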
2592
2593 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2594 uint64_t value)
2595 {
2596 ARMCPU *cpu = arm_env_get_cpu(env);
2597
2598 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2599 /* With LPAE the TTBCR could result in a change of ASID
2600 * via the TTBCR.A1 bit, so do a TLB flush.
2601 */
2602 tlb_flush(CPU(cpu));
2603 }
2604 vmsa_ttbcr_raw_write(env, ri, value);
2605 }
2606
2607 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2608 {
2609 TCR *tcr = raw_ptr(env, ri);
2610
2611 /* Reset both the TCR as well as the masks corresponding to the bank of
2612 * the TCR being reset.
2613 */
2614 tcr->raw_tcr = 0;
2615 tcr->mask = 0;
2616 tcr->base_mask = 0xffffc000u;
2617 }
2618
2619 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2620 uint64_t value)
2621 {
2622 ARMCPU *cpu = arm_env_get_cpu(env);
2623 TCR *tcr = raw_ptr(env, ri);
2624
2625 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2626 tlb_flush(CPU(cpu));
2627 tcr->raw_tcr = value;
2628 }
2629
2630 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2631 uint64_t value)
2632 {
2633 /* 64 bit accesses to the TTBRs can change the ASID and so we
2634 * must flush the TLB.
2635 */
2636 if (cpreg_field_is_64bit(ri)) {
2637 ARMCPU *cpu = arm_env_get_cpu(env);
2638
2639 tlb_flush(CPU(cpu));
2640 }
2641 raw_write(env, ri, value);
2642 }
2643
2644 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2645 uint64_t value)
2646 {
2647 ARMCPU *cpu = arm_env_get_cpu(env);
2648 CPUState *cs = CPU(cpu);
2649
2650 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
2651 if (raw_read(env, ri) != value) {
2652 tlb_flush_by_mmuidx(cs,
2653 ARMMMUIdxBit_S12NSE1 |
2654 ARMMMUIdxBit_S12NSE0 |
2655 ARMMMUIdxBit_S2NS);
2656 raw_write(env, ri, value);
2657 }
2658 }
2659
2660 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2661 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2662 .access = PL1_RW, .type = ARM_CP_ALIAS,
2663 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2664 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2665 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2666 .access = PL1_RW, .resetvalue = 0,
2667 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2668 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2669 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2670 .access = PL1_RW, .resetvalue = 0,
2671 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2672 offsetof(CPUARMState, cp15.dfar_ns) } },
2673 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2674 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2675 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2676 .resetvalue = 0, },
2677 REGINFO_SENTINEL
2678 };
2679
2680 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2681 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2682 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2683 .access = PL1_RW,
2684 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2685 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2686 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2687 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2688 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2689 offsetof(CPUARMState, cp15.ttbr0_ns) } },
2690 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2691 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2692 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2693 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2694 offsetof(CPUARMState, cp15.ttbr1_ns) } },
2695 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2696 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2697 .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2698 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2699 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2700 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2701 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2702 .raw_writefn = vmsa_ttbcr_raw_write,
2703 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2704 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2705 REGINFO_SENTINEL
2706 };
2707
2708 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2709 uint64_t value)
2710 {
2711 env->cp15.c15_ticonfig = value & 0xe7;
2712 /* The OS_TYPE bit in this register changes the reported CPUID! */
2713 env->cp15.c0_cpuid = (value & (1 << 5)) ?
2714 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2715 }
2716
2717 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2718 uint64_t value)
2719 {
2720 env->cp15.c15_threadid = value & 0xffff;
2721 }
2722
2723 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2724 uint64_t value)
2725 {
2726 /* Wait-for-interrupt (deprecated) */
2727 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2728 }
2729
2730 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2731 uint64_t value)
2732 {
2733 /* On OMAP there are registers indicating the max/min index of dcache lines
2734 * containing a dirty line; cache flush operations have to reset these.
2735 */
2736 env->cp15.c15_i_max = 0x000;
2737 env->cp15.c15_i_min = 0xff0;
2738 }
2739
2740 static const ARMCPRegInfo omap_cp_reginfo[] = {
2741 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2742 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2743 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2744 .resetvalue = 0, },
2745 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2746 .access = PL1_RW, .type = ARM_CP_NOP },
2747 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2748 .access = PL1_RW,
2749 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2750 .writefn = omap_ticonfig_write },
2751 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2752 .access = PL1_RW,
2753 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2754 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2755 .access = PL1_RW, .resetvalue = 0xff0,
2756 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2757 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2758 .access = PL1_RW,
2759 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2760 .writefn = omap_threadid_write },
2761 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2762 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2763 .type = ARM_CP_NO_RAW,
2764 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2765 /* TODO: Peripheral port remap register:
2766 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2767 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2768 * when MMU is off.
2769 */
2770 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2771 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2772 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2773 .writefn = omap_cachemaint_write },
2774 { .name = "C9", .cp = 15, .crn = 9,
2775 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2776 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2777 REGINFO_SENTINEL
2778 };
2779
2780 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2781 uint64_t value)
2782 {
2783 env->cp15.c15_cpar = value & 0x3fff;
2784 }
2785
2786 static const ARMCPRegInfo xscale_cp_reginfo[] = {
2787 { .name = "XSCALE_CPAR",
2788 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2789 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2790 .writefn = xscale_cpar_write, },
2791 { .name = "XSCALE_AUXCR",
2792 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2793 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2794 .resetvalue = 0, },
2795 /* XScale specific cache-lockdown: since we have no cache we NOP these
2796 * and hope the guest does not really rely on cache behaviour.
2797 */
2798 { .name = "XSCALE_LOCK_ICACHE_LINE",
2799 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2800 .access = PL1_W, .type = ARM_CP_NOP },
2801 { .name = "XSCALE_UNLOCK_ICACHE",
2802 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2803 .access = PL1_W, .type = ARM_CP_NOP },
2804 { .name = "XSCALE_DCACHE_LOCK",
2805 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2806 .access = PL1_RW, .type = ARM_CP_NOP },
2807 { .name = "XSCALE_UNLOCK_DCACHE",
2808 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2809 .access = PL1_W, .type = ARM_CP_NOP },
2810 REGINFO_SENTINEL
2811 };
2812
2813 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2814 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2815 * implementation of this implementation-defined space.
2816 * Ideally this should eventually disappear in favour of actually
2817 * implementing the correct behaviour for all cores.
2818 */
2819 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2820 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2821 .access = PL1_RW,
2822 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2823 .resetvalue = 0 },
2824 REGINFO_SENTINEL
2825 };
2826
2827 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2828 /* Cache status: RAZ because we have no cache so it's always clean */
2829 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2830 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2831 .resetvalue = 0 },
2832 REGINFO_SENTINEL
2833 };
2834
2835 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2836 /* We never have a block transfer operation in progress */
2837 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2838 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2839 .resetvalue = 0 },
2840 /* The cache ops themselves: these all NOP for QEMU */
2841 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2842 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2843 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2844 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2845 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2846 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2847 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2848 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2849 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2850 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2851 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2852 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2853 REGINFO_SENTINEL
2854 };
2855
2856 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2857 /* The cache test-and-clean instructions always return (1 << 30)
2858 * to indicate that there are no dirty cache lines.
2859 */
2860 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2861 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2862 .resetvalue = (1 << 30) },
2863 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2864 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2865 .resetvalue = (1 << 30) },
2866 REGINFO_SENTINEL
2867 };
2868
2869 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2870 /* Ignore ReadBuffer accesses */
2871 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2872 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2873 .access = PL1_RW, .resetvalue = 0,
2874 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2875 REGINFO_SENTINEL
2876 };
2877
2878 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2879 {
2880 ARMCPU *cpu = arm_env_get_cpu(env);
2881 unsigned int cur_el = arm_current_el(env);
2882 bool secure = arm_is_secure(env);
2883
2884 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2885 return env->cp15.vpidr_el2;
2886 }
2887 return raw_read(env, ri);
2888 }
2889
2890 static uint64_t mpidr_read_val(CPUARMState *env)
2891 {
2892 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2893 uint64_t mpidr = cpu->mp_affinity;
2894
2895 if (arm_feature(env, ARM_FEATURE_V7MP)) {
2896 mpidr |= (1U << 31);
2897 /* Cores which are uniprocessor (non-coherent)
2898 * but still implement the MP extensions set
2899 * bit 30. (For instance, Cortex-R5).
2900 */
2901 if (cpu->mp_is_up) {
2902 mpidr |= (1u << 30);
2903 }
2904 }
2905 return mpidr;
2906 }
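/*
 * Worked example (illustrative, not part of the original file): core 2 of
 * a v7MP cluster with mp_affinity = 0x2 reads MPIDR as 0x80000002 (bit 31
 * flags the MP extensions); a uniprocessor core with mp_is_up set, such as
 * a Cortex-R5, additionally sets bit 30.
 */
#if 0 /* example only, not compiled */
uint64_t mpidr = 0x2;           /* cpu->mp_affinity for core 2 */
mpidr |= (1U << 31);            /* ARM_FEATURE_V7MP */
/* mpidr == 0x80000002; a U-bit core would OR in (1u << 30) as well */
#endif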
2907
2908 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2909 {
2910 unsigned int cur_el = arm_current_el(env);
2911 bool secure = arm_is_secure(env);
2912
2913 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2914 return env->cp15.vmpidr_el2;
2915 }
2916 return mpidr_read_val(env);
2917 }
2918
2919 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2920 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2921 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2922 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2923 REGINFO_SENTINEL
2924 };
2925
2926 static const ARMCPRegInfo lpae_cp_reginfo[] = {
2927 /* NOP AMAIR0/1 */
2928 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2929 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2930 .access = PL1_RW, .type = ARM_CP_CONST,
2931 .resetvalue = 0 },
2932 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2933 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2934 .access = PL1_RW, .type = ARM_CP_CONST,
2935 .resetvalue = 0 },
2936 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2937 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2938 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2939 offsetof(CPUARMState, cp15.par_ns)} },
2940 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2941 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2942 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2943 offsetof(CPUARMState, cp15.ttbr0_ns) },
2944 .writefn = vmsa_ttbr_write, },
2945 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2946 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2947 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2948 offsetof(CPUARMState, cp15.ttbr1_ns) },
2949 .writefn = vmsa_ttbr_write, },
2950 REGINFO_SENTINEL
2951 };
2952
2953 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2954 {
2955 return vfp_get_fpcr(env);
2956 }
2957
2958 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2959 uint64_t value)
2960 {
2961 vfp_set_fpcr(env, value);
2962 }
2963
2964 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2965 {
2966 return vfp_get_fpsr(env);
2967 }
2968
2969 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2970 uint64_t value)
2971 {
2972 vfp_set_fpsr(env, value);
2973 }
2974
2975 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2976 bool isread)
2977 {
2978 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2979 return CP_ACCESS_TRAP;
2980 }
2981 return CP_ACCESS_OK;
2982 }
2983
2984 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2985 uint64_t value)
2986 {
2987 env->daif = value & PSTATE_DAIF;
2988 }
2989
2990 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2991 const ARMCPRegInfo *ri,
2992 bool isread)
2993 {
2994 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2995 * SCTLR_EL1.UCI is set.
2996 */
2997 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2998 return CP_ACCESS_TRAP;
2999 }
3000 return CP_ACCESS_OK;
3001 }
3002
3003 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3004 * Page D4-1736 (DDI0487A.b)
3005 */
3006
3007 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3008 uint64_t value)
3009 {
3010 CPUState *cs = ENV_GET_CPU(env);
3011
3012 if (arm_is_secure_below_el3(env)) {
3013 tlb_flush_by_mmuidx(cs,
3014 ARMMMUIdxBit_S1SE1 |
3015 ARMMMUIdxBit_S1SE0);
3016 } else {
3017 tlb_flush_by_mmuidx(cs,
3018 ARMMMUIdxBit_S12NSE1 |
3019 ARMMMUIdxBit_S12NSE0);
3020 }
3021 }
3022
3023 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3024 uint64_t value)
3025 {
3026 CPUState *cs = ENV_GET_CPU(env);
3027 bool sec = arm_is_secure_below_el3(env);
3028
3029 if (sec) {
3030 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3031 ARMMMUIdxBit_S1SE1 |
3032 ARMMMUIdxBit_S1SE0);
3033 } else {
3034 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3035 ARMMMUIdxBit_S12NSE1 |
3036 ARMMMUIdxBit_S12NSE0);
3037 }
3038 }
3039
3040 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3041 uint64_t value)
3042 {
3043 /* Note that the 'ALL' scope must invalidate both stage 1 and
3044 * stage 2 translations, whereas most other scopes only invalidate
3045 * stage 1 translations.
3046 */
3047 ARMCPU *cpu = arm_env_get_cpu(env);
3048 CPUState *cs = CPU(cpu);
3049
3050 if (arm_is_secure_below_el3(env)) {
3051 tlb_flush_by_mmuidx(cs,
3052 ARMMMUIdxBit_S1SE1 |
3053 ARMMMUIdxBit_S1SE0);
3054 } else {
3055 if (arm_feature(env, ARM_FEATURE_EL2)) {
3056 tlb_flush_by_mmuidx(cs,
3057 ARMMMUIdxBit_S12NSE1 |
3058 ARMMMUIdxBit_S12NSE0 |
3059 ARMMMUIdxBit_S2NS);
3060 } else {
3061 tlb_flush_by_mmuidx(cs,
3062 ARMMMUIdxBit_S12NSE1 |
3063 ARMMMUIdxBit_S12NSE0);
3064 }
3065 }
3066 }
3067
3068 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3069 uint64_t value)
3070 {
3071 ARMCPU *cpu = arm_env_get_cpu(env);
3072 CPUState *cs = CPU(cpu);
3073
3074 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3075 }
3076
3077 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3078 uint64_t value)
3079 {
3080 ARMCPU *cpu = arm_env_get_cpu(env);
3081 CPUState *cs = CPU(cpu);
3082
3083 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3084 }
3085
3086 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3087 uint64_t value)
3088 {
3089 /* Note that the 'ALL' scope must invalidate both stage 1 and
3090 * stage 2 translations, whereas most other scopes only invalidate
3091 * stage 1 translations.
3092 */
3093 CPUState *cs = ENV_GET_CPU(env);
3094 bool sec = arm_is_secure_below_el3(env);
3095 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3096
3097 if (sec) {
3098 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3099 ARMMMUIdxBit_S1SE1 |
3100 ARMMMUIdxBit_S1SE0);
3101 } else if (has_el2) {
3102 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3103 ARMMMUIdxBit_S12NSE1 |
3104 ARMMMUIdxBit_S12NSE0 |
3105 ARMMMUIdxBit_S2NS);
3106 } else {
3107 tlb_flush_by_mmuidx_all_cpus_synced(cs,
3108 ARMMMUIdxBit_S12NSE1 |
3109 ARMMMUIdxBit_S12NSE0);
3110 }
3111 }
3112
3113 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3114 uint64_t value)
3115 {
3116 CPUState *cs = ENV_GET_CPU(env);
3117
3118 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3119 }
3120
3121 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3122 uint64_t value)
3123 {
3124 CPUState *cs = ENV_GET_CPU(env);
3125
3126 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3127 }
3128
3129 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3130 uint64_t value)
3131 {
3132 /* Invalidate by VA, EL1&0 (AArch64 version).
3133 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3134 * since we don't support flush-for-specific-ASID-only or
3135 * flush-last-level-only.
3136 */
3137 ARMCPU *cpu = arm_env_get_cpu(env);
3138 CPUState *cs = CPU(cpu);
3139 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3140
3141 if (arm_is_secure_below_el3(env)) {
3142 tlb_flush_page_by_mmuidx(cs, pageaddr,
3143 ARMMMUIdxBit_S1SE1 |
3144 ARMMMUIdxBit_S1SE0);
3145 } else {
3146 tlb_flush_page_by_mmuidx(cs, pageaddr,
3147 ARMMMUIdxBit_S12NSE1 |
3148 ARMMMUIdxBit_S12NSE0);
3149 }
3150 }
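/*
 * Worked example (illustrative, not part of the original file): the TLBI
 * operand carries VA[55:12] in its low 44 bits. For the kernel address
 * 0xffff000012345000 the guest passes value = 0xff000012345, and
 * sextract64(value << 12, 0, 56) sign-extends bit 55 to recover the full
 * canonical VA handed to the page-flush helpers.
 */
#if 0 /* example only, not compiled */
uint64_t value = 0xff000012345ULL;                  /* VA[55:12] from TLBI */
uint64_t pageaddr = sextract64(value << 12, 0, 56);
/* pageaddr == 0xffff000012345000 */
#endif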
3151
3152 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3153 uint64_t value)
3154 {
3155 /* Invalidate by VA, EL2
3156 * Currently handles both VAE2 and VALE2, since we don't support
3157 * flush-last-level-only.
3158 */
3159 ARMCPU *cpu = arm_env_get_cpu(env);
3160 CPUState *cs = CPU(cpu);
3161 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3162
3163 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3164 }
3165
3166 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3167 uint64_t value)
3168 {
3169 /* Invalidate by VA, EL3
3170 * Currently handles both VAE3 and VALE3, since we don't support
3171 * flush-last-level-only.
3172 */
3173 ARMCPU *cpu = arm_env_get_cpu(env);
3174 CPUState *cs = CPU(cpu);
3175 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3176
3177 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3178 }
3179
3180 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3181 uint64_t value)
3182 {
3183 ARMCPU *cpu = arm_env_get_cpu(env);
3184 CPUState *cs = CPU(cpu);
3185 bool sec = arm_is_secure_below_el3(env);
3186 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3187
3188 if (sec) {
3189 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3190 ARMMMUIdxBit_S1SE1 |
3191 ARMMMUIdxBit_S1SE0);
3192 } else {
3193 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3194 ARMMMUIdxBit_S12NSE1 |
3195 ARMMMUIdxBit_S12NSE0);
3196 }
3197 }
3198
3199 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3200 uint64_t value)
3201 {
3202 CPUState *cs = ENV_GET_CPU(env);
3203 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3204
3205 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3206 ARMMMUIdxBit_S1E2);
3207 }
3208
3209 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3210 uint64_t value)
3211 {
3212 CPUState *cs = ENV_GET_CPU(env);
3213 uint64_t pageaddr = sextract64(value << 12, 0, 56);
3214
3215 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3216 ARMMMUIdxBit_S1E3);
3217 }
3218
3219 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3220 uint64_t value)
3221 {
3222 /* Invalidate by IPA. This has to invalidate any structures that
3223 * contain only stage 2 translation information, but does not need
3224 * to apply to structures that contain combined stage 1 and stage 2
3225 * translation information.
3226 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3227 */
3228 ARMCPU *cpu = arm_env_get_cpu(env);
3229 CPUState *cs = CPU(cpu);
3230 uint64_t pageaddr;
3231
3232 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3233 return;
3234 }
3235
3236 pageaddr = sextract64(value << 12, 0, 48);
3237
3238 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3239 }
3240
3241 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3242 uint64_t value)
3243 {
3244 CPUState *cs = ENV_GET_CPU(env);
3245 uint64_t pageaddr;
3246
3247 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3248 return;
3249 }
3250
3251 pageaddr = sextract64(value << 12, 0, 48);
3252
3253 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3254 ARMMMUIdxBit_S2NS);
3255 }
3256
3257 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3258 bool isread)
3259 {
3260 /* We don't implement EL2, so the only control on DC ZVA is the
3261 * bit in the SCTLR which can prohibit access for EL0.
3262 */
3263 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3264 return CP_ACCESS_TRAP;
3265 }
3266 return CP_ACCESS_OK;
3267 }
3268
3269 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3270 {
3271 ARMCPU *cpu = arm_env_get_cpu(env);
3272 int dzp_bit = 1 << 4;
3273
3274 /* DZP indicates whether DC ZVA access is allowed */
3275 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3276 dzp_bit = 0;
3277 }
3278 return cpu->dcz_blocksize | dzp_bit;
3279 }
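
/* Worked example: cpu->dcz_blocksize is log2 of the block size in words,
 * so dcz_blocksize = 4 describes 16 words = 64 bytes per DC ZVA block.
 * With DC ZVA permitted, DCZID_EL0 then reads as 0x4; with it prohibited
 * the DZP bit (bit 4) is also set and the register reads as 0x14.
 */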
3280
3281 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3282 bool isread)
3283 {
3284 if (!(env->pstate & PSTATE_SP)) {
3285 /* Access to SP_EL0 is undefined if it's being used as
3286 * the stack pointer.
3287 */
3288 return CP_ACCESS_TRAP_UNCATEGORIZED;
3289 }
3290 return CP_ACCESS_OK;
3291 }
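
/* PSTATE_SP here is the SPSel.SP bit: at EL1 with SPSel == 0 the CPU is
 * using SP_EL0 as its stack pointer, so explicit MRS/MSR accesses to
 * SP_EL0 are UNDEFINED and trap as uncategorized above; with SPSel == 1
 * SP_EL0 is just another banked register and the access succeeds.
 */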
3292
3293 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3294 {
3295 return env->pstate & PSTATE_SP;
3296 }
3297
3298 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3299 {
3300 update_spsel(env, val);
3301 }
3302
3303 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3304 uint64_t value)
3305 {
3306 ARMCPU *cpu = arm_env_get_cpu(env);
3307
3308 if (raw_read(env, ri) == value) {
3309 /* Skip the TLB flush if nothing actually changed; Linux likes
3310 * to do a lot of pointless SCTLR writes.
3311 */
3312 return;
3313 }
3314
3315 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3316 /* M bit is RAZ/WI for PMSA with no MPU implemented */
3317 value &= ~SCTLR_M;
3318 }
3319
3320 raw_write(env, ri, value);
3321 /* ??? Lots of these bits are not implemented. */
3322 /* This may enable/disable the MMU, so do a TLB flush. */
3323 tlb_flush(CPU(cpu));
3324 }
3325
3326 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3327 bool isread)
3328 {
3329 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3330 return CP_ACCESS_TRAP_FP_EL2;
3331 }
3332 if (env->cp15.cptr_el[3] & CPTR_TFP) {
3333 return CP_ACCESS_TRAP_FP_EL3;
3334 }
3335 return CP_ACCESS_OK;
3336 }
3337
3338 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3339 uint64_t value)
3340 {
3341 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3342 }
3343
3344 static const ARMCPRegInfo v8_cp_reginfo[] = {
3345 /* Minimal set of EL0-visible registers. This will need to be expanded
3346 * significantly for system emulation of AArch64 CPUs.
3347 */
3348 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3349 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3350 .access = PL0_RW, .type = ARM_CP_NZCV },
3351 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3352 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3353 .type = ARM_CP_NO_RAW,
3354 .access = PL0_RW, .accessfn = aa64_daif_access,
3355 .fieldoffset = offsetof(CPUARMState, daif),
3356 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3357 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3358 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3359 .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3360 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3361 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3362 .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3363 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3364 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3365 .access = PL0_R, .type = ARM_CP_NO_RAW,
3366 .readfn = aa64_dczid_read },
3367 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3368 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3369 .access = PL0_W, .type = ARM_CP_DC_ZVA,
3370 #ifndef CONFIG_USER_ONLY
3371 /* Avoid overhead of an access check that always passes in user-mode */
3372 .accessfn = aa64_zva_access,
3373 #endif
3374 },
3375 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3376 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3377 .access = PL1_R, .type = ARM_CP_CURRENTEL },
3378 /* Cache ops: all NOPs since we don't emulate caches */
3379 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3380 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3381 .access = PL1_W, .type = ARM_CP_NOP },
3382 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3383 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3384 .access = PL1_W, .type = ARM_CP_NOP },
3385 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3386 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3387 .access = PL0_W, .type = ARM_CP_NOP,
3388 .accessfn = aa64_cacheop_access },
3389 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3390 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3391 .access = PL1_W, .type = ARM_CP_NOP },
3392 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3393 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3394 .access = PL1_W, .type = ARM_CP_NOP },
3395 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3396 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3397 .access = PL0_W, .type = ARM_CP_NOP,
3398 .accessfn = aa64_cacheop_access },
3399 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3400 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3401 .access = PL1_W, .type = ARM_CP_NOP },
3402 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3403 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3404 .access = PL0_W, .type = ARM_CP_NOP,
3405 .accessfn = aa64_cacheop_access },
3406 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3407 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3408 .access = PL0_W, .type = ARM_CP_NOP,
3409 .accessfn = aa64_cacheop_access },
3410 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3411 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3412 .access = PL1_W, .type = ARM_CP_NOP },
3413 /* TLBI operations */
3414 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3415 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3416 .access = PL1_W, .type = ARM_CP_NO_RAW,
3417 .writefn = tlbi_aa64_vmalle1is_write },
3418 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3419 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3420 .access = PL1_W, .type = ARM_CP_NO_RAW,
3421 .writefn = tlbi_aa64_vae1is_write },
3422 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3423 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3424 .access = PL1_W, .type = ARM_CP_NO_RAW,
3425 .writefn = tlbi_aa64_vmalle1is_write },
3426 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3427 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3428 .access = PL1_W, .type = ARM_CP_NO_RAW,
3429 .writefn = tlbi_aa64_vae1is_write },
3430 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3431 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3432 .access = PL1_W, .type = ARM_CP_NO_RAW,
3433 .writefn = tlbi_aa64_vae1is_write },
3434 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3435 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3436 .access = PL1_W, .type = ARM_CP_NO_RAW,
3437 .writefn = tlbi_aa64_vae1is_write },
3438 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3439 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3440 .access = PL1_W, .type = ARM_CP_NO_RAW,
3441 .writefn = tlbi_aa64_vmalle1_write },
3442 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3443 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3444 .access = PL1_W, .type = ARM_CP_NO_RAW,
3445 .writefn = tlbi_aa64_vae1_write },
3446 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3447 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3448 .access = PL1_W, .type = ARM_CP_NO_RAW,
3449 .writefn = tlbi_aa64_vmalle1_write },
3450 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3451 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3452 .access = PL1_W, .type = ARM_CP_NO_RAW,
3453 .writefn = tlbi_aa64_vae1_write },
3454 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3455 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3456 .access = PL1_W, .type = ARM_CP_NO_RAW,
3457 .writefn = tlbi_aa64_vae1_write },
3458 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3459 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3460 .access = PL1_W, .type = ARM_CP_NO_RAW,
3461 .writefn = tlbi_aa64_vae1_write },
3462 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3463 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3464 .access = PL2_W, .type = ARM_CP_NO_RAW,
3465 .writefn = tlbi_aa64_ipas2e1is_write },
3466 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3467 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3468 .access = PL2_W, .type = ARM_CP_NO_RAW,
3469 .writefn = tlbi_aa64_ipas2e1is_write },
3470 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3471 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3472 .access = PL2_W, .type = ARM_CP_NO_RAW,
3473 .writefn = tlbi_aa64_alle1is_write },
3474 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3475 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3476 .access = PL2_W, .type = ARM_CP_NO_RAW,
3477 .writefn = tlbi_aa64_alle1is_write },
3478 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3479 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3480 .access = PL2_W, .type = ARM_CP_NO_RAW,
3481 .writefn = tlbi_aa64_ipas2e1_write },
3482 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3483 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3484 .access = PL2_W, .type = ARM_CP_NO_RAW,
3485 .writefn = tlbi_aa64_ipas2e1_write },
3486 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3487 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3488 .access = PL2_W, .type = ARM_CP_NO_RAW,
3489 .writefn = tlbi_aa64_alle1_write },
3490 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3491 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3492 .access = PL2_W, .type = ARM_CP_NO_RAW,
3493 .writefn = tlbi_aa64_alle1_write },
3494 #ifndef CONFIG_USER_ONLY
3495 /* 64 bit address translation operations */
3496 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3497 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3498 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3499 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3500 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3501 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3502 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3503 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3504 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3505 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3506 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3507 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3508 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3509 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3510 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3511 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3512 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3513 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3514 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3515 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3516 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3517 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3518 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3519 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3520 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3521 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3522 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3523 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3524 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3525 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3526 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3527 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3528 .type = ARM_CP_ALIAS,
3529 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3530 .access = PL1_RW, .resetvalue = 0,
3531 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3532 .writefn = par_write },
3533 #endif
3534 /* TLB invalidate last level of translation table walk */
3535 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3536 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3537 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3538 .type = ARM_CP_NO_RAW, .access = PL1_W,
3539 .writefn = tlbimvaa_is_write },
3540 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3541 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3542 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3543 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3544 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3545 .type = ARM_CP_NO_RAW, .access = PL2_W,
3546 .writefn = tlbimva_hyp_write },
3547 { .name = "TLBIMVALHIS",
3548 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3549 .type = ARM_CP_NO_RAW, .access = PL2_W,
3550 .writefn = tlbimva_hyp_is_write },
3551 { .name = "TLBIIPAS2",
3552 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3553 .type = ARM_CP_NO_RAW, .access = PL2_W,
3554 .writefn = tlbiipas2_write },
3555 { .name = "TLBIIPAS2IS",
3556 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3557 .type = ARM_CP_NO_RAW, .access = PL2_W,
3558 .writefn = tlbiipas2_is_write },
3559 { .name = "TLBIIPAS2L",
3560 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3561 .type = ARM_CP_NO_RAW, .access = PL2_W,
3562 .writefn = tlbiipas2_write },
3563 { .name = "TLBIIPAS2LIS",
3564 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3565 .type = ARM_CP_NO_RAW, .access = PL2_W,
3566 .writefn = tlbiipas2_is_write },
3567 /* 32 bit cache operations */
3568 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3569 .type = ARM_CP_NOP, .access = PL1_W },
3570 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3571 .type = ARM_CP_NOP, .access = PL1_W },
3572 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3573 .type = ARM_CP_NOP, .access = PL1_W },
3574 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3575 .type = ARM_CP_NOP, .access = PL1_W },
3576 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3577 .type = ARM_CP_NOP, .access = PL1_W },
3578 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3579 .type = ARM_CP_NOP, .access = PL1_W },
3580 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3581 .type = ARM_CP_NOP, .access = PL1_W },
3582 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3583 .type = ARM_CP_NOP, .access = PL1_W },
3584 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3585 .type = ARM_CP_NOP, .access = PL1_W },
3586 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3587 .type = ARM_CP_NOP, .access = PL1_W },
3588 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3589 .type = ARM_CP_NOP, .access = PL1_W },
3590 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3591 .type = ARM_CP_NOP, .access = PL1_W },
3592 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3593 .type = ARM_CP_NOP, .access = PL1_W },
3594 /* MMU Domain access control / MPU write buffer control */
3595 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3596 .access = PL1_RW, .resetvalue = 0,
3597 .writefn = dacr_write, .raw_writefn = raw_write,
3598 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3599 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3600 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3601 .type = ARM_CP_ALIAS,
3602 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3603 .access = PL1_RW,
3604 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3605 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3606 .type = ARM_CP_ALIAS,
3607 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3608 .access = PL1_RW,
3609 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3610 /* We rely on the access checks not allowing the guest to write to the
3611 * state field when SPSel indicates that it's being used as the stack
3612 * pointer.
3613 */
3614 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3615 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3616 .access = PL1_RW, .accessfn = sp_el0_access,
3617 .type = ARM_CP_ALIAS,
3618 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3619 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3620 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3621 .access = PL2_RW, .type = ARM_CP_ALIAS,
3622 .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3623 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3624 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3625 .type = ARM_CP_NO_RAW,
3626 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3627 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3628 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3629 .type = ARM_CP_ALIAS,
3630 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3631 .access = PL2_RW, .accessfn = fpexc32_access },
3632 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3633 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3634 .access = PL2_RW, .resetvalue = 0,
3635 .writefn = dacr_write, .raw_writefn = raw_write,
3636 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3637 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3638 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3639 .access = PL2_RW, .resetvalue = 0,
3640 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3641 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3642 .type = ARM_CP_ALIAS,
3643 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3644 .access = PL2_RW,
3645 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3646 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3647 .type = ARM_CP_ALIAS,
3648 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3649 .access = PL2_RW,
3650 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3651 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3652 .type = ARM_CP_ALIAS,
3653 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3654 .access = PL2_RW,
3655 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3656 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3657 .type = ARM_CP_ALIAS,
3658 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3659 .access = PL2_RW,
3660 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3661 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3662 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3663 .resetvalue = 0,
3664 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3665 { .name = "SDCR", .type = ARM_CP_ALIAS,
3666 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3667 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3668 .writefn = sdcr_write,
3669 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3670 REGINFO_SENTINEL
3671 };
3672
3673 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
3674 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3675 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3676 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3677 .access = PL2_RW,
3678 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3679 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3680 .type = ARM_CP_NO_RAW,
3681 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3682 .access = PL2_RW,
3683 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3684 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3685 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3686 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3687 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3688 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3689 .access = PL2_RW, .type = ARM_CP_CONST,
3690 .resetvalue = 0 },
3691 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3692 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3693 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3694 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3695 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3696 .access = PL2_RW, .type = ARM_CP_CONST,
3697 .resetvalue = 0 },
3698 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3699 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3700 .access = PL2_RW, .type = ARM_CP_CONST,
3701 .resetvalue = 0 },
3702 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3703 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3704 .access = PL2_RW, .type = ARM_CP_CONST,
3705 .resetvalue = 0 },
3706 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3707 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3708 .access = PL2_RW, .type = ARM_CP_CONST,
3709 .resetvalue = 0 },
3710 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3711 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3712 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3713 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3714 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3715 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3716 .type = ARM_CP_CONST, .resetvalue = 0 },
3717 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3718 .cp = 15, .opc1 = 6, .crm = 2,
3719 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3720 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3721 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3722 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3723 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3724 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3725 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3726 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3727 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3728 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3729 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3730 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3731 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3732 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3733 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3734 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3735 .resetvalue = 0 },
3736 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3737 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3738 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3739 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3740 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3741 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3742 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3743 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3744 .resetvalue = 0 },
3745 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3746 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3747 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3748 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3749 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3750 .resetvalue = 0 },
3751 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3752 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3753 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3754 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3755 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3756 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3757 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3758 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3759 .access = PL2_RW, .accessfn = access_tda,
3760 .type = ARM_CP_CONST, .resetvalue = 0 },
3761 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3762 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3763 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3764 .type = ARM_CP_CONST, .resetvalue = 0 },
3765 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3766 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3767 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3768 REGINFO_SENTINEL
3769 };
3770
3771 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3772 {
3773 ARMCPU *cpu = arm_env_get_cpu(env);
3774 uint64_t valid_mask = HCR_MASK;
3775
3776 if (arm_feature(env, ARM_FEATURE_EL3)) {
3777 valid_mask &= ~HCR_HCD;
3778 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
3779 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
3780 * However, if we're using the SMC PSCI conduit then QEMU is
3781 * effectively acting like EL3 firmware and so the guest at
3782 * EL2 should retain the ability to prevent EL1 from being
3783 * able to make SMC calls into the ersatz firmware, so in
3784 * that case HCR.TSC should be read/write.
3785 */
3786 valid_mask &= ~HCR_TSC;
3787 }
3788
3789 /* Clear RES0 bits. */
3790 value &= valid_mask;
3791
3792 /* These bits change the MMU setup:
3793 * HCR_VM enables stage 2 translation
3794 * HCR_PTW forbids certain page-table setups
3795 * HCR_DC disables stage 1 and enables stage 2 translation
3796 */
3797 if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3798 tlb_flush(CPU(cpu));
3799 }
3800 raw_write(env, ri, value);
3801 }
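
/* For example, on a CPU with EL3 the HCD bit is cleared from valid_mask,
 * so it always reads back as zero; and a write that flips HCR_VM
 * (toggling stage 2 translation) forces the tlb_flush() above, while a
 * write that only changes a bit such as HCR_TWI does not.
 */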
3802
3803 static const ARMCPRegInfo el2_cp_reginfo[] = {
3804 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3805 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3806 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3807 .writefn = hcr_write },
3808 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3809 .type = ARM_CP_ALIAS,
3810 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3811 .access = PL2_RW,
3812 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3813 { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3814 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3815 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3816 { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3817 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3818 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3819 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3820 .type = ARM_CP_ALIAS,
3821 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3822 .access = PL2_RW,
3823 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3824 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3825 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3826 .access = PL2_RW, .writefn = vbar_write,
3827 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3828 .resetvalue = 0 },
3829 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3830 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3831 .access = PL3_RW, .type = ARM_CP_ALIAS,
3832 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3833 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3834 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3835 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3836 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3837 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3838 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3839 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3840 .resetvalue = 0 },
3841 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3842 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3843 .access = PL2_RW, .type = ARM_CP_ALIAS,
3844 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3845 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3846 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3847 .access = PL2_RW, .type = ARM_CP_CONST,
3848 .resetvalue = 0 },
3849 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
3850 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3851 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3852 .access = PL2_RW, .type = ARM_CP_CONST,
3853 .resetvalue = 0 },
3854 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3855 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3856 .access = PL2_RW, .type = ARM_CP_CONST,
3857 .resetvalue = 0 },
3858 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3859 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3860 .access = PL2_RW, .type = ARM_CP_CONST,
3861 .resetvalue = 0 },
3862 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3863 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3864 .access = PL2_RW,
3865 /* no .writefn needed as this can't cause an ASID change;
3866 * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3867 */
3868 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3869 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3870 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3871 .type = ARM_CP_ALIAS,
3872 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3873 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3874 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3875 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3876 .access = PL2_RW,
3877 /* no .writefn needed as this can't cause an ASID change;
3878 * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3879 */
3880 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3881 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3882 .cp = 15, .opc1 = 6, .crm = 2,
3883 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3884 .access = PL2_RW, .accessfn = access_el3_aa32ns,
3885 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3886 .writefn = vttbr_write },
3887 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3888 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3889 .access = PL2_RW, .writefn = vttbr_write,
3890 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3891 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3892 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3893 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3894 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3895 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3896 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3897 .access = PL2_RW, .resetvalue = 0,
3898 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3899 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3900 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3901 .access = PL2_RW, .resetvalue = 0,
3902 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3903 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3904 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3905 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3906 { .name = "TLBIALLNSNH",
3907 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3908 .type = ARM_CP_NO_RAW, .access = PL2_W,
3909 .writefn = tlbiall_nsnh_write },
3910 { .name = "TLBIALLNSNHIS",
3911 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3912 .type = ARM_CP_NO_RAW, .access = PL2_W,
3913 .writefn = tlbiall_nsnh_is_write },
3914 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3915 .type = ARM_CP_NO_RAW, .access = PL2_W,
3916 .writefn = tlbiall_hyp_write },
3917 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3918 .type = ARM_CP_NO_RAW, .access = PL2_W,
3919 .writefn = tlbiall_hyp_is_write },
3920 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3921 .type = ARM_CP_NO_RAW, .access = PL2_W,
3922 .writefn = tlbimva_hyp_write },
3923 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3924 .type = ARM_CP_NO_RAW, .access = PL2_W,
3925 .writefn = tlbimva_hyp_is_write },
3926 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3927 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3928 .type = ARM_CP_NO_RAW, .access = PL2_W,
3929 .writefn = tlbi_aa64_alle2_write },
3930 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
3931 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3932 .type = ARM_CP_NO_RAW, .access = PL2_W,
3933 .writefn = tlbi_aa64_vae2_write },
3934 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
3935 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3936 .access = PL2_W, .type = ARM_CP_NO_RAW,
3937 .writefn = tlbi_aa64_vae2_write },
3938 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
3939 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3940 .access = PL2_W, .type = ARM_CP_NO_RAW,
3941 .writefn = tlbi_aa64_alle2is_write },
3942 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
3943 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3944 .type = ARM_CP_NO_RAW, .access = PL2_W,
3945 .writefn = tlbi_aa64_vae2is_write },
3946 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
3947 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3948 .access = PL2_W, .type = ARM_CP_NO_RAW,
3949 .writefn = tlbi_aa64_vae2is_write },
3950 #ifndef CONFIG_USER_ONLY
3951 /* Unlike the other EL2-related AT operations, these must
3952 * UNDEF from EL3 if EL2 is not implemented, which is why we
3953 * define them here rather than with the rest of the AT ops.
3954 */
3955 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
3956 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3957 .access = PL2_W, .accessfn = at_s1e2_access,
3958 .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3959 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
3960 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3961 .access = PL2_W, .accessfn = at_s1e2_access,
3962 .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3963 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
3964 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
3965 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
3966 * to behave as if SCR.NS was 1.
3967 */
3968 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3969 .access = PL2_W,
3970 .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3971 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3972 .access = PL2_W,
3973 .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3974 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3975 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3976 /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
3977 * reset values as IMPDEF. We choose to reset to 3 to comply with
3978 * both ARMv7 and ARMv8.
3979 */
3980 .access = PL2_RW, .resetvalue = 3,
3981 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
3982 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3983 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3984 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3985 .writefn = gt_cntvoff_write,
3986 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3987 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3988 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
3989 .writefn = gt_cntvoff_write,
3990 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3991 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3992 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3993 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3994 .type = ARM_CP_IO, .access = PL2_RW,
3995 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3996 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3997 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3998 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
3999 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4000 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4001 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4002 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4003 .resetfn = gt_hyp_timer_reset,
4004 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4005 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4006 .type = ARM_CP_IO,
4007 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4008 .access = PL2_RW,
4009 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4010 .resetvalue = 0,
4011 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4012 #endif
4013 /* The only field of MDCR_EL2 that has a defined architectural reset value
4014 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
4015 * don't implement any PMU event counters, so using zero as a reset
4016 * value for MDCR_EL2 is okay.
4017 */
4018 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4019 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4020 .access = PL2_RW, .resetvalue = 0,
4021 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4022 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4023 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4024 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4025 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4026 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4027 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4028 .access = PL2_RW,
4029 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4030 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4031 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4032 .access = PL2_RW,
4033 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4034 REGINFO_SENTINEL
4035 };
4036
4037 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4038 bool isread)
4039 {
4040 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4041 * At Secure EL1 it traps to EL3.
4042 */
4043 if (arm_current_el(env) == 3) {
4044 return CP_ACCESS_OK;
4045 }
4046 if (arm_is_secure_below_el3(env)) {
4047 return CP_ACCESS_TRAP_EL3;
4048 }
4049 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
4050 if (isread) {
4051 return CP_ACCESS_OK;
4052 }
4053 return CP_ACCESS_TRAP_UNCATEGORIZED;
4054 }
4055
4056 static const ARMCPRegInfo el3_cp_reginfo[] = {
4057 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
4058 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
4059 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
4060 .resetvalue = 0, .writefn = scr_write },
4061 { .name = "SCR", .type = ARM_CP_ALIAS,
4062 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
4063 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4064 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4065 .writefn = scr_write },
4066 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4067 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4068 .access = PL3_RW, .resetvalue = 0,
4069 .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4070 { .name = "SDER",
4071 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4072 .access = PL3_RW, .resetvalue = 0,
4073 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4074 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4075 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4076 .writefn = vbar_write, .resetvalue = 0,
4077 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4078 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4079 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4080 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
4081 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4082 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4083 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4084 .access = PL3_RW,
4085 /* no .writefn needed as this can't cause an ASID change;
4086 * we must provide a .raw_writefn and .resetfn because we handle
4087 * reset and migration for the AArch32 TTBCR(S), which might be
4088 * using mask and base_mask.
4089 */
4090 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
4091 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4092 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4093 .type = ARM_CP_ALIAS,
4094 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
4095 .access = PL3_RW,
4096 .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
4097 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
4098 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
4099 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
4100 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
4101 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
4102 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
4103 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
4104 .type = ARM_CP_ALIAS,
4105 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
4106 .access = PL3_RW,
4107 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
4108 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
4109 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
4110 .access = PL3_RW, .writefn = vbar_write,
4111 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
4112 .resetvalue = 0 },
4113 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
4114 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
4115 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
4116 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
4117 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
4118 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
4119 .access = PL3_RW, .resetvalue = 0,
4120 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
4121 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
4122 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
4123 .access = PL3_RW, .type = ARM_CP_CONST,
4124 .resetvalue = 0 },
4125 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
4126 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
4127 .access = PL3_RW, .type = ARM_CP_CONST,
4128 .resetvalue = 0 },
4129 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
4130 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
4131 .access = PL3_RW, .type = ARM_CP_CONST,
4132 .resetvalue = 0 },
4133 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
4134 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
4135 .access = PL3_W, .type = ARM_CP_NO_RAW,
4136 .writefn = tlbi_aa64_alle3is_write },
4137 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
4138 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
4139 .access = PL3_W, .type = ARM_CP_NO_RAW,
4140 .writefn = tlbi_aa64_vae3is_write },
4141 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
4142 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
4143 .access = PL3_W, .type = ARM_CP_NO_RAW,
4144 .writefn = tlbi_aa64_vae3is_write },
4145 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
4146 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
4147 .access = PL3_W, .type = ARM_CP_NO_RAW,
4148 .writefn = tlbi_aa64_alle3_write },
4149 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
4150 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
4151 .access = PL3_W, .type = ARM_CP_NO_RAW,
4152 .writefn = tlbi_aa64_vae3_write },
4153 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
4154 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
4155 .access = PL3_W, .type = ARM_CP_NO_RAW,
4156 .writefn = tlbi_aa64_vae3_write },
4157 REGINFO_SENTINEL
4158 };
4159
4160 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4161 bool isread)
4162 {
4163 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
4164 * but the AArch32 CTR has its own reginfo struct)
4165 */
4166 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4167 return CP_ACCESS_TRAP;
4168 }
4169 return CP_ACCESS_OK;
4170 }
4171
4172 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4173 uint64_t value)
4174 {
4175 /* Writes to OSLAR_EL1 may update the OS lock status, which can be
4176 * read via a bit in OSLSR_EL1.
4177 */
4178 int oslock;
4179
4180 if (ri->state == ARM_CP_STATE_AA32) {
4181 oslock = (value == 0xC5ACCE55);
4182 } else {
4183 oslock = value & 1;
4184 }
4185
4186 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4187 }
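
/* Illustration of the two encodings handled above: a 32-bit DBGOSLAR
 * write sets the lock only for the key value 0xC5ACCE55 (any other value
 * clears it), while a 64-bit OSLAR_EL1 write uses bit 0 directly. Either
 * way the result is deposited into OSLSR_EL1.OSLK (bit 1), so e.g. a
 * 32-bit write of 0xC5ACCE55 makes OSLSR_EL1 read with bit 1 set.
 */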
4188
4189 static const ARMCPRegInfo debug_cp_reginfo[] = {
4190 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
4191 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
4192 * unlike DBGDRAR it is never accessible from EL0.
4193 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4194 * accessor.
4195 */
4196 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4197 .access = PL0_R, .accessfn = access_tdra,
4198 .type = ARM_CP_CONST, .resetvalue = 0 },
4199 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4200 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4201 .access = PL1_R, .accessfn = access_tdra,
4202 .type = ARM_CP_CONST, .resetvalue = 0 },
4203 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4204 .access = PL0_R, .accessfn = access_tdra,
4205 .type = ARM_CP_CONST, .resetvalue = 0 },
4206 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4207 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4208 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4209 .access = PL1_RW, .accessfn = access_tda,
4210 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4211 .resetvalue = 0 },
4212 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4213 * We don't implement the configurable EL0 access.
4214 */
4215 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4216 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4217 .type = ARM_CP_ALIAS,
4218 .access = PL1_R, .accessfn = access_tda,
4219 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4220 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4221 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4222 .access = PL1_W, .type = ARM_CP_NO_RAW,
4223 .accessfn = access_tdosa,
4224 .writefn = oslar_write },
4225 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4226 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4227 .access = PL1_R, .resetvalue = 10,
4228 .accessfn = access_tdosa,
4229 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4230 /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4231 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4232 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4233 .access = PL1_RW, .accessfn = access_tdosa,
4234 .type = ARM_CP_NOP },
4235 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4236 * implement vector catch debug events yet.
4237 */
4238 { .name = "DBGVCR",
4239 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4240 .access = PL1_RW, .accessfn = access_tda,
4241 .type = ARM_CP_NOP },
4242 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
4243 * to save and restore a 32-bit guest's DBGVCR)
4244 */
4245 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
4246 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
4247 .access = PL2_RW, .accessfn = access_tda,
4248 .type = ARM_CP_NOP },
4249 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
4250 * Channel but Linux may try to access this register. The 32-bit
4251 * alias is DBGDCCINT.
4252 */
4253 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
4254 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4255 .access = PL1_RW, .accessfn = access_tda,
4256 .type = ARM_CP_NOP },
4257 REGINFO_SENTINEL
4258 };
4259
4260 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4261 /* 64 bit access versions of the (dummy) debug registers */
4262 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4263 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4264 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4265 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4266 REGINFO_SENTINEL
4267 };
4268
4269 void hw_watchpoint_update(ARMCPU *cpu, int n)
4270 {
4271 CPUARMState *env = &cpu->env;
4272 vaddr len = 0;
4273 vaddr wvr = env->cp15.dbgwvr[n];
4274 uint64_t wcr = env->cp15.dbgwcr[n];
4275 int mask;
4276 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4277
4278 if (env->cpu_watchpoint[n]) {
4279 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4280 env->cpu_watchpoint[n] = NULL;
4281 }
4282
4283 if (!extract64(wcr, 0, 1)) {
4284 /* E bit clear : watchpoint disabled */
4285 return;
4286 }
4287
4288 switch (extract64(wcr, 3, 2)) {
4289 case 0:
4290 /* LSC 00 is reserved and must behave as if the wp is disabled */
4291 return;
4292 case 1:
4293 flags |= BP_MEM_READ;
4294 break;
4295 case 2:
4296 flags |= BP_MEM_WRITE;
4297 break;
4298 case 3:
4299 flags |= BP_MEM_ACCESS;
4300 break;
4301 }
4302
4303 /* Attempts to use both MASK and BAS fields simultaneously are
4304 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4305 * thus generating a watchpoint for every byte in the masked region.
4306 */
4307 mask = extract64(wcr, 24, 4);
4308 if (mask == 1 || mask == 2) {
4309 /* Reserved values of MASK; we must act as if the mask value was
4310 * some non-reserved value, or as if the watchpoint were disabled.
4311 * We choose the latter.
4312 */
4313 return;
4314 } else if (mask) {
4315 /* Watchpoint covers an aligned area up to 2GB in size */
4316 len = 1ULL << mask;
4317 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4318 * whether the watchpoint fires when the unmasked bits match; we opt
4319 * to generate the exceptions.
4320 */
4321 wvr &= ~(len - 1);
4322 } else {
4323 /* Watchpoint covers bytes defined by the byte address select bits */
4324 int bas = extract64(wcr, 5, 8);
4325 int basstart;
4326
4327 if (bas == 0) {
4328 /* This must act as if the watchpoint is disabled */
4329 return;
4330 }
4331
4332 if (extract64(wvr, 2, 1)) {
4333 /* Deprecated case of an address that is only 4-byte aligned. BAS[7:4] are
4334 * ignored, and BAS[3:0] define which bytes to watch.
4335 */
4336 bas &= 0xf;
4337 }
4338 /* The BAS bits are supposed to be programmed to indicate a contiguous
4339 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4340 * we fire for each byte in the word/doubleword addressed by the WVR.
4341 * We choose to ignore any non-zero bits after the first range of 1s.
4342 */
4343 basstart = ctz32(bas);
4344 len = cto32(bas >> basstart);
4345 wvr += basstart;
4346 }
4347
4348 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4349 &env->cpu_watchpoint[n]);
4350 }
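
/* Two worked examples of the decode above (illustrative values only):
 * with WCR.MASK = 3, len = 1 << 3 = 8 and WVR is rounded down to an
 * 8-byte boundary, watching an aligned 8-byte region. With MASK = 0 and
 * BAS = 0b00111100, basstart = ctz32(0x3c) = 2 and
 * len = cto32(0x3c >> 2) = 4, so the watchpoint covers the four bytes
 * WVR+2 .. WVR+5.
 */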
4351
4352 void hw_watchpoint_update_all(ARMCPU *cpu)
4353 {
4354 int i;
4355 CPUARMState *env = &cpu->env;
4356
4357 /* Completely clear out existing QEMU watchpoints and our array, to
4358 * avoid possible stale entries following migration load.
4359 */
4360 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4361 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4362
4363 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4364 hw_watchpoint_update(cpu, i);
4365 }
4366 }
4367
4368 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4369 uint64_t value)
4370 {
4371 ARMCPU *cpu = arm_env_get_cpu(env);
4372 int i = ri->crm;
4373
4374 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4375 * register reads back and behaves as if written values were sign extended.
4376 * Bits [1:0] are RES0.
4377 */
4378 value = sextract64(value, 0, 49) & ~3ULL;
4379
4380 raw_write(env, ri, value);
4381 hw_watchpoint_update(cpu, i);
4382 }
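
/* For instance, a write of 0x0001000000000007 (bit 48 set) is stored as
 *   sextract64(value, 0, 49) & ~3ULL = 0xffff000000000004
 * i.e. bit 48 is replicated into bits [63:49] and the two RES0 low bits
 * are cleared.
 */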
4383
4384 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4385 uint64_t value)
4386 {
4387 ARMCPU *cpu = arm_env_get_cpu(env);
4388 int i = ri->crm;
4389
4390 raw_write(env, ri, value);
4391 hw_watchpoint_update(cpu, i);
4392 }
4393
4394 void hw_breakpoint_update(ARMCPU *cpu, int n)
4395 {
4396 CPUARMState *env = &cpu->env;
4397 uint64_t bvr = env->cp15.dbgbvr[n];
4398 uint64_t bcr = env->cp15.dbgbcr[n];
4399 vaddr addr;
4400 int bt;
4401 int flags = BP_CPU;
4402
4403 if (env->cpu_breakpoint[n]) {
4404 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4405 env->cpu_breakpoint[n] = NULL;
4406 }
4407
4408 if (!extract64(bcr, 0, 1)) {
4409 /* E bit clear : breakpoint disabled */
4410 return;
4411 }
4412
4413 bt = extract64(bcr, 20, 4);
4414
4415 switch (bt) {
4416 case 4: /* unlinked address mismatch (reserved if AArch64) */
4417 case 5: /* linked address mismatch (reserved if AArch64) */
4418 qemu_log_mask(LOG_UNIMP,
4419 "arm: address mismatch breakpoint types not implemented");
4420 return;
4421 case 0: /* unlinked address match */
4422 case 1: /* linked address match */
4423 {
4424 /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4425 * we behave as if the register was sign extended. Bits [1:0] are
4426 * RES0. The BAS field is used to allow setting breakpoints on 16
4427 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4428 * a bp will fire if the addresses covered by the bp and the addresses
4429 * covered by the insn overlap but the insn doesn't start at the
4430 * start of the bp address range. We choose to require the insn and
4431 * the bp to have the same address. The constraints on writing to
4432 * BAS enforced in dbgbcr_write mean we have only four cases:
4433 * 0b0000 => no breakpoint
4434 * 0b0011 => breakpoint on addr
4435 * 0b1100 => breakpoint on addr + 2
4436 * 0b1111 => breakpoint on addr
4437 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4438 */
4439 int bas = extract64(bcr, 5, 4);
4440 addr = sextract64(bvr, 0, 49) & ~3ULL;
4441 if (bas == 0) {
4442 return;
4443 }
4444 if (bas == 0xc) {
4445 addr += 2;
4446 }
4447 break;
4448 }
4449 case 2: /* unlinked context ID match */
4450 case 8: /* unlinked VMID match (reserved if no EL2) */
4451 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4452 qemu_log_mask(LOG_UNIMP,
4453 "arm: unlinked context breakpoint types not implemented");
4454 return;
4455 case 9: /* linked VMID match (reserved if no EL2) */
4456 case 11: /* linked context ID and VMID match (reserved if no EL2) */
4457 case 3: /* linked context ID match */
4458 default:
4459 /* We must generate no events for linked context matches (unless
4460 * they are linked to by some other bp/wp, which is handled in
4461 * updates for the linking bp/wp). We choose to also generate no events
4462 * for reserved values.
4463 */
4464 return;
4465 }
4466
4467 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4468 }
4469
4470 void hw_breakpoint_update_all(ARMCPU *cpu)
4471 {
4472 int i;
4473 CPUARMState *env = &cpu->env;
4474
4475 /* Completely clear out existing QEMU breakpoints and our array, to
4476 * avoid possible stale entries following migration load.
4477 */
4478 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4479 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4480
4481 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4482 hw_breakpoint_update(cpu, i);
4483 }
4484 }
4485
4486 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4487 uint64_t value)
4488 {
4489 ARMCPU *cpu = arm_env_get_cpu(env);
4490 int i = ri->crm;
4491
4492 raw_write(env, ri, value);
4493 hw_breakpoint_update(cpu, i);
4494 }
4495
4496 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4497 uint64_t value)
4498 {
4499 ARMCPU *cpu = arm_env_get_cpu(env);
4500 int i = ri->crm;
4501
4502 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4503 * copy of BAS[0].
4504 */
4505 value = deposit64(value, 6, 1, extract64(value, 5, 1));
4506 value = deposit64(value, 8, 1, extract64(value, 7, 1));
4507
4508 raw_write(env, ri, value);
4509 hw_breakpoint_update(cpu, i);
4510 }
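
/* The two deposit64() calls above copy bit 5 into bit 6 and bit 7 into
 * bit 8, so a guest write of BAS = 0b0001 is stored as 0b0011 and one of
 * BAS = 0b0100 as 0b1100. Together with BAS = 0b0000 and 0b1111 these
 * are exactly the four cases handled in hw_breakpoint_update().
 */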
4511
4512 static void define_debug_regs(ARMCPU *cpu)
4513 {
4514 /* Define v7 and v8 architectural debug registers.
4515 * These are just dummy implementations for now.
4516 */
4517 int i;
4518 int wrps, brps, ctx_cmps;
4519 ARMCPRegInfo dbgdidr = {
4520 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4521 .access = PL0_R, .accessfn = access_tda,
4522 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4523 };
4524
4525 /* Note that all these register fields hold "number of Xs minus 1". */
4526 brps = extract32(cpu->dbgdidr, 24, 4);
4527 wrps = extract32(cpu->dbgdidr, 28, 4);
4528 ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
4529
4530 assert(ctx_cmps <= brps);
4531
4532 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4533 * of the debug registers such as number of breakpoints;
4534 * check that if they both exist then they agree.
4535 */
4536 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4537 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4538 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4539 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4540 }
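
/* For instance, a DBGDIDR of 0x3516d000 (as used for the Cortex-A57)
 * encodes WRPs = 3 and BRPs = 5, so the loops below define six
 * DBGBVR/DBGBCR pairs and four DBGWVR/DBGWCR pairs.
 */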
4541
4542 define_one_arm_cp_reg(cpu, &dbgdidr);
4543 define_arm_cp_regs(cpu, debug_cp_reginfo);
4544
4545 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4546 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4547 }
4548
4549 for (i = 0; i < brps + 1; i++) {
4550 ARMCPRegInfo dbgregs[] = {
4551 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4552 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4553 .access = PL1_RW, .accessfn = access_tda,
4554 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4555 .writefn = dbgbvr_write, .raw_writefn = raw_write
4556 },
4557 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4558 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4559 .access = PL1_RW, .accessfn = access_tda,
4560 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4561 .writefn = dbgbcr_write, .raw_writefn = raw_write
4562 },
4563 REGINFO_SENTINEL
4564 };
4565 define_arm_cp_regs(cpu, dbgregs);
4566 }
4567
4568 for (i = 0; i < wrps + 1; i++) {
4569 ARMCPRegInfo dbgregs[] = {
4570 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4571 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4572 .access = PL1_RW, .accessfn = access_tda,
4573 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4574 .writefn = dbgwvr_write, .raw_writefn = raw_write
4575 },
4576 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4577 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4578 .access = PL1_RW, .accessfn = access_tda,
4579 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4580 .writefn = dbgwcr_write, .raw_writefn = raw_write
4581 },
4582 REGINFO_SENTINEL
4583 };
4584 define_arm_cp_regs(cpu, dbgregs);
4585 }
4586 }
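/* Explanatory note, not from the original source: in the two loops above
 * each breakpoint/watchpoint index is encoded in .crm, which is why
 * dbgbvr_write() and dbgbcr_write() can recover the register number with
 * "i = ri->crm" and refresh only that one QEMU breakpoint.
 */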
4587
4588 /* We don't know until after realize whether there's a GICv3
4589 * attached, and that is what registers the gicv3 sysregs.
4590 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and ID_AA64PFR0_EL1
4591 * at runtime.
4592 */
4593 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
4594 {
4595 ARMCPU *cpu = arm_env_get_cpu(env);
4596 uint64_t pfr1 = cpu->id_pfr1;
4597
4598 if (env->gicv3state) {
4599 pfr1 |= 1 << 28;
4600 }
4601 return pfr1;
4602 }
4603
4604 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
4605 {
4606 ARMCPU *cpu = arm_env_get_cpu(env);
4607 uint64_t pfr0 = cpu->id_aa64pfr0;
4608
4609 if (env->gicv3state) {
4610 pfr0 |= 1 << 24;
4611 }
4612 return pfr0;
4613 }
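/* In both readfns above the OR'd-in constant sets a 4-bit GIC field to 1,
 * meaning "GIC system registers present": ID_PFR1.GIC is bits [31:28] and
 * ID_AA64PFR0_EL1.GIC is bits [27:24]. A hypothetical sketch of the same
 * update written with the generic field helper (equivalent to the shift
 * above when the field resets to 0):
 */
static inline uint64_t id_aa64pfr0_set_gic_sketch(uint64_t pfr0)
{
/* illustration only: deposit 1 into the GIC field, bits [27:24] */
return deposit64(pfr0, 24, 4, 1);
}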
4614
4615 void register_cp_regs_for_features(ARMCPU *cpu)
4616 {
4617 /* Register all the coprocessor registers based on feature bits */
4618 CPUARMState *env = &cpu->env;
4619 if (arm_feature(env, ARM_FEATURE_M)) {
4620 /* M profile has no coprocessor registers */
4621 return;
4622 }
4623
4624 define_arm_cp_regs(cpu, cp_reginfo);
4625 if (!arm_feature(env, ARM_FEATURE_V8)) {
4626 /* Must go early as it is full of wildcards that may be
4627 * overridden by later definitions.
4628 */
4629 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4630 }
4631
4632 if (arm_feature(env, ARM_FEATURE_V6)) {
4633 /* The ID registers all have impdef reset values */
4634 ARMCPRegInfo v6_idregs[] = {
4635 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4636 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4637 .access = PL1_R, .type = ARM_CP_CONST,
4638 .resetvalue = cpu->id_pfr0 },
4639 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
4640 * the value of the GIC field until after we define these regs.
4641 */
4642 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4643 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4644 .access = PL1_R, .type = ARM_CP_NO_RAW,
4645 .readfn = id_pfr1_read,
4646 .writefn = arm_cp_write_ignore },
4647 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4648 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4649 .access = PL1_R, .type = ARM_CP_CONST,
4650 .resetvalue = cpu->id_dfr0 },
4651 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4652 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4653 .access = PL1_R, .type = ARM_CP_CONST,
4654 .resetvalue = cpu->id_afr0 },
4655 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4656 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4657 .access = PL1_R, .type = ARM_CP_CONST,
4658 .resetvalue = cpu->id_mmfr0 },
4659 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4660 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4661 .access = PL1_R, .type = ARM_CP_CONST,
4662 .resetvalue = cpu->id_mmfr1 },
4663 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4664 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4665 .access = PL1_R, .type = ARM_CP_CONST,
4666 .resetvalue = cpu->id_mmfr2 },
4667 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4668 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4669 .access = PL1_R, .type = ARM_CP_CONST,
4670 .resetvalue = cpu->id_mmfr3 },
4671 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4672 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4673 .access = PL1_R, .type = ARM_CP_CONST,
4674 .resetvalue = cpu->id_isar0 },
4675 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4676 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4677 .access = PL1_R, .type = ARM_CP_CONST,
4678 .resetvalue = cpu->id_isar1 },
4679 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4680 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4681 .access = PL1_R, .type = ARM_CP_CONST,
4682 .resetvalue = cpu->id_isar2 },
4683 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4684 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4685 .access = PL1_R, .type = ARM_CP_CONST,
4686 .resetvalue = cpu->id_isar3 },
4687 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4688 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4689 .access = PL1_R, .type = ARM_CP_CONST,
4690 .resetvalue = cpu->id_isar4 },
4691 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4692 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4693 .access = PL1_R, .type = ARM_CP_CONST,
4694 .resetvalue = cpu->id_isar5 },
4695 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4696 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4697 .access = PL1_R, .type = ARM_CP_CONST,
4698 .resetvalue = cpu->id_mmfr4 },
4699 /* 7 is as yet unallocated and must RAZ */
4700 { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
4701 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4702 .access = PL1_R, .type = ARM_CP_CONST,
4703 .resetvalue = 0 },
4704 REGINFO_SENTINEL
4705 };
4706 define_arm_cp_regs(cpu, v6_idregs);
4707 define_arm_cp_regs(cpu, v6_cp_reginfo);
4708 } else {
4709 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4710 }
4711 if (arm_feature(env, ARM_FEATURE_V6K)) {
4712 define_arm_cp_regs(cpu, v6k_cp_reginfo);
4713 }
4714 if (arm_feature(env, ARM_FEATURE_V7MP) &&
4715 !arm_feature(env, ARM_FEATURE_PMSA)) {
4716 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4717 }
4718 if (arm_feature(env, ARM_FEATURE_V7)) {
4719 /* v7 performance monitor control register: same implementor
4720 * field as main ID register, and we implement only the cycle
4721 * count register.
4722 */
4723 #ifndef CONFIG_USER_ONLY
4724 ARMCPRegInfo pmcr = {
4725 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4726 .access = PL0_RW,
4727 .type = ARM_CP_IO | ARM_CP_ALIAS,
4728 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4729 .accessfn = pmreg_access, .writefn = pmcr_write,
4730 .raw_writefn = raw_write,
4731 };
4732 ARMCPRegInfo pmcr64 = {
4733 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4734 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4735 .access = PL0_RW, .accessfn = pmreg_access,
4736 .type = ARM_CP_IO,
4737 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4738 .resetvalue = cpu->midr & 0xff000000,
4739 .writefn = pmcr_write, .raw_writefn = raw_write,
4740 };
4741 define_one_arm_cp_reg(cpu, &pmcr);
4742 define_one_arm_cp_reg(cpu, &pmcr64);
4743 #endif
4744 ARMCPRegInfo clidr = {
4745 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4746 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4747 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4748 };
4749 define_one_arm_cp_reg(cpu, &clidr);
4750 define_arm_cp_regs(cpu, v7_cp_reginfo);
4751 define_debug_regs(cpu);
4752 } else {
4753 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4754 }
4755 if (arm_feature(env, ARM_FEATURE_V8)) {
4756 /* AArch64 ID registers, which all have impdef reset values.
4757 * Note that within the ID register ranges the unused slots
4758 * must all RAZ, not UNDEF; future architecture versions may
4759 * define new registers here.
4760 */
4761 ARMCPRegInfo v8_idregs[] = {
4762 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
4763 * know the right value for the GIC field until after we
4764 * define these regs.
4765 */
4766 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4767 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4768 .access = PL1_R, .type = ARM_CP_NO_RAW,
4769 .readfn = id_aa64pfr0_read,
4770 .writefn = arm_cp_write_ignore },
4771 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4772 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4773 .access = PL1_R, .type = ARM_CP_CONST,
4774 .resetvalue = cpu->id_aa64pfr1},
4775 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4776 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4777 .access = PL1_R, .type = ARM_CP_CONST,
4778 .resetvalue = 0 },
4779 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4780 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4781 .access = PL1_R, .type = ARM_CP_CONST,
4782 .resetvalue = 0 },
4783 { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4784 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4785 .access = PL1_R, .type = ARM_CP_CONST,
4786 .resetvalue = 0 },
4787 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4788 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4789 .access = PL1_R, .type = ARM_CP_CONST,
4790 .resetvalue = 0 },
4791 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4792 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4793 .access = PL1_R, .type = ARM_CP_CONST,
4794 .resetvalue = 0 },
4795 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4796 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4797 .access = PL1_R, .type = ARM_CP_CONST,
4798 .resetvalue = 0 },
4799 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4800 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4801 .access = PL1_R, .type = ARM_CP_CONST,
4802 .resetvalue = cpu->id_aa64dfr0 },
4803 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4804 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4805 .access = PL1_R, .type = ARM_CP_CONST,
4806 .resetvalue = cpu->id_aa64dfr1 },
4807 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4808 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4809 .access = PL1_R, .type = ARM_CP_CONST,
4810 .resetvalue = 0 },
4811 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4812 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4813 .access = PL1_R, .type = ARM_CP_CONST,
4814 .resetvalue = 0 },
4815 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4816 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4817 .access = PL1_R, .type = ARM_CP_CONST,
4818 .resetvalue = cpu->id_aa64afr0 },
4819 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4820 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4821 .access = PL1_R, .type = ARM_CP_CONST,
4822 .resetvalue = cpu->id_aa64afr1 },
4823 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4824 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
4825 .access = PL1_R, .type = ARM_CP_CONST,
4826 .resetvalue = 0 },
4827 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4828 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
4829 .access = PL1_R, .type = ARM_CP_CONST,
4830 .resetvalue = 0 },
4831 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
4832 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
4833 .access = PL1_R, .type = ARM_CP_CONST,
4834 .resetvalue = cpu->id_aa64isar0 },
4835 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
4836 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
4837 .access = PL1_R, .type = ARM_CP_CONST,
4838 .resetvalue = cpu->id_aa64isar1 },
4839 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4840 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
4841 .access = PL1_R, .type = ARM_CP_CONST,
4842 .resetvalue = 0 },
4843 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4844 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
4845 .access = PL1_R, .type = ARM_CP_CONST,
4846 .resetvalue = 0 },
4847 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4848 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
4849 .access = PL1_R, .type = ARM_CP_CONST,
4850 .resetvalue = 0 },
4851 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4852 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
4853 .access = PL1_R, .type = ARM_CP_CONST,
4854 .resetvalue = 0 },
4855 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4856 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
4857 .access = PL1_R, .type = ARM_CP_CONST,
4858 .resetvalue = 0 },
4859 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4860 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
4861 .access = PL1_R, .type = ARM_CP_CONST,
4862 .resetvalue = 0 },
4863 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
4864 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4865 .access = PL1_R, .type = ARM_CP_CONST,
4866 .resetvalue = cpu->id_aa64mmfr0 },
4867 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
4868 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
4869 .access = PL1_R, .type = ARM_CP_CONST,
4870 .resetvalue = cpu->id_aa64mmfr1 },
4871 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4872 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
4873 .access = PL1_R, .type = ARM_CP_CONST,
4874 .resetvalue = 0 },
4875 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4876 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
4877 .access = PL1_R, .type = ARM_CP_CONST,
4878 .resetvalue = 0 },
4879 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4880 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
4881 .access = PL1_R, .type = ARM_CP_CONST,
4882 .resetvalue = 0 },
4883 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4884 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
4885 .access = PL1_R, .type = ARM_CP_CONST,
4886 .resetvalue = 0 },
4887 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4888 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
4889 .access = PL1_R, .type = ARM_CP_CONST,
4890 .resetvalue = 0 },
4891 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4892 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
4893 .access = PL1_R, .type = ARM_CP_CONST,
4894 .resetvalue = 0 },
4895 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
4896 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
4897 .access = PL1_R, .type = ARM_CP_CONST,
4898 .resetvalue = cpu->mvfr0 },
4899 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
4900 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
4901 .access = PL1_R, .type = ARM_CP_CONST,
4902 .resetvalue = cpu->mvfr1 },
4903 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
4904 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
4905 .access = PL1_R, .type = ARM_CP_CONST,
4906 .resetvalue = cpu->mvfr2 },
4907 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4908 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
4909 .access = PL1_R, .type = ARM_CP_CONST,
4910 .resetvalue = 0 },
4911 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4912 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
4913 .access = PL1_R, .type = ARM_CP_CONST,
4914 .resetvalue = 0 },
4915 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4916 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
4917 .access = PL1_R, .type = ARM_CP_CONST,
4918 .resetvalue = 0 },
4919 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4920 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
4921 .access = PL1_R, .type = ARM_CP_CONST,
4922 .resetvalue = 0 },
4923 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4924 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
4925 .access = PL1_R, .type = ARM_CP_CONST,
4926 .resetvalue = 0 },
4927 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
4928 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
4929 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4930 .resetvalue = cpu->pmceid0 },
4931 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
4932 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
4933 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4934 .resetvalue = cpu->pmceid0 },
4935 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
4936 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
4937 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4938 .resetvalue = cpu->pmceid1 },
4939 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
4940 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
4941 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4942 .resetvalue = cpu->pmceid1 },
4943 REGINFO_SENTINEL
4944 };
4945 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
4946 if (!arm_feature(env, ARM_FEATURE_EL3) &&
4947 !arm_feature(env, ARM_FEATURE_EL2)) {
4948 ARMCPRegInfo rvbar = {
4949 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
4950 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4951 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
4952 };
4953 define_one_arm_cp_reg(cpu, &rvbar);
4954 }
4955 define_arm_cp_regs(cpu, v8_idregs);
4956 define_arm_cp_regs(cpu, v8_cp_reginfo);
4957 }
4958 if (arm_feature(env, ARM_FEATURE_EL2)) {
4959 uint64_t vmpidr_def = mpidr_read_val(env);
4960 ARMCPRegInfo vpidr_regs[] = {
4961 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
4962 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4963 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4964 .resetvalue = cpu->midr,
4965 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4966 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
4967 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4968 .access = PL2_RW, .resetvalue = cpu->midr,
4969 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4970 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
4971 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4972 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4973 .resetvalue = vmpidr_def,
4974 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4975 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
4976 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4977 .access = PL2_RW,
4978 .resetvalue = vmpidr_def,
4979 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4980 REGINFO_SENTINEL
4981 };
4982 define_arm_cp_regs(cpu, vpidr_regs);
4983 define_arm_cp_regs(cpu, el2_cp_reginfo);
4984 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
4985 if (!arm_feature(env, ARM_FEATURE_EL3)) {
4986 ARMCPRegInfo rvbar = {
4987 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
4988 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
4989 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
4990 };
4991 define_one_arm_cp_reg(cpu, &rvbar);
4992 }
4993 } else {
4994 /* If EL2 is missing but higher ELs are enabled, we need to
4995 * register the no_el2 reginfos.
4996 */
4997 if (arm_feature(env, ARM_FEATURE_EL3)) {
4998 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
4999 * of MIDR_EL1 and MPIDR_EL1.
5000 */
5001 ARMCPRegInfo vpidr_regs[] = {
5002 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5003 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5004 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5005 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
5006 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5007 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5008 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5009 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5010 .type = ARM_CP_NO_RAW,
5011 .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
5012 REGINFO_SENTINEL
5013 };
5014 define_arm_cp_regs(cpu, vpidr_regs);
5015 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
5016 }
5017 }
5018 if (arm_feature(env, ARM_FEATURE_EL3)) {
5019 define_arm_cp_regs(cpu, el3_cp_reginfo);
5020 ARMCPRegInfo el3_regs[] = {
5021 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
5022 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
5023 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
5024 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
5025 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
5026 .access = PL3_RW,
5027 .raw_writefn = raw_write, .writefn = sctlr_write,
5028 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
5029 .resetvalue = cpu->reset_sctlr },
5030 REGINFO_SENTINEL
5031 };
5032
5033 define_arm_cp_regs(cpu, el3_regs);
5034 }
5035 /* The behaviour of NSACR is sufficiently various that we don't
5036 * try to describe it in a single reginfo:
5037 * if EL3 is 64 bit, then trap to EL3 from S EL1,
5038 * reads as constant 0xc00 from NS EL1 and NS EL2
5039 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
5040 * if v7 without EL3, register doesn't exist
5041 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
5042 */
5043 if (arm_feature(env, ARM_FEATURE_EL3)) {
5044 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5045 ARMCPRegInfo nsacr = {
5046 .name = "NSACR", .type = ARM_CP_CONST,
5047 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5048 .access = PL1_RW, .accessfn = nsacr_access,
5049 .resetvalue = 0xc00
5050 };
5051 define_one_arm_cp_reg(cpu, &nsacr);
5052 } else {
5053 ARMCPRegInfo nsacr = {
5054 .name = "NSACR",
5055 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5056 .access = PL3_RW | PL1_R,
5057 .resetvalue = 0,
5058 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
5059 };
5060 define_one_arm_cp_reg(cpu, &nsacr);
5061 }
5062 } else {
5063 if (arm_feature(env, ARM_FEATURE_V8)) {
5064 ARMCPRegInfo nsacr = {
5065 .name = "NSACR", .type = ARM_CP_CONST,
5066 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5067 .access = PL1_R,
5068 .resetvalue = 0xc00
5069 };
5070 define_one_arm_cp_reg(cpu, &nsacr);
5071 }
5072 }
5073
5074 if (arm_feature(env, ARM_FEATURE_PMSA)) {
5075 if (arm_feature(env, ARM_FEATURE_V6)) {
5076 /* PMSAv6 not implemented */
5077 assert(arm_feature(env, ARM_FEATURE_V7));
5078 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5079 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
5080 } else {
5081 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
5082 }
5083 } else {
5084 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5085 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
5086 }
5087 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5088 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
5089 }
5090 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
5091 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
5092 }
5093 if (arm_feature(env, ARM_FEATURE_VAPA)) {
5094 define_arm_cp_regs(cpu, vapa_cp_reginfo);
5095 }
5096 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
5097 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
5098 }
5099 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
5100 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
5101 }
5102 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
5103 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
5104 }
5105 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
5106 define_arm_cp_regs(cpu, omap_cp_reginfo);
5107 }
5108 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
5109 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
5110 }
5111 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5112 define_arm_cp_regs(cpu, xscale_cp_reginfo);
5113 }
5114 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
5115 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
5116 }
5117 if (arm_feature(env, ARM_FEATURE_LPAE)) {
5118 define_arm_cp_regs(cpu, lpae_cp_reginfo);
5119 }
5120 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
5121 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
5122 * be read-only (ie write causes UNDEF exception).
5123 */
5124 {
5125 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
5126 /* Pre-v8 MIDR space.
5127 * Note that the MIDR isn't a simple constant register because
5128 * of the TI925 behaviour where writes to another register can
5129 * cause the MIDR value to change.
5130 *
5131 * Unimplemented registers in the c15 0 0 0 space default to
5132 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
5133 * and friends override accordingly.
5134 */
5135 { .name = "MIDR",
5136 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
5137 .access = PL1_R, .resetvalue = cpu->midr,
5138 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
5139 .readfn = midr_read,
5140 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5141 .type = ARM_CP_OVERRIDE },
5142 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
5143 { .name = "DUMMY",
5144 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
5145 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5146 { .name = "DUMMY",
5147 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
5148 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5149 { .name = "DUMMY",
5150 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
5151 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5152 { .name = "DUMMY",
5153 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
5154 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5155 { .name = "DUMMY",
5156 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
5157 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5158 REGINFO_SENTINEL
5159 };
5160 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
5161 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
5162 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
5163 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
5164 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5165 .readfn = midr_read },
5166 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
5167 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5168 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5169 .access = PL1_R, .resetvalue = cpu->midr },
5170 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5171 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
5172 .access = PL1_R, .resetvalue = cpu->midr },
5173 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
5174 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
5175 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
5176 REGINFO_SENTINEL
5177 };
5178 ARMCPRegInfo id_cp_reginfo[] = {
5179 /* These are common to v8 and pre-v8 */
5180 { .name = "CTR",
5181 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
5182 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5183 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
5184 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
5185 .access = PL0_R, .accessfn = ctr_el0_access,
5186 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5187 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
5188 { .name = "TCMTR",
5189 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
5190 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5191 REGINFO_SENTINEL
5192 };
5193 /* TLBTR is specific to VMSA */
5194 ARMCPRegInfo id_tlbtr_reginfo = {
5195 .name = "TLBTR",
5196 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
5197 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
5198 };
5199 /* MPUIR is specific to PMSA V6+ */
5200 ARMCPRegInfo id_mpuir_reginfo = {
5201 .name = "MPUIR",
5202 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5203 .access = PL1_R, .type = ARM_CP_CONST,
5204 .resetvalue = cpu->pmsav7_dregion << 8
5205 };
5206 ARMCPRegInfo crn0_wi_reginfo = {
5207 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5208 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5209 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5210 };
5211 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5212 arm_feature(env, ARM_FEATURE_STRONGARM)) {
5213 ARMCPRegInfo *r;
5214 /* Register the blanket "writes ignored" value first to cover the
5215 * whole space. Then update the specific ID registers to allow write
5216 * access, so that they ignore writes rather than causing them to
5217 * UNDEF.
5218 */
5219 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5220 for (r = id_pre_v8_midr_cp_reginfo;
5221 r->type != ARM_CP_SENTINEL; r++) {
5222 r->access = PL1_RW;
5223 }
5224 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5225 r->access = PL1_RW;
5226 }
5227 id_tlbtr_reginfo.access = PL1_RW;
5228 id_mpuir_reginfo.access = PL1_RW;
5229 }
5230 if (arm_feature(env, ARM_FEATURE_V8)) {
5231 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5232 } else {
5233 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5234 }
5235 define_arm_cp_regs(cpu, id_cp_reginfo);
5236 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
5237 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5238 } else if (arm_feature(env, ARM_FEATURE_V7)) {
5239 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5240 }
5241 }
5242
5243 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5244 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5245 }
5246
5247 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5248 ARMCPRegInfo auxcr_reginfo[] = {
5249 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5250 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5251 .access = PL1_RW, .type = ARM_CP_CONST,
5252 .resetvalue = cpu->reset_auxcr },
5253 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5254 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5255 .access = PL2_RW, .type = ARM_CP_CONST,
5256 .resetvalue = 0 },
5257 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5258 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5259 .access = PL3_RW, .type = ARM_CP_CONST,
5260 .resetvalue = 0 },
5261 REGINFO_SENTINEL
5262 };
5263 define_arm_cp_regs(cpu, auxcr_reginfo);
5264 }
5265
5266 if (arm_feature(env, ARM_FEATURE_CBAR)) {
5267 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5268 /* 32 bit view is [31:18] 0...0 [43:32]. */
5269 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5270 | extract64(cpu->reset_cbar, 32, 12);
5271 ARMCPRegInfo cbar_reginfo[] = {
5272 { .name = "CBAR",
5273 .type = ARM_CP_CONST,
5274 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5275 .access = PL1_R, .resetvalue = cpu->reset_cbar },
5276 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5277 .type = ARM_CP_CONST,
5278 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5279 .access = PL1_R, .resetvalue = cbar32 },
5280 REGINFO_SENTINEL
5281 };
5282 /* We don't currently implement a read/write 64 bit CBAR */
5283 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5284 define_arm_cp_regs(cpu, cbar_reginfo);
5285 } else {
5286 ARMCPRegInfo cbar = {
5287 .name = "CBAR",
5288 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5289 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5290 .fieldoffset = offsetof(CPUARMState,
5291 cp15.c15_config_base_address)
5292 };
5293 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5294 cbar.access = PL1_R;
5295 cbar.fieldoffset = 0;
5296 cbar.type = ARM_CP_CONST;
5297 }
5298 define_one_arm_cp_reg(cpu, &cbar);
5299 }
5300 }
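/* Worked example for the 32-bit CBAR view computed above (the value is
 * chosen only for illustration): with reset_cbar = 0x0000001f2c000000,
 * extract64(v, 18, 14) << 18 = 0x2c000000 (bits [31:18])
 * extract64(v, 32, 12) = 0x1f (bits [43:32])
 * so cbar32 = 0x2c00001f.
 */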
5301
5302 if (arm_feature(env, ARM_FEATURE_VBAR)) {
5303 ARMCPRegInfo vbar_cp_reginfo[] = {
5304 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5305 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5306 .access = PL1_RW, .writefn = vbar_write,
5307 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5308 offsetof(CPUARMState, cp15.vbar_ns) },
5309 .resetvalue = 0 },
5310 REGINFO_SENTINEL
5311 };
5312 define_arm_cp_regs(cpu, vbar_cp_reginfo);
5313 }
5314
5315 /* Generic registers whose values depend on the implementation */
5316 {
5317 ARMCPRegInfo sctlr = {
5318 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5319 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5320 .access = PL1_RW,
5321 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5322 offsetof(CPUARMState, cp15.sctlr_ns) },
5323 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5324 .raw_writefn = raw_write,
5325 };
5326 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5327 /* Normally we would always end the TB on an SCTLR write, but Linux
5328 * arch/arm/mach-pxa/sleep.S expects two instructions following
5329 * an MMU enable to execute from cache. Imitate this behaviour.
5330 */
5331 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5332 }
5333 define_one_arm_cp_reg(cpu, &sctlr);
5334 }
5335 }
5336
5337 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5338 {
5339 CPUState *cs = CPU(cpu);
5340 CPUARMState *env = &cpu->env;
5341
5342 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5343 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5344 aarch64_fpu_gdb_set_reg,
5345 34, "aarch64-fpu.xml", 0);
5346 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5347 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5348 51, "arm-neon.xml", 0);
5349 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5350 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5351 35, "arm-vfp3.xml", 0);
5352 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5353 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5354 19, "arm-vfp.xml", 0);
5355 }
5356 }
5357
5358 /* Sort alphabetically by type name, except for "any". */
5359 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5360 {
5361 ObjectClass *class_a = (ObjectClass *)a;
5362 ObjectClass *class_b = (ObjectClass *)b;
5363 const char *name_a, *name_b;
5364
5365 name_a = object_class_get_name(class_a);
5366 name_b = object_class_get_name(class_b);
5367 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5368 return 1;
5369 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5370 return -1;
5371 } else {
5372 return strcmp(name_a, name_b);
5373 }
5374 }
5375
5376 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5377 {
5378 ObjectClass *oc = data;
5379 CPUListState *s = user_data;
5380 const char *typename;
5381 char *name;
5382
5383 typename = object_class_get_name(oc);
5384 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5385 (*s->cpu_fprintf)(s->file, " %s\n",
5386 name);
5387 g_free(name);
5388 }
5389
5390 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5391 {
5392 CPUListState s = {
5393 .file = f,
5394 .cpu_fprintf = cpu_fprintf,
5395 };
5396 GSList *list;
5397
5398 list = object_class_get_list(TYPE_ARM_CPU, false);
5399 list = g_slist_sort(list, arm_cpu_list_compare);
5400 (*cpu_fprintf)(f, "Available CPUs:\n");
5401 g_slist_foreach(list, arm_cpu_list_entry, &s);
5402 g_slist_free(list);
5403 #ifdef CONFIG_KVM
5404 /* The 'host' CPU type is dynamically registered only if KVM is
5405 * enabled, so we have to special-case it here:
5406 */
5407 (*cpu_fprintf)(f, " host (only available in KVM mode)\n");
5408 #endif
5409 }
5410
5411 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5412 {
5413 ObjectClass *oc = data;
5414 CpuDefinitionInfoList **cpu_list = user_data;
5415 CpuDefinitionInfoList *entry;
5416 CpuDefinitionInfo *info;
5417 const char *typename;
5418
5419 typename = object_class_get_name(oc);
5420 info = g_malloc0(sizeof(*info));
5421 info->name = g_strndup(typename,
5422 strlen(typename) - strlen("-" TYPE_ARM_CPU));
5423 info->q_typename = g_strdup(typename);
5424
5425 entry = g_malloc0(sizeof(*entry));
5426 entry->value = info;
5427 entry->next = *cpu_list;
5428 *cpu_list = entry;
5429 }
5430
5431 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5432 {
5433 CpuDefinitionInfoList *cpu_list = NULL;
5434 GSList *list;
5435
5436 list = object_class_get_list(TYPE_ARM_CPU, false);
5437 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5438 g_slist_free(list);
5439
5440 return cpu_list;
5441 }
5442
5443 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
5444 void *opaque, int state, int secstate,
5445 int crm, int opc1, int opc2)
5446 {
5447 /* Private utility function for define_one_arm_cp_reg_with_opaque():
5448 * add a single reginfo struct to the hash table.
5449 */
5450 uint32_t *key = g_new(uint32_t, 1);
5451 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
5452 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
5453 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
5454
5455 /* Reset the secure state to the specific incoming state. This is
5456 * necessary as the register may have been defined with both states.
5457 */
5458 r2->secure = secstate;
5459
5460 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5461 /* Register is banked (using both entries in array).
5462 * Overwriting fieldoffset as the array is only used to define
5463 * banked registers but later only fieldoffset is used.
5464 */
5465 r2->fieldoffset = r->bank_fieldoffsets[ns];
5466 }
5467
5468 if (state == ARM_CP_STATE_AA32) {
5469 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5470 /* If the register is banked then we don't need to migrate or
5471 * reset the 32-bit instance in certain cases:
5472 *
5473 * 1) If the register has both 32-bit and 64-bit instances then we
5474 * can count on the 64-bit instance taking care of the
5475 * non-secure bank.
5476 * 2) If ARMv8 is enabled then we can count on a 64-bit version
5477 * taking care of the secure bank. This requires that separate
5478 * 32 and 64-bit definitions are provided.
5479 */
5480 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
5481 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
5482 r2->type |= ARM_CP_ALIAS;
5483 }
5484 } else if ((secstate != r->secure) && !ns) {
5485 /* The register is not banked so we only want to allow migration of
5486 * the non-secure instance.
5487 */
5488 r2->type |= ARM_CP_ALIAS;
5489 }
5490
5491 if (r->state == ARM_CP_STATE_BOTH) {
5492 /* We assume it is a cp15 register if the .cp field is left unset.
5493 */
5494 if (r2->cp == 0) {
5495 r2->cp = 15;
5496 }
5497
5498 #ifdef HOST_WORDS_BIGENDIAN
5499 if (r2->fieldoffset) {
5500 r2->fieldoffset += sizeof(uint32_t);
5501 }
5502 #endif
5503 }
5504 }
5505 if (state == ARM_CP_STATE_AA64) {
5506 /* To allow abbreviation of ARMCPRegInfo
5507 * definitions, we treat cp == 0 as equivalent to
5508 * the value for "standard guest-visible sysreg".
5509 * STATE_BOTH definitions are also always "standard
5510 * sysreg" in their AArch64 view (the .cp value may
5511 * be non-zero for the benefit of the AArch32 view).
5512 */
5513 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
5514 r2->cp = CP_REG_ARM64_SYSREG_CP;
5515 }
5516 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
5517 r2->opc0, opc1, opc2);
5518 } else {
5519 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
5520 }
5521 if (opaque) {
5522 r2->opaque = opaque;
5523 }
5524 /* reginfo passed to helpers is correct for the actual access,
5525 * and is never ARM_CP_STATE_BOTH:
5526 */
5527 r2->state = state;
5528 /* Make sure reginfo passed to helpers for wildcarded regs
5529 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
5530 */
5531 r2->crm = crm;
5532 r2->opc1 = opc1;
5533 r2->opc2 = opc2;
5534 /* By convention, for wildcarded registers only the first
5535 * entry is used for migration; the others are marked as
5536 * ALIAS so we don't try to transfer the register
5537 * multiple times. Special registers (ie NOP/WFI) are
5538 * never migratable and not even raw-accessible.
5539 */
5540 if ((r->type & ARM_CP_SPECIAL)) {
5541 r2->type |= ARM_CP_NO_RAW;
5542 }
5543 if (((r->crm == CP_ANY) && crm != 0) ||
5544 ((r->opc1 == CP_ANY) && opc1 != 0) ||
5545 ((r->opc2 == CP_ANY) && opc2 != 0)) {
5546 r2->type |= ARM_CP_ALIAS;
5547 }
5548
5549 /* Check that raw accesses are either forbidden or handled. Note that
5550 * we can't assert this earlier because the setup of fieldoffset for
5551 * banked registers has to be done first.
5552 */
5553 if (!(r2->type & ARM_CP_NO_RAW)) {
5554 assert(!raw_accessors_invalid(r2));
5555 }
5556
5557 /* Overriding of an existing definition must be explicitly
5558 * requested.
5559 */
5560 if (!(r->type & ARM_CP_OVERRIDE)) {
5561 ARMCPRegInfo *oldreg;
5562 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
5563 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
5564 fprintf(stderr, "Register redefined: cp=%d %d bit "
5565 "crn=%d crm=%d opc1=%d opc2=%d, "
5566 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
5567 r2->crn, r2->crm, r2->opc1, r2->opc2,
5568 oldreg->name, r2->name);
5569 g_assert_not_reached();
5570 }
5571 }
5572 g_hash_table_insert(cpu->cp_regs, key, r2);
5573 }
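/* Concrete banking example, drawn from this file's own definitions: the
 * VBAR reginfo earlier in register_cp_regs_for_features() supplies
 * bank_fieldoffsets for cp15.vbar_s and cp15.vbar_ns. For the AArch32
 * view, define_one_arm_cp_reg_with_opaque() calls this function twice,
 * once per security state, and each r2 copy resolves fieldoffset to the
 * matching bank; the AArch64 view is entered once and maps to the
 * non-secure bank.
 */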
5574
5575
5576 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
5577 const ARMCPRegInfo *r, void *opaque)
5578 {
5579 /* Define implementations of coprocessor registers.
5580 * We store these in a hashtable because typically
5581 * there are fewer than 150 registers in a space which
5582 * is 16*16*16*8*8 = 262144 in size.
5583 * Wildcarding is supported for the crm, opc1 and opc2 fields.
5584 * If a register is defined twice then the second definition is
5585 * used, so this can be used to define some generic registers and
5586 * then override them with implementation specific variations.
5587 * At least one of the original and the second definition should
5588 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
5589 * against accidental use.
5590 *
5591 * The state field defines whether the register is to be
5592 * visible in the AArch32 or AArch64 execution state. If the
5593 * state is set to ARM_CP_STATE_BOTH then we synthesise a
5594 * reginfo structure for the AArch32 view, which sees the lower
5595 * 32 bits of the 64 bit register.
5596 *
5597 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
5598 * be wildcarded. AArch64 registers are always considered to be 64
5599 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
5600 * the register, if any.
5601 */
5602 int crm, opc1, opc2, state;
5603 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
5604 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
5605 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
5606 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
5607 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
5608 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
5609 /* 64 bit registers have only CRm and Opc1 fields */
5610 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
5611 /* op0 only exists in the AArch64 encodings */
5612 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
5613 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
5614 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
5615 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
5616 * encodes a minimum access level for the register. We roll this
5617 * runtime check into our general permission check code, so check
5618 * here that the reginfo's specified permissions are strict enough
5619 * to encompass the generic architectural permission check.
5620 */
5621 if (r->state != ARM_CP_STATE_AA32) {
5622 int mask = 0;
5623 switch (r->opc1) {
5624 case 0: case 1: case 2:
5625 /* min_EL EL1 */
5626 mask = PL1_RW;
5627 break;
5628 case 3:
5629 /* min_EL EL0 */
5630 mask = PL0_RW;
5631 break;
5632 case 4:
5633 /* min_EL EL2 */
5634 mask = PL2_RW;
5635 break;
5636 case 5:
5637 /* unallocated encoding, so not possible */
5638 assert(false);
5639 break;
5640 case 6:
5641 /* min_EL EL3 */
5642 mask = PL3_RW;
5643 break;
5644 case 7:
5645 /* min_EL EL1, secure mode only (we don't check the latter) */
5646 mask = PL1_RW;
5647 break;
5648 default:
5649 /* broken reginfo with out-of-range opc1 */
5650 assert(false);
5651 break;
5652 }
5653 /* assert our permissions are not too lax (stricter is fine) */
5654 assert((r->access & ~mask) == 0);
5655 }
5656
5657 /* Check that the register definition has enough info to handle
5658 * reads and writes if they are permitted.
5659 */
5660 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
5661 if (r->access & PL3_R) {
5662 assert((r->fieldoffset ||
5663 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5664 r->readfn);
5665 }
5666 if (r->access & PL3_W) {
5667 assert((r->fieldoffset ||
5668 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5669 r->writefn);
5670 }
5671 }
5672 /* Bad type field probably means missing sentinel at end of reg list */
5673 assert(cptype_valid(r->type));
5674 for (crm = crmmin; crm <= crmmax; crm++) {
5675 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
5676 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
5677 for (state = ARM_CP_STATE_AA32;
5678 state <= ARM_CP_STATE_AA64; state++) {
5679 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
5680 continue;
5681 }
5682 if (state == ARM_CP_STATE_AA32) {
5683 /* Under AArch32 CP registers can be common
5684 * (same for secure and non-secure world) or banked.
5685 */
5686 switch (r->secure) {
5687 case ARM_CP_SECSTATE_S:
5688 case ARM_CP_SECSTATE_NS:
5689 add_cpreg_to_hashtable(cpu, r, opaque, state,
5690 r->secure, crm, opc1, opc2);
5691 break;
5692 default:
5693 add_cpreg_to_hashtable(cpu, r, opaque, state,
5694 ARM_CP_SECSTATE_S,
5695 crm, opc1, opc2);
5696 add_cpreg_to_hashtable(cpu, r, opaque, state,
5697 ARM_CP_SECSTATE_NS,
5698 crm, opc1, opc2);
5699 break;
5700 }
5701 } else {
5702 /* AArch64 registers get mapped to non-secure instance
5703 * of AArch32 */
5704 add_cpreg_to_hashtable(cpu, r, opaque, state,
5705 ARM_CP_SECSTATE_NS,
5706 crm, opc1, opc2);
5707 }
5708 }
5709 }
5710 }
5711 }
5712 }
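/* Hedged usage sketch (the register below is invented purely for
 * illustration): a definition that wildcards crm and opc2 is expanded by
 * the loops above into 16 * 8 = 128 crm/opc2 combinations, each added for
 * both AArch32 security states, and all but the crm = 0 / opc2 = 0
 * instance are additionally marked ARM_CP_ALIAS by
 * add_cpreg_to_hashtable().
 */
#if 0 /* example only */
static const ARMCPRegInfo example_wildcard_reginfo = {
.name = "EXAMPLE_WI", .cp = 15, .crn = 13, .crm = CP_ANY,
.opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP,
};
/* define_one_arm_cp_reg(cpu, &example_wildcard_reginfo); */
#endif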
5713
5714 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
5715 const ARMCPRegInfo *regs, void *opaque)
5716 {
5717 /* Define a whole list of registers */
5718 const ARMCPRegInfo *r;
5719 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
5720 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
5721 }
5722 }
5723
5724 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
5725 {
5726 return g_hash_table_lookup(cpregs, &encoded_cp);
5727 }
5728
5729 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
5730 uint64_t value)
5731 {
5732 /* Helper coprocessor write function for write-ignore registers */
5733 }
5734
5735 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
5736 {
5737 /* Helper coprocessor read function for read-as-zero registers */
5738 return 0;
5739 }
5740
5741 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
5742 {
5743 /* Helper coprocessor reset function for do-nothing-on-reset registers */
5744 }
5745
5746 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
5747 {
5748 /* Return true if it is not valid for us to switch to
5749 * this CPU mode (ie all the UNPREDICTABLE cases in
5750 * the ARM ARM CPSRWriteByInstr pseudocode).
5751 */
5752
5753 /* Changes to or from Hyp via MSR and CPS are illegal. */
5754 if (write_type == CPSRWriteByInstr &&
5755 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
5756 mode == ARM_CPU_MODE_HYP)) {
5757 return 1;
5758 }
5759
5760 switch (mode) {
5761 case ARM_CPU_MODE_USR:
5762 return 0;
5763 case ARM_CPU_MODE_SYS:
5764 case ARM_CPU_MODE_SVC:
5765 case ARM_CPU_MODE_ABT:
5766 case ARM_CPU_MODE_UND:
5767 case ARM_CPU_MODE_IRQ:
5768 case ARM_CPU_MODE_FIQ:
5769 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
5770 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
5771 */
5772 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
5773 * and CPS are treated as illegal mode changes.
5774 */
5775 if (write_type == CPSRWriteByInstr &&
5776 (env->cp15.hcr_el2 & HCR_TGE) &&
5777 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
5778 !arm_is_secure_below_el3(env)) {
5779 return 1;
5780 }
5781 return 0;
5782 case ARM_CPU_MODE_HYP:
5783 return !arm_feature(env, ARM_FEATURE_EL2)
5784 || arm_current_el(env) < 2 || arm_is_secure(env);
5785 case ARM_CPU_MODE_MON:
5786 return arm_current_el(env) < 3;
5787 default:
5788 return 1;
5789 }
5790 }
5791
5792 uint32_t cpsr_read(CPUARMState *env)
5793 {
5794 int ZF;
5795 ZF = (env->ZF == 0);
5796 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
5797 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
5798 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
5799 | ((env->condexec_bits & 0xfc) << 8)
5800 | (env->GE << 16) | (env->daif & CPSR_AIF);
5801 }
5802
5803 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
5804 CPSRWriteType write_type)
5805 {
5806 uint32_t changed_daif;
5807
5808 if (mask & CPSR_NZCV) {
5809 env->ZF = (~val) & CPSR_Z;
5810 env->NF = val;
5811 env->CF = (val >> 29) & 1;
5812 env->VF = (val << 3) & 0x80000000;
5813 }
5814 if (mask & CPSR_Q)
5815 env->QF = ((val & CPSR_Q) != 0);
5816 if (mask & CPSR_T)
5817 env->thumb = ((val & CPSR_T) != 0);
5818 if (mask & CPSR_IT_0_1) {
5819 env->condexec_bits &= ~3;
5820 env->condexec_bits |= (val >> 25) & 3;
5821 }
5822 if (mask & CPSR_IT_2_7) {
5823 env->condexec_bits &= 3;
5824 env->condexec_bits |= (val >> 8) & 0xfc;
5825 }
5826 if (mask & CPSR_GE) {
5827 env->GE = (val >> 16) & 0xf;
5828 }
5829
5830 /* In a V7 implementation that includes the security extensions but does
5831 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
5832 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
5833 * bits respectively.
5834 *
5835 * In a V8 implementation, it is permitted for privileged software to
5836 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
5837 */
5838 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
5839 arm_feature(env, ARM_FEATURE_EL3) &&
5840 !arm_feature(env, ARM_FEATURE_EL2) &&
5841 !arm_is_secure(env)) {
5842
5843 changed_daif = (env->daif ^ val) & mask;
5844
5845 if (changed_daif & CPSR_A) {
5846 /* Check to see if we are allowed to change the masking of async
5847 * abort exceptions from a non-secure state.
5848 */
5849 if (!(env->cp15.scr_el3 & SCR_AW)) {
5850 qemu_log_mask(LOG_GUEST_ERROR,
5851 "Ignoring attempt to switch CPSR_A flag from "
5852 "non-secure world with SCR.AW bit clear\n");
5853 mask &= ~CPSR_A;
5854 }
5855 }
5856
5857 if (changed_daif & CPSR_F) {
5858 /* Check to see if we are allowed to change the masking of FIQ
5859 * exceptions from a non-secure state.
5860 */
5861 if (!(env->cp15.scr_el3 & SCR_FW)) {
5862 qemu_log_mask(LOG_GUEST_ERROR,
5863 "Ignoring attempt to switch CPSR_F flag from "
5864 "non-secure world with SCR.FW bit clear\n");
5865 mask &= ~CPSR_F;
5866 }
5867
5868 /* Check whether non-maskable FIQ (NMFI) support is enabled.
5869 * If this bit is set, software is not allowed to mask
5870 * FIQs, but is allowed to set CPSR_F to 0.
5871 */
5872 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
5873 (val & CPSR_F)) {
5874 qemu_log_mask(LOG_GUEST_ERROR,
5875 "Ignoring attempt to enable CPSR_F flag "
5876 "(non-maskable FIQ [NMFI] support enabled)\n");
5877 mask &= ~CPSR_F;
5878 }
5879 }
5880 }
5881
5882 env->daif &= ~(CPSR_AIF & mask);
5883 env->daif |= val & CPSR_AIF & mask;
5884
5885 if (write_type != CPSRWriteRaw &&
5886 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
5887 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
5888 /* Note that we can only get here in USR mode if this is a
5889 * gdb stub write; for this case we follow the architectural
5890 * behaviour for guest writes in USR mode of ignoring an attempt
5891 * to switch mode. (Those are caught by translate.c for writes
5892 * triggered by guest instructions.)
5893 */
5894 mask &= ~CPSR_M;
5895 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
5896 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
5897 * v7, and has defined behaviour in v8:
5898 * + leave CPSR.M untouched
5899 * + allow changes to the other CPSR fields
5900 * + set PSTATE.IL
5901 * For user changes via the GDB stub, we don't set PSTATE.IL,
5902 * as this would be unnecessarily harsh for a user error.
5903 */
5904 mask &= ~CPSR_M;
5905 if (write_type != CPSRWriteByGDBStub &&
5906 arm_feature(env, ARM_FEATURE_V8)) {
5907 mask |= CPSR_IL;
5908 val |= CPSR_IL;
5909 }
5910 } else {
5911 switch_mode(env, val & CPSR_M);
5912 }
5913 }
5914 mask &= ~CACHED_CPSR_BITS;
5915 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
5916 }
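/* Hedged usage sketch: callers that only want to update the condition
 * flags pass a narrow mask, e.g.
 *
 * cpsr_write(env, 0x80000000, CPSR_NZCV, CPSRWriteRaw);
 *
 * sets N and clears Z, C and V while leaving mode, IT state and DAIF
 * untouched; CPSRWriteRaw additionally bypasses the guest-visible side
 * effects (the SCR.AW/FW policy and mode-switch checks above).
 */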
5917
5918 /* Sign/zero extend */
5919 uint32_t HELPER(sxtb16)(uint32_t x)
5920 {
5921 uint32_t res;
5922 res = (uint16_t)(int8_t)x;
5923 res |= (uint32_t)(int8_t)(x >> 16) << 16;
5924 return res;
5925 }
5926
5927 uint32_t HELPER(uxtb16)(uint32_t x)
5928 {
5929 uint32_t res;
5930 res = (uint16_t)(uint8_t)x;
5931 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
5932 return res;
5933 }
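/* Worked examples for the two helpers above (input chosen only for
 * illustration): bytes 0 and 2 are widened into the two halfwords, so
 * sxtb16(0x00800070) = 0xff800070 (0x80 sign-extends, 0x70 does not)
 * uxtb16(0x00800070) = 0x00800070 (both bytes zero-extend)
 */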
5934
5935 int32_t HELPER(sdiv)(int32_t num, int32_t den)
5936 {
5937 if (den == 0)
5938 return 0;
5939 if (num == INT_MIN && den == -1)
5940 return INT_MIN;
5941 return num / den;
5942 }
5943
5944 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
5945 {
5946 if (den == 0)
5947 return 0;
5948 return num / den;
5949 }
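/* These follow the ARMv7/v8 SDIV/UDIV architectural rules rather than C
 * semantics: division by zero returns 0 (these helpers implement the
 * non-trapping behaviour), and the INT_MIN / -1 overflow case returns
 * INT_MIN instead of being undefined behaviour. For example
 * sdiv(-2147483648, -1) yields -2147483648 (0x80000000).
 */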
5950
5951 uint32_t HELPER(rbit)(uint32_t x)
5952 {
5953 return revbit32(x);
5954 }
5955
5956 #if defined(CONFIG_USER_ONLY)
5957
5958 /* These should probably raise undefined insn exceptions. */
5959 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
5960 {
5961 ARMCPU *cpu = arm_env_get_cpu(env);
5962
5963 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
5964 }
5965
5966 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
5967 {
5968 ARMCPU *cpu = arm_env_get_cpu(env);
5969
5970 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
5971 return 0;
5972 }
5973
5974 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
5975 {
5976 /* translate.c should never generate calls here in user-only mode */
5977 g_assert_not_reached();
5978 }
5979
5980 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
5981 {
5982 /* translate.c should never generate calls here in user-only mode */
5983 g_assert_not_reached();
5984 }
5985
5986 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
5987 {
5988 /* The TT instructions can be used by unprivileged code, but in
5989 * user-only emulation we don't have the MPU.
5990 * Luckily since we know we are NonSecure unprivileged (and that in
5991 * turn means that the A flag wasn't specified), all the bits in the
5992 * register must be zero:
5993 * IREGION: 0 because IRVALID is 0
5994 * IRVALID: 0 because NS
5995 * S: 0 because NS
5996 * NSRW: 0 because NS
5997 * NSR: 0 because NS
5998 * RW: 0 because unpriv and A flag not set
5999 * R: 0 because unpriv and A flag not set
6000 * SRVALID: 0 because NS
6001 * MRVALID: 0 because unpriv and A flag not set
6002 * SREGION: 0 because SRVALID is 0
6003 * MREGION: 0 because MRVALID is 0
6004 */
6005 return 0;
6006 }
6007
6008 void switch_mode(CPUARMState *env, int mode)
6009 {
6010 ARMCPU *cpu = arm_env_get_cpu(env);
6011
6012 if (mode != ARM_CPU_MODE_USR) {
6013 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
6014 }
6015 }
6016
6017 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
6018 uint32_t cur_el, bool secure)
6019 {
6020 return 1;
6021 }
6022
6023 void aarch64_sync_64_to_32(CPUARMState *env)
6024 {
6025 g_assert_not_reached();
6026 }
6027
6028 #else
6029
6030 void switch_mode(CPUARMState *env, int mode)
6031 {
6032 int old_mode;
6033 int i;
6034
6035 old_mode = env->uncached_cpsr & CPSR_M;
6036 if (mode == old_mode)
6037 return;
6038
6039 if (old_mode == ARM_CPU_MODE_FIQ) {
6040 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
6041 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
6042 } else if (mode == ARM_CPU_MODE_FIQ) {
6043 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
6044 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
6045 }
6046
6047 i = bank_number(old_mode);
6048 env->banked_r13[i] = env->regs[13];
6049 env->banked_r14[i] = env->regs[14];
6050 env->banked_spsr[i] = env->spsr;
6051
6052 i = bank_number(mode);
6053 env->regs[13] = env->banked_r13[i];
6054 env->regs[14] = env->banked_r14[i];
6055 env->spsr = env->banked_spsr[i];
6056 }
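/* Example flow, for illustration: switching from USR to FIQ first spills
 * r8-r12 into usr_regs[] and loads fiq_regs[] (the five FIQ-banked high
 * registers), then banks r13, r14 and SPSR via bank_number(); the reverse
 * copies happen on the way back out of FIQ mode.
 */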
6057
6058 /* Physical Interrupt Target EL Lookup Table
6059 *
6060 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
6061 *
6062 * The below multi-dimensional table is used for looking up the target
6063 * exception level given numerous condition criteria. Specifically, the
6064 * target EL is based on SCR and HCR routing controls as well as the
6065 * currently executing EL and secure state.
6066 *
6067 * Dimensions:
6068 * target_el_table[2][2][2][2][2][4]
6069 * | | | | | +--- Current EL
6070 * | | | | +------ Non-secure(0)/Secure(1)
6071 * | | | +--------- HCR mask override
6072 * | | +------------ SCR exec state control
6073 * | +--------------- SCR mask override
6074 * +------------------ 32-bit(0)/64-bit(1) EL3
6075 *
6076 * The table values are as such:
6077 * 0-3 = EL0-EL3
6078 * -1 = Cannot occur
6079 *
6080 * The ARM ARM target EL table includes entries indicating that an "exception
6081 * is not taken". The two cases where this is applicable are:
6082 * 1) An exception is taken from EL3 but the SCR does not have the exception
6083 * routed to EL3.
6084 * 2) An exception is taken from EL2 but the HCR does not have the exception
6085 * routed to EL2.
6086 * In these two cases, the table below contains a target of EL1. This value is
6087 * returned as it is expected that the consumer of the table data will check
6088 * for "target EL >= current EL" to ensure the exception is not taken.
6089 *
6090 * SCR HCR
6091 * 64 EA AMO From
6092 * BIT IRQ IMO Non-secure Secure
6093 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
6094 */
6095 static const int8_t target_el_table[2][2][2][2][2][4] = {
6096 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
6097 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
6098 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
6099 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
6100 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
6101 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
6102 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
6103 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
6104 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
6105 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
6106 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
6107 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
6108 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
6109 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
6110 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
6111 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
6112 };
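/* Worked example of the lookup (derived from the table above): with a
 * 64-bit EL3 (is64 = 1), an IRQ taken from Non-secure EL0 with
 * SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 0 indexes
 * target_el_table[1][0][1][0][0][0] == 1, i.e. the IRQ is taken to EL1.
 */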
6113
6114 /*
6115 * Determine the target EL for physical exceptions
6116 */
6117 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
6118 uint32_t cur_el, bool secure)
6119 {
6120 CPUARMState *env = cs->env_ptr;
6121 int rw;
6122 int scr;
6123 int hcr;
6124 int target_el;
6125 /* Is the highest EL AArch64? */
6126 int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
6127
6128 if (arm_feature(env, ARM_FEATURE_EL3)) {
6129 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
6130 } else {
6131 /* Either EL2 is the highest EL (and so the EL2 register width
6132 * is given by is64); or there is no EL2 or EL3, in which case
6133 * the value of 'rw' does not affect the table lookup anyway.
6134 */
6135 rw = is64;
6136 }
6137
6138 switch (excp_idx) {
6139 case EXCP_IRQ:
6140 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
6141 hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
6142 break;
6143 case EXCP_FIQ:
6144 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
6145 hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
6146 break;
6147 default:
6148 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
6149 hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
6150 break;
6151 }
6152
6153 /* If HCR.TGE is set then the HCR routing control is treated as set */
6154 hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
6155
6156 /* Perform a table-lookup for the target EL given the current state */
6157 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
6158
6159 assert(target_el > 0);
6160
6161 return target_el;
6162 }
6163
6164 static void v7m_push(CPUARMState *env, uint32_t val)
6165 {
6166 CPUState *cs = CPU(arm_env_get_cpu(env));
6167
6168 env->regs[13] -= 4;
6169 stl_phys(cs->as, env->regs[13], val);
6170 }
6171
6172 /* Return true if we're using the process stack pointer (not the MSP) */
6173 static bool v7m_using_psp(CPUARMState *env)
6174 {
6175 /* Handler mode always uses the main stack; for thread mode
6176 * the CONTROL.SPSEL bit determines the answer.
6177 * Note that in v7M it is not possible to be in Handler mode with
6178 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
6179 */
6180 return !arm_v7m_is_handler_mode(env) &&
6181 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
6182 }
6183
6184 /* Write to v7M CONTROL.SPSEL bit for the specified security bank.
6185 * This may change the current stack pointer between Main and Process
6186 * stack pointers if it is done for the CONTROL register for the current
6187 * security state.
6188 */
6189 static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
6190 bool new_spsel,
6191 bool secstate)
6192 {
6193 bool old_is_psp = v7m_using_psp(env);
6194
6195 env->v7m.control[secstate] =
6196 deposit32(env->v7m.control[secstate],
6197 R_V7M_CONTROL_SPSEL_SHIFT,
6198 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
6199
6200 if (secstate == env->v7m.secure) {
6201 bool new_is_psp = v7m_using_psp(env);
6202 uint32_t tmp;
6203
6204 if (old_is_psp != new_is_psp) {
6205 tmp = env->v7m.other_sp;
6206 env->v7m.other_sp = env->regs[13];
6207 env->regs[13] = tmp;
6208 }
6209 }
6210 }
6211
6212 /* Write to v7M CONTROL.SPSEL bit. This may change the current
6213 * stack pointer between Main and Process stack pointers.
6214 */
6215 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
6216 {
6217 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
6218 }
6219
6220 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
6221 {
6222 /* Write a new value to v7m.exception, thus transitioning into or out
6223 * of Handler mode; this may result in a change of active stack pointer.
6224 */
6225 bool new_is_psp, old_is_psp = v7m_using_psp(env);
6226 uint32_t tmp;
6227
6228 env->v7m.exception = new_exc;
6229
6230 new_is_psp = v7m_using_psp(env);
6231
6232 if (old_is_psp != new_is_psp) {
6233 tmp = env->v7m.other_sp;
6234 env->v7m.other_sp = env->regs[13];
6235 env->regs[13] = tmp;
6236 }
6237 }
6238
6239 /* Switch M profile security state between NS and S */
6240 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
6241 {
6242 uint32_t new_ss_msp, new_ss_psp;
6243
6244 if (env->v7m.secure == new_secstate) {
6245 return;
6246 }
6247
6248 /* All the banked state is accessed by looking at env->v7m.secure
6249 * except for the stack pointer; rearrange the SP appropriately.
6250 */
6251 new_ss_msp = env->v7m.other_ss_msp;
6252 new_ss_psp = env->v7m.other_ss_psp;
6253
6254 if (v7m_using_psp(env)) {
6255 env->v7m.other_ss_psp = env->regs[13];
6256 env->v7m.other_ss_msp = env->v7m.other_sp;
6257 } else {
6258 env->v7m.other_ss_msp = env->regs[13];
6259 env->v7m.other_ss_psp = env->v7m.other_sp;
6260 }
6261
6262 env->v7m.secure = new_secstate;
6263
6264 if (v7m_using_psp(env)) {
6265 env->regs[13] = new_ss_psp;
6266 env->v7m.other_sp = new_ss_msp;
6267 } else {
6268 env->regs[13] = new_ss_msp;
6269 env->v7m.other_sp = new_ss_psp;
6270 }
6271 }
6272
6273 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
6274 {
6275 /* Handle v7M BXNS:
6276 * - if the return value is a magic value, do exception return (like BX)
6277 * - otherwise bit 0 of the return value is the target security state
6278 */
6279 uint32_t min_magic;
6280
6281 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6282 /* Covers FNC_RETURN and EXC_RETURN magic */
6283 min_magic = FNC_RETURN_MIN_MAGIC;
6284 } else {
6285 /* EXC_RETURN magic only */
6286 min_magic = EXC_RETURN_MIN_MAGIC;
6287 }
6288
6289 if (dest >= min_magic) {
6290 /* This is an exception return magic value; put it where
6291 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
6292 * Note that if we ever add gen_ss_advance() singlestep support to
6293 * M profile this should count as an "instruction execution complete"
6294 * event (compare gen_bx_excret_final_code()).
6295 */
6296 env->regs[15] = dest & ~1;
6297 env->thumb = dest & 1;
6298 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
6299 /* notreached */
6300 }
6301
6302 /* translate.c should have made BXNS UNDEF unless we're secure */
6303 assert(env->v7m.secure);
6304
6305 switch_v7m_security_state(env, dest & 1);
6306 env->thumb = 1;
6307 env->regs[15] = dest & ~1;
6308 }
6309
6310 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
6311 {
6312 /* Handle v7M BLXNS:
6313 * - bit 0 of the destination address is the target security state
6314 */
6315
6316 /* At this point regs[15] is the address just after the BLXNS */
6317 uint32_t nextinst = env->regs[15] | 1;
6318 uint32_t sp = env->regs[13] - 8;
6319 uint32_t saved_psr;
6320
6321 /* translate.c will have made BLXNS UNDEF unless we're secure */
6322 assert(env->v7m.secure);
6323
6324 if (dest & 1) {
6325 /* target is Secure, so this is just a normal BLX,
6326 * except that the low bit doesn't indicate Thumb/not.
6327 */
6328 env->regs[14] = nextinst;
6329 env->thumb = 1;
6330 env->regs[15] = dest & ~1;
6331 return;
6332 }
6333
6334 /* Target is non-secure: first push a stack frame */
6335 if (!QEMU_IS_ALIGNED(sp, 8)) {
6336 qemu_log_mask(LOG_GUEST_ERROR,
6337 "BLXNS with misaligned SP is UNPREDICTABLE\n");
6338 }
6339
6340 saved_psr = env->v7m.exception;
6341 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
6342 saved_psr |= XPSR_SFPA;
6343 }
6344
6345 /* Note that these stores can throw exceptions on MPU faults */
6346 cpu_stl_data(env, sp, nextinst);
6347 cpu_stl_data(env, sp + 4, saved_psr);
6348
6349 env->regs[13] = sp;
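/* 0xfeffffff is the FNC_RETURN magic value (FNC_RETURN_MIN_MAGIC with
 * bit 0 set); a later BXNS to this LR value triggers the magic
 * function-return handling.
 */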
6350 env->regs[14] = 0xfeffffff;
6351 if (arm_v7m_is_handler_mode(env)) {
6352 /* Write a dummy value to IPSR, to avoid leaking the current secure
6353 * exception number to non-secure code. This is guaranteed not
6354 * to cause write_v7m_exception() to actually change stacks.
6355 */
6356 write_v7m_exception(env, 1);
6357 }
6358 switch_v7m_security_state(env, 0);
6359 env->thumb = 1;
6360 env->regs[15] = dest;
6361 }
6362
6363 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
6364 bool spsel)
6365 {
6366 /* Return a pointer to the location where we currently store the
6367 * stack pointer for the requested security state and thread mode.
6368 * This pointer will become invalid if the CPU state is updated
6369 * such that the stack pointers are switched around (eg changing
6370 * the SPSEL control bit).
6371 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
6372 * Unlike that pseudocode, we require the caller to pass us in the
6373 * SPSEL control bit value; this is because we also use this
6374 * function in handling of pushing of the callee-saves registers
6375 * part of the v8M stack frame (pseudocode PushCalleeStack()),
6376 * and in the tailchain codepath the SPSEL bit comes from the exception
6377 * return magic LR value from the previous exception. The pseudocode
6378 * opencodes the stack-selection in PushCalleeStack(), but we prefer
6379 * to make this utility function generic enough to do the job.
6380 */
6381 bool want_psp = threadmode && spsel;
6382
6383 if (secure == env->v7m.secure) {
6384 if (want_psp == v7m_using_psp(env)) {
6385 return &env->regs[13];
6386 } else {
6387 return &env->v7m.other_sp;
6388 }
6389 } else {
6390 if (want_psp) {
6391 return &env->v7m.other_ss_psp;
6392 } else {
6393 return &env->v7m.other_ss_msp;
6394 }
6395 }
6396 }
6397
6398 static uint32_t arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure)
6399 {
6400 CPUState *cs = CPU(cpu);
6401 CPUARMState *env = &cpu->env;
6402 MemTxResult result;
6403 hwaddr vec = env->v7m.vecbase[targets_secure] + exc * 4;
6404 uint32_t addr;
6405
6406 addr = address_space_ldl(cs->as, vec,
6407 MEMTXATTRS_UNSPECIFIED, &result);
6408 if (result != MEMTX_OK) {
6409 /* Architecturally this should cause a HardFault setting HFSR.VECTTBL,
6410 * which would then be immediately followed by our failing to load
6411 * the entry vector for that HardFault, which is a Lockup case.
6412 * Since we don't model Lockup, we just report this guest error
6413 * via cpu_abort().
6414 */
6415 cpu_abort(cs, "Failed to read from %s exception vector table "
6416 "entry %08x\n", targets_secure ? "secure" : "nonsecure",
6417 (unsigned)vec);
6418 }
6419 return addr;
6420 }
6421
6422 static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
6423 bool ignore_faults)
6424 {
6425 /* For v8M, push the callee-saves register part of the stack frame.
6426 * Compare the v8M pseudocode PushCalleeStack().
6427 * In the tailchaining case this may not be the current stack.
6428 */
6429 CPUARMState *env = &cpu->env;
6430 CPUState *cs = CPU(cpu);
6431 uint32_t *frame_sp_p;
6432 uint32_t frameptr;
6433
6434 if (dotailchain) {
6435 frame_sp_p = get_v7m_sp_ptr(env, true,
6436 lr & R_V7M_EXCRET_MODE_MASK,
6437 lr & R_V7M_EXCRET_SPSEL_MASK);
6438 } else {
6439 frame_sp_p = &env->regs[13];
6440 }
6441
6442 frameptr = *frame_sp_p - 0x28;
6443
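/* The first word pushed is the v8M integrity signature; the exception
 * return path checks the popped value against the same constant (see
 * expected_sig in do_v7m_exception_exit()).
 */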
6444 stl_phys(cs->as, frameptr, 0xfefa125b);
6445 stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
6446 stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
6447 stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
6448 stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
6449 stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
6450 stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
6451 stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
6452 stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
6453
6454 *frame_sp_p = frameptr;
6455 }
6456
6457 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
6458 bool ignore_stackfaults)
6459 {
6460 /* Do the "take the exception" parts of exception entry,
6461 * but not the pushing of state to the stack. This is
6462 * similar to the pseudocode ExceptionTaken() function.
6463 */
6464 CPUARMState *env = &cpu->env;
6465 uint32_t addr;
6466 bool targets_secure;
6467 int exc;
6468
6469 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
6470
6471 if (arm_feature(env, ARM_FEATURE_V8)) {
6472 if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
6473 (lr & R_V7M_EXCRET_S_MASK)) {
6474 /* The background code (the owner of the registers in the
6475 * exception frame) is Secure. This means it may either already
6476 * have or now needs to push callee-saves registers.
6477 */
6478 if (targets_secure) {
6479 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
6480 /* We took an exception from Secure to NonSecure
6481 * (which means the callee-saved registers got stacked)
6482 * and are now tailchaining to a Secure exception.
6483 * Clear DCRS so eventual return from this Secure
6484 * exception unstacks the callee-saved registers.
6485 */
6486 lr &= ~R_V7M_EXCRET_DCRS_MASK;
6487 }
6488 } else {
6489 /* We're going to a non-secure exception; push the
6490 * callee-saves registers to the stack now, if they're
6491 * not already saved.
6492 */
6493 if (lr & R_V7M_EXCRET_DCRS_MASK &&
6494 !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
6495 v7m_push_callee_stack(cpu, lr, dotailchain,
6496 ignore_stackfaults);
6497 }
6498 lr |= R_V7M_EXCRET_DCRS_MASK;
6499 }
6500 }
6501
6502 lr &= ~R_V7M_EXCRET_ES_MASK;
6503 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6504 lr |= R_V7M_EXCRET_ES_MASK;
6505 }
6506 lr &= ~R_V7M_EXCRET_SPSEL_MASK;
6507 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
6508 lr |= R_V7M_EXCRET_SPSEL_MASK;
6509 }
6510
6511 /* Clear registers if necessary to prevent non-secure exception
6512 * code being able to see register values from secure code.
6513 * Where register values become architecturally UNKNOWN we leave
6514 * them with their previous values.
6515 */
6516 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6517 if (!targets_secure) {
6518 /* Always clear the caller-saved registers (they have been
6519 * pushed to the stack earlier in v7m_push_stack()).
6520 * Clear callee-saved registers if the background code is
6521 * Secure (in which case these regs were saved in
6522 * v7m_push_callee_stack()).
6523 */
6524 int i;
6525
6526 for (i = 0; i < 13; i++) {
6527 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
6528 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
6529 env->regs[i] = 0;
6530 }
6531 }
6532 /* Clear EAPSR */
6533 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
6534 }
6535 }
6536 }
6537
6538 addr = arm_v7m_load_vector(cpu, exc, targets_secure);
6539
6540 /* Now we've done everything that might cause a derived exception
6541 * we can go ahead and activate whichever exception we're going to
6542 * take (which might now be the derived exception).
6543 */
6544 armv7m_nvic_acknowledge_irq(env->nvic);
6545
6546 /* Switch to target security state -- must do this before writing SPSEL */
6547 switch_v7m_security_state(env, targets_secure);
6548 write_v7m_control_spsel(env, 0);
6549 arm_clear_exclusive(env);
6550 /* Clear IT bits */
6551 env->condexec_bits = 0;
6552 env->regs[14] = lr;
6553 env->regs[15] = addr & 0xfffffffe;
6554 env->thumb = addr & 1;
6555 }
6556
6557 static bool v7m_push_stack(ARMCPU *cpu)
6558 {
6559 /* Do the "set up stack frame" part of exception entry,
6560 * similar to pseudocode PushStack().
6561 * Return true if we generate a derived exception (and so
6562 * should ignore further stack faults trying to process
6563 * that derived exception.)
6564 */
6565 CPUARMState *env = &cpu->env;
6566 uint32_t xpsr = xpsr_read(env);
6567
6568 /* Align stack pointer if the guest wants that */
6569 if ((env->regs[13] & 4) &&
6570 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
6571 env->regs[13] -= 4;
6572 xpsr |= XPSR_SPREALIGN;
6573 }
6574 /* Push the basic 8-word exception frame: r0-r3, r12, lr, pc, xPSR. */
6575 v7m_push(env, xpsr);
6576 v7m_push(env, env->regs[15]);
6577 v7m_push(env, env->regs[14]);
6578 v7m_push(env, env->regs[12]);
6579 v7m_push(env, env->regs[3]);
6580 v7m_push(env, env->regs[2]);
6581 v7m_push(env, env->regs[1]);
6582 v7m_push(env, env->regs[0]);
6583
6584 return false;
6585 }
6586
6587 static void do_v7m_exception_exit(ARMCPU *cpu)
6588 {
6589 CPUARMState *env = &cpu->env;
6590 CPUState *cs = CPU(cpu);
6591 uint32_t excret;
6592 uint32_t xpsr;
6593 bool ufault = false;
6594 bool sfault = false;
6595 bool return_to_sp_process;
6596 bool return_to_handler;
6597 bool rettobase = false;
6598 bool exc_secure = false;
6599 bool return_to_secure;
6600
6601 /* If we're not in Handler mode then jumps to magic exception-exit
6602 * addresses don't have magic behaviour. However for the v8M
6603 * security extensions the magic secure-function-return has to
6604 * work in thread mode too, so to avoid doing an extra check in
6605 * the generated code we allow exception-exit magic to also cause the
6606 * internal exception and bring us here in thread mode. Correct code
6607 * will never try to do this (the following insn fetch will always
6608 * fault) so we the overhead of having taken an unnecessary exception
6609 * doesn't matter.
6610 */
6611 if (!arm_v7m_is_handler_mode(env)) {
6612 return;
6613 }
6614
6615 /* In the spec pseudocode ExceptionReturn() is called directly
6616 * from BXWritePC() and gets the full target PC value including
6617 * bit zero. In QEMU's implementation we treat it as a normal
6618 * jump-to-register (which is then caught later on), and so split
6619 * the target value up between env->regs[15] and env->thumb in
6620 * gen_bx(). Reconstitute it.
6621 */
6622 excret = env->regs[15];
6623 if (env->thumb) {
6624 excret |= 1;
6625 }
6626
6627 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
6628 " previous exception %d\n",
6629 excret, env->v7m.exception);
6630
6631 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
6632 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
6633 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
6634 excret);
6635 }
6636
6637 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6638 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
6639 * we pick which FAULTMASK to clear.
6640 */
6641 if (!env->v7m.secure &&
6642 ((excret & R_V7M_EXCRET_ES_MASK) ||
6643 !(excret & R_V7M_EXCRET_DCRS_MASK))) {
6644 sfault = true;
6645 /* For all other purposes, treat ES as 0 (R_HXSR) */
6646 excret &= ~R_V7M_EXCRET_ES_MASK;
6647 }
6648 }
6649
6650 if (env->v7m.exception != ARMV7M_EXCP_NMI) {
6651 /* Auto-clear FAULTMASK on return from other than NMI.
6652 * If the security extension is implemented then this only
6653 * happens if the raw execution priority is >= 0; the
6654 * value of the ES bit in the exception return value indicates
6655 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
6656 */
6657 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6658 exc_secure = excret & R_V7M_EXCRET_ES_MASK;
6659 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
6660 env->v7m.faultmask[exc_secure] = 0;
6661 }
6662 } else {
6663 env->v7m.faultmask[M_REG_NS] = 0;
6664 }
6665 }
6666
6667 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
6668 exc_secure)) {
6669 case -1:
6670 /* attempt to exit an exception that isn't active */
6671 ufault = true;
6672 break;
6673 case 0:
6674 /* still an irq active now */
6675 break;
6676 case 1:
6677 /* we returned to base exception level, no nesting.
6678 * (In the pseudocode this is written using "NestedActivation != 1"
6679 * where we have 'rettobase == false'.)
6680 */
6681 rettobase = true;
6682 break;
6683 default:
6684 g_assert_not_reached();
6685 }
6686
6687 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
6688 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
6689 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
6690 (excret & R_V7M_EXCRET_S_MASK);
6691
6692 if (arm_feature(env, ARM_FEATURE_V8)) {
6693 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6694 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
6695 * we choose to take the UsageFault.
6696 */
6697 if ((excret & R_V7M_EXCRET_S_MASK) ||
6698 (excret & R_V7M_EXCRET_ES_MASK) ||
6699 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
6700 ufault = true;
6701 }
6702 }
6703 if (excret & R_V7M_EXCRET_RES0_MASK) {
6704 ufault = true;
6705 }
6706 } else {
6707 /* For v7M we only recognize certain combinations of the low bits */
6708 switch (excret & 0xf) {
6709 case 1: /* Return to Handler */
6710 break;
6711 case 13: /* Return to Thread using Process stack */
6712 case 9: /* Return to Thread using Main stack */
6713 /* We only need to check NONBASETHRDENA for v7M, because in
6714 * v8M this bit does not exist (it is RES1).
6715 */
6716 if (!rettobase &&
6717 !(env->v7m.ccr[env->v7m.secure] &
6718 R_V7M_CCR_NONBASETHRDENA_MASK)) {
6719 ufault = true;
6720 }
6721 break;
6722 default:
6723 ufault = true;
6724 }
6725 }
6726
6727 if (sfault) {
6728 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
6729 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
6730 v7m_exception_taken(cpu, excret, true, false);
6731 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
6732 "stackframe: failed EXC_RETURN.ES validity check\n");
6733 return;
6734 }
6735
6736 if (ufault) {
6737 /* Bad exception return: instead of popping the exception
6738 * stack, directly take a usage fault on the current stack.
6739 */
6740 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
6741 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
6742 v7m_exception_taken(cpu, excret, true, false);
6743 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
6744 "stackframe: failed exception return integrity check\n");
6745 return;
6746 }
6747
6748 /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
6749 * Handler mode (and will be until we write the new XPSR.Interrupt
6750 * field) this does not switch around the current stack pointer.
6751 */
6752 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
6753
6754 switch_v7m_security_state(env, return_to_secure);
6755
6756 {
6757 /* The stack pointer we should be reading the exception frame from
6758 * depends on bits in the magic exception return type value (and
6759 * for v8M isn't necessarily the stack pointer we will eventually
6760 * end up resuming execution with). Get a pointer to the location
6761 * in the CPU state struct where the SP we need is currently being
6762 * stored; we will use and modify it in place.
6763 * We use this limited C variable scope so we don't accidentally
6764 * use 'frame_sp_p' after we do something that makes it invalid.
6765 */
6766 uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
6767 return_to_secure,
6768 !return_to_handler,
6769 return_to_sp_process);
6770 uint32_t frameptr = *frame_sp_p;
6771
6772 if (!QEMU_IS_ALIGNED(frameptr, 8) &&
6773 arm_feature(env, ARM_FEATURE_V8)) {
6774 qemu_log_mask(LOG_GUEST_ERROR,
6775 "M profile exception return with non-8-aligned SP "
6776 "for destination state is UNPREDICTABLE\n");
6777 }
6778
6779 /* Do we need to pop callee-saved registers? */
6780 if (return_to_secure &&
6781 ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
6782 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
6783 uint32_t expected_sig = 0xfefa125b;
6784 uint32_t actual_sig = ldl_phys(cs->as, frameptr);
6785
6786 if (expected_sig != actual_sig) {
6787 /* Take a SecureFault on the current stack */
6788 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
6789 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
6790 v7m_exception_taken(cpu, excret, true, false);
6791 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
6792 "stackframe: failed exception return integrity "
6793 "signature check\n");
6794 return;
6795 }
6796
6797 env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
6798 env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
6799 env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
6800 env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
6801 env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
6802 env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
6803 env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
6804 env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);
6805
6806 frameptr += 0x28;
6807 }
6808
6809 /* Pop registers. TODO: make these accesses use the correct
6810 * attributes and address space (S/NS, priv/unpriv) and handle
6811 * memory transaction failures.
6812 */
6813 env->regs[0] = ldl_phys(cs->as, frameptr);
6814 env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
6815 env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
6816 env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
6817 env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
6818 env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
6819 env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
6820
6821 /* Returning from an exception with a PC with bit 0 set is defined
6822 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
6823 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
6824 * the lsbit, and there are several RTOSes out there which incorrectly
6825 * assume the r15 in the stack frame should be a Thumb-style "lsbit
6826 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
6827 * complain about the badly behaved guest.
6828 */
6829 if (env->regs[15] & 1) {
6830 env->regs[15] &= ~1U;
6831 if (!arm_feature(env, ARM_FEATURE_V8)) {
6832 qemu_log_mask(LOG_GUEST_ERROR,
6833 "M profile return from interrupt with misaligned "
6834 "PC is UNPREDICTABLE on v7M\n");
6835 }
6836 }
6837
6838 xpsr = ldl_phys(cs->as, frameptr + 0x1c);
6839
6840 if (arm_feature(env, ARM_FEATURE_V8)) {
6841 /* For v8M we have to check whether the xPSR exception field
6842 * matches the EXCRET value for return to handler/thread
6843 * before we commit to changing the SP and xPSR.
6844 */
6845 bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
6846 if (return_to_handler != will_be_handler) {
6847 /* Take an INVPC UsageFault on the current stack.
6848 * By this point we will have switched to the security state
6849 * for the background state, so this UsageFault will target
6850 * that state.
6851 */
6852 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
6853 env->v7m.secure);
6854 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
6855 v7m_exception_taken(cpu, excret, true, false);
6856 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
6857 "stackframe: failed exception return integrity "
6858 "check\n");
6859 return;
6860 }
6861 }
6862
6863 /* Commit to consuming the stack frame */
6864 frameptr += 0x20;
6865 /* Undo stack alignment (the SPREALIGN bit indicates that the original
6866 * pre-exception SP was not 8-aligned and we added a padding word to
6867 * align it, so we undo this by ORing in the bit that increases it
6868 * from the current 8-aligned value to the 8-unaligned value. (Adding 4
6869 * would work too but a logical OR is how the pseudocode specifies it.)
6870 */
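/* Worked example: an original SP of 0x20003ff4 was aligned down to
 * 0x20003ff0 before stacking; at this point frameptr is back at
 * 0x20003ff0, and ORing bit 2 in restores 0x20003ff4.
 */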
6871 if (xpsr & XPSR_SPREALIGN) {
6872 frameptr |= 4;
6873 }
6874 *frame_sp_p = frameptr;
6875 }
6876 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
6877 xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
6878
6879 /* The restored xPSR exception field will be zero if we're
6880 * resuming in Thread mode. If that doesn't match what the
6881 * exception return excret specified then this is a UsageFault.
6882 * v7M requires we make this check here; v8M did it earlier.
6883 */
6884 if (return_to_handler != arm_v7m_is_handler_mode(env)) {
6885 /* Take an INVPC UsageFault by pushing the stack again;
6886 * we know we're v7M so this is never a Secure UsageFault.
6887 */
6888 bool ignore_stackfaults;
6889
6890 assert(!arm_feature(env, ARM_FEATURE_V8));
6891 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
6892 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
6893 ignore_stackfaults = v7m_push_stack(cpu);
6894 v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
6895 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
6896 "failed exception return integrity check\n");
6897 return;
6898 }
6899
6900 /* Otherwise, we have a successful exception exit. */
6901 arm_clear_exclusive(env);
6902 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
6903 }
6904
6905 static bool do_v7m_function_return(ARMCPU *cpu)
6906 {
6907 /* v8M security extensions magic function return.
6908 * We may either:
6909 * (1) throw an exception (longjump)
6910 * (2) return true if we successfully handled the function return
6911 * (3) return false if we failed a consistency check and have
6912 * pended a UsageFault that needs to be taken now
6913 *
6914 * At this point the magic return value is split between env->regs[15]
6915 * and env->thumb. We don't bother to reconstitute it because we don't
6916 * need it (all values are handled the same way).
6917 */
6918 CPUARMState *env = &cpu->env;
6919 uint32_t newpc, newpsr, newpsr_exc;
6920
6921 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
6922
6923 {
6924 bool threadmode, spsel;
6925 TCGMemOpIdx oi;
6926 ARMMMUIdx mmu_idx;
6927 uint32_t *frame_sp_p;
6928 uint32_t frameptr;
6929
6930 /* Pull the return address and IPSR from the Secure stack */
6931 threadmode = !arm_v7m_is_handler_mode(env);
6932 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
6933
6934 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
6935 frameptr = *frame_sp_p;
6936
6937 /* These loads may throw an exception (for MPU faults). We want to
6938 * do them as secure, so work out what MMU index that is.
6939 */
6940 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
6941 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
6942 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
6943 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
6944
6945 /* Consistency checks on new IPSR */
6946 newpsr_exc = newpsr & XPSR_EXCP;
6947 if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
6948 (env->v7m.exception == 1 && newpsr_exc != 0))) {
6949 /* Pend the fault and tell our caller to take it */
6950 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
6951 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
6952 env->v7m.secure);
6953 qemu_log_mask(CPU_LOG_INT,
6954 "...taking INVPC UsageFault: "
6955 "IPSR consistency check failed\n");
6956 return false;
6957 }
6958
6959 *frame_sp_p = frameptr + 8;
6960 }
6961
6962 /* This invalidates frame_sp_p */
6963 switch_v7m_security_state(env, true);
6964 env->v7m.exception = newpsr_exc;
6965 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
6966 if (newpsr & XPSR_SFPA) {
6967 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
6968 }
6969 xpsr_write(env, 0, XPSR_IT);
6970 env->thumb = newpc & 1;
6971 env->regs[15] = newpc & ~1;
6972
6973 qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
6974 return true;
6975 }
6976
6977 static void arm_log_exception(int idx)
6978 {
6979 if (qemu_loglevel_mask(CPU_LOG_INT)) {
6980 const char *exc = NULL;
6981 static const char * const excnames[] = {
6982 [EXCP_UDEF] = "Undefined Instruction",
6983 [EXCP_SWI] = "SVC",
6984 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
6985 [EXCP_DATA_ABORT] = "Data Abort",
6986 [EXCP_IRQ] = "IRQ",
6987 [EXCP_FIQ] = "FIQ",
6988 [EXCP_BKPT] = "Breakpoint",
6989 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
6990 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
6991 [EXCP_HVC] = "Hypervisor Call",
6992 [EXCP_HYP_TRAP] = "Hypervisor Trap",
6993 [EXCP_SMC] = "Secure Monitor Call",
6994 [EXCP_VIRQ] = "Virtual IRQ",
6995 [EXCP_VFIQ] = "Virtual FIQ",
6996 [EXCP_SEMIHOST] = "Semihosting call",
6997 [EXCP_NOCP] = "v7M NOCP UsageFault",
6998 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
6999 };
7000
7001 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
7002 exc = excnames[idx];
7003 }
7004 if (!exc) {
7005 exc = "unknown";
7006 }
7007 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
7008 }
7009 }
7010
7011 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
7012 uint32_t addr, uint16_t *insn)
7013 {
7014 /* Load a 16-bit portion of a v7M instruction, returning true on success,
7015 * or false on failure (in which case we will have pended the appropriate
7016 * exception).
7017 * We need to do the instruction fetch's MPU and SAU checks
7018 * like this because there is no MMU index that would allow
7019 * doing the load with a single function call. Instead we must
7020 * first check that the security attributes permit the load
7021 * and that they don't mismatch on the two halves of the instruction,
7022 * and then we do the load as a secure load (ie using the security
7023 * attributes of the address, not the CPU, as architecturally required).
7024 */
7025 CPUState *cs = CPU(cpu);
7026 CPUARMState *env = &cpu->env;
7027 V8M_SAttributes sattrs = {};
7028 MemTxAttrs attrs = {};
7029 ARMMMUFaultInfo fi = {};
7030 MemTxResult txres;
7031 target_ulong page_size;
7032 hwaddr physaddr;
7033 int prot;
7034
7035 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
7036 if (!sattrs.nsc || sattrs.ns) {
7037 /* This must be the second half of the insn, and it straddles a
7038 * region boundary with the second half not being S&NSC.
7039 */
7040 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
7041 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7042 qemu_log_mask(CPU_LOG_INT,
7043 "...really SecureFault with SFSR.INVEP\n");
7044 return false;
7045 }
7046 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
7047 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
7048 /* the MPU lookup failed */
7049 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
7050 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
7051 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
7052 return false;
7053 }
7054 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
7055 attrs, &txres);
7056 if (txres != MEMTX_OK) {
7057 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
7058 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
7059 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
7060 return false;
7061 }
7062 return true;
7063 }
7064
7065 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
7066 {
7067 /* Check whether this attempt to execute code in a Secure & NS-Callable
7068 * memory region is for an SG instruction; if so, then emulate the
7069 * effect of the SG instruction and return true. Otherwise pend
7070 * the correct kind of exception and return false.
7071 */
7072 CPUARMState *env = &cpu->env;
7073 ARMMMUIdx mmu_idx;
7074 uint16_t insn;
7075
7076 /* We should never get here unless get_phys_addr_pmsav8() caused
7077 * an exception for NS executing in S&NSC memory.
7078 */
7079 assert(!env->v7m.secure);
7080 assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
7081
7082 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
7083 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
7084
7085 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
7086 return false;
7087 }
7088
7089 if (!env->thumb) {
7090 goto gen_invep;
7091 }
7092
7093 if (insn != 0xe97f) {
7094 /* Not an SG instruction first half (we choose the IMPDEF
7095 * early-SG-check option).
7096 */
7097 goto gen_invep;
7098 }
7099
7100 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
7101 return false;
7102 }
7103
7104 if (insn != 0xe97f) {
7105 /* Not an SG instruction second half (yes, both halves of the SG
7106 * insn have the same hex value)
7107 */
7108 goto gen_invep;
7109 }
7110
7111 /* OK, we have confirmed that we really have an SG instruction.
7112 * We know we're NS in S memory so don't need to repeat those checks.
7113 */
7114 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
7115 ", executing it\n", env->regs[15]);
7116 env->regs[14] &= ~1;
7117 switch_v7m_security_state(env, true);
7118 xpsr_write(env, 0, XPSR_IT);
7119 env->regs[15] += 4;
7120 return true;
7121
7122 gen_invep:
7123 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
7124 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7125 qemu_log_mask(CPU_LOG_INT,
7126 "...really SecureFault with SFSR.INVEP\n");
7127 return false;
7128 }
7129
7130 void arm_v7m_cpu_do_interrupt(CPUState *cs)
7131 {
7132 ARMCPU *cpu = ARM_CPU(cs);
7133 CPUARMState *env = &cpu->env;
7134 uint32_t lr;
7135 bool ignore_stackfaults;
7136
7137 arm_log_exception(cs->exception_index);
7138
7139 /* For exceptions we just mark as pending on the NVIC, and let that
7140 * handle it. */
7141 switch (cs->exception_index) {
7142 case EXCP_UDEF:
7143 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7144 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
7145 break;
7146 case EXCP_NOCP:
7147 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7148 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
7149 break;
7150 case EXCP_INVSTATE:
7151 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7152 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
7153 break;
7154 case EXCP_SWI:
7155 /* The PC already points to the next instruction. */
7156 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
7157 break;
7158 case EXCP_PREFETCH_ABORT:
7159 case EXCP_DATA_ABORT:
7160 /* Note that for M profile we don't have a guest facing FSR, but
7161 * the env->exception.fsr will be populated by the code that
7162 * raises the fault, in the A profile short-descriptor format.
7163 */
7164 switch (env->exception.fsr & 0xf) {
7165 case M_FAKE_FSR_NSC_EXEC:
7166 /* Exception generated when we try to execute code at an address
7167 * which is marked as Secure & Non-Secure Callable and the CPU
7168 * is in the Non-Secure state. The only instruction which can
7169 * be executed like this is SG (and that only if both halves of
7170 * the SG instruction have the same security attributes.)
7171 * Everything else must generate an INVEP SecureFault, so we
7172 * emulate the SG instruction here.
7173 */
7174 if (v7m_handle_execute_nsc(cpu)) {
7175 return;
7176 }
7177 break;
7178 case M_FAKE_FSR_SFAULT:
7179 /* Various flavours of SecureFault for attempts to execute or
7180 * access data in the wrong security state.
7181 */
7182 switch (cs->exception_index) {
7183 case EXCP_PREFETCH_ABORT:
7184 if (env->v7m.secure) {
7185 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
7186 qemu_log_mask(CPU_LOG_INT,
7187 "...really SecureFault with SFSR.INVTRAN\n");
7188 } else {
7189 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
7190 qemu_log_mask(CPU_LOG_INT,
7191 "...really SecureFault with SFSR.INVEP\n");
7192 }
7193 break;
7194 case EXCP_DATA_ABORT:
7195 /* This must be an NS access to S memory */
7196 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
7197 qemu_log_mask(CPU_LOG_INT,
7198 "...really SecureFault with SFSR.AUVIOL\n");
7199 break;
7200 }
7201 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7202 break;
7203 case 0x8: /* External Abort */
7204 switch (cs->exception_index) {
7205 case EXCP_PREFETCH_ABORT:
7206 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
7207 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
7208 break;
7209 case EXCP_DATA_ABORT:
7210 env->v7m.cfsr[M_REG_NS] |=
7211 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
7212 env->v7m.bfar = env->exception.vaddress;
7213 qemu_log_mask(CPU_LOG_INT,
7214 "...with CFSR.PRECISERR and BFAR 0x%x\n",
7215 env->v7m.bfar);
7216 break;
7217 }
7218 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
7219 break;
7220 default:
7221 /* All other FSR values are either MPU faults or "can't happen
7222 * for M profile" cases.
7223 */
7224 switch (cs->exception_index) {
7225 case EXCP_PREFETCH_ABORT:
7226 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
7227 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
7228 break;
7229 case EXCP_DATA_ABORT:
7230 env->v7m.cfsr[env->v7m.secure] |=
7231 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
7232 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
7233 qemu_log_mask(CPU_LOG_INT,
7234 "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
7235 env->v7m.mmfar[env->v7m.secure]);
7236 break;
7237 }
7238 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
7239 env->v7m.secure);
7240 break;
7241 }
7242 break;
7243 case EXCP_BKPT:
7244 if (semihosting_enabled()) {
7245 int nr;
7246 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
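/* BKPT 0xab is the architected semihosting call on M profile */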
7247 if (nr == 0xab) {
7248 env->regs[15] += 2;
7249 qemu_log_mask(CPU_LOG_INT,
7250 "...handling as semihosting call 0x%x\n",
7251 env->regs[0]);
7252 env->regs[0] = do_arm_semihosting(env);
7253 return;
7254 }
7255 }
7256 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
7257 break;
7258 case EXCP_IRQ:
7259 break;
7260 case EXCP_EXCEPTION_EXIT:
7261 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
7262 /* Must be v8M security extension function return */
7263 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
7264 assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
7265 if (do_v7m_function_return(cpu)) {
7266 return;
7267 }
7268 } else {
7269 do_v7m_exception_exit(cpu);
7270 return;
7271 }
7272 break;
7273 default:
7274 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
7275 return; /* Never happens. Keep compiler happy. */
7276 }
7277
7278 if (arm_feature(env, ARM_FEATURE_V8)) {
7279 lr = R_V7M_EXCRET_RES1_MASK |
7280 R_V7M_EXCRET_DCRS_MASK |
7281 R_V7M_EXCRET_FTYPE_MASK;
7282 /* The S bit indicates whether we should return to Secure
7283 * or NonSecure (ie our current state).
7284 * The ES bit indicates whether we're taking this exception
7285 * to Secure or NonSecure (ie our target state). We set it
7286 * later, in v7m_exception_taken().
7287 * The SPSEL bit is also set in v7m_exception_taken() for v8M.
7288 * This corresponds to the ARM ARM pseudocode for v8M setting
7289 * some LR bits in PushStack() and some in ExceptionTaken();
7290 * the distinction matters for the tailchain cases where we
7291 * can take an exception without pushing the stack.
7292 */
7293 if (env->v7m.secure) {
7294 lr |= R_V7M_EXCRET_S_MASK;
7295 }
7296 } else {
7297 lr = R_V7M_EXCRET_RES1_MASK |
7298 R_V7M_EXCRET_S_MASK |
7299 R_V7M_EXCRET_DCRS_MASK |
7300 R_V7M_EXCRET_FTYPE_MASK |
7301 R_V7M_EXCRET_ES_MASK;
7302 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
7303 lr |= R_V7M_EXCRET_SPSEL_MASK;
7304 }
7305 }
7306 if (!arm_v7m_is_handler_mode(env)) {
7307 lr |= R_V7M_EXCRET_MODE_MASK;
7308 }
7309
7310 ignore_stackfaults = v7m_push_stack(cpu);
7311 v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
7312 qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
7313 }
7314
7315 /* Function used to synchronize QEMU's AArch64 register set with AArch32
7316 * register set. This is necessary when switching between AArch32 and AArch64
7317 * execution state.
7318 */
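/* Mapping between xregs and the AArch32 banked registers used here and in
 * aarch64_sync_64_to_32() below:
 *   x0-x7   <-> r0-r7            x16/x17 <-> lr_irq/sp_irq
 *   x8-x12  <-> r8-r12 (usr)     x18/x19 <-> lr_svc/sp_svc
 *   x13/x14 <-> sp/lr (usr/sys)  x20/x21 <-> lr_abt/sp_abt
 *   x15     <-> sp_hyp           x22/x23 <-> lr_und/sp_und
 *                                x24-x30 <-> r8-r14 (fiq)
 */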
7319 void aarch64_sync_32_to_64(CPUARMState *env)
7320 {
7321 int i;
7322 uint32_t mode = env->uncached_cpsr & CPSR_M;
7323
7324 /* We can blanket copy R[0:7] to X[0:7] */
7325 for (i = 0; i < 8; i++) {
7326 env->xregs[i] = env->regs[i];
7327 }
7328
7329 /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
7330 * Otherwise, they come from the banked user regs.
7331 */
7332 if (mode == ARM_CPU_MODE_FIQ) {
7333 for (i = 8; i < 13; i++) {
7334 env->xregs[i] = env->usr_regs[i - 8];
7335 }
7336 } else {
7337 for (i = 8; i < 13; i++) {
7338 env->xregs[i] = env->regs[i];
7339 }
7340 }
7341
7342 /* Registers x13-x23 are the various mode SP and FP registers. Registers
7343 * r13 and r14 are only copied if we are in that mode, otherwise we copy
7344 * from the mode banked register.
7345 */
7346 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
7347 env->xregs[13] = env->regs[13];
7348 env->xregs[14] = env->regs[14];
7349 } else {
7350 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
7351 /* HYP is an exception in that it is copied from r14 */
7352 if (mode == ARM_CPU_MODE_HYP) {
7353 env->xregs[14] = env->regs[14];
7354 } else {
7355 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
7356 }
7357 }
7358
7359 if (mode == ARM_CPU_MODE_HYP) {
7360 env->xregs[15] = env->regs[13];
7361 } else {
7362 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
7363 }
7364
7365 if (mode == ARM_CPU_MODE_IRQ) {
7366 env->xregs[16] = env->regs[14];
7367 env->xregs[17] = env->regs[13];
7368 } else {
7369 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
7370 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
7371 }
7372
7373 if (mode == ARM_CPU_MODE_SVC) {
7374 env->xregs[18] = env->regs[14];
7375 env->xregs[19] = env->regs[13];
7376 } else {
7377 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
7378 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
7379 }
7380
7381 if (mode == ARM_CPU_MODE_ABT) {
7382 env->xregs[20] = env->regs[14];
7383 env->xregs[21] = env->regs[13];
7384 } else {
7385 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
7386 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
7387 }
7388
7389 if (mode == ARM_CPU_MODE_UND) {
7390 env->xregs[22] = env->regs[14];
7391 env->xregs[23] = env->regs[13];
7392 } else {
7393 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
7394 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
7395 }
7396
7397 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
7398 * mode, then we can copy from r8-r14. Otherwise, we copy from the
7399 * FIQ bank for r8-r14.
7400 */
7401 if (mode == ARM_CPU_MODE_FIQ) {
7402 for (i = 24; i < 31; i++) {
7403 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
7404 }
7405 } else {
7406 for (i = 24; i < 29; i++) {
7407 env->xregs[i] = env->fiq_regs[i - 24];
7408 }
7409 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
7410 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
7411 }
7412
7413 env->pc = env->regs[15];
7414 }
7415
7416 /* Function used to synchronize QEMU's AArch32 register set with AArch64
7417 * register set. This is necessary when switching between AArch32 and AArch64
7418 * execution state.
7419 */
7420 void aarch64_sync_64_to_32(CPUARMState *env)
7421 {
7422 int i;
7423 uint32_t mode = env->uncached_cpsr & CPSR_M;
7424
7425 /* We can blanket copy X[0:7] to R[0:7] */
7426 for (i = 0; i < 8; i++) {
7427 env->regs[i] = env->xregs[i];
7428 }
7429
7430 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
7431 * Otherwise, we copy x8-x12 into the banked user regs.
7432 */
7433 if (mode == ARM_CPU_MODE_FIQ) {
7434 for (i = 8; i < 13; i++) {
7435 env->usr_regs[i - 8] = env->xregs[i];
7436 }
7437 } else {
7438 for (i = 8; i < 13; i++) {
7439 env->regs[i] = env->xregs[i];
7440 }
7441 }
7442
7443 /* Registers r13 & r14 depend on the current mode.
7444 * If we are in a given mode, we copy the corresponding x registers to r13
7445 * and r14. Otherwise, we copy the x register to the banked r13 and r14
7446 * for the mode.
7447 */
7448 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
7449 env->regs[13] = env->xregs[13];
7450 env->regs[14] = env->xregs[14];
7451 } else {
7452 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
7453
7454 /* HYP is an exception in that it does not have its own banked r14 but
7455 * shares the USR r14
7456 */
7457 if (mode == ARM_CPU_MODE_HYP) {
7458 env->regs[14] = env->xregs[14];
7459 } else {
7460 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
7461 }
7462 }
7463
7464 if (mode == ARM_CPU_MODE_HYP) {
7465 env->regs[13] = env->xregs[15];
7466 } else {
7467 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
7468 }
7469
7470 if (mode == ARM_CPU_MODE_IRQ) {
7471 env->regs[14] = env->xregs[16];
7472 env->regs[13] = env->xregs[17];
7473 } else {
7474 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
7475 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
7476 }
7477
7478 if (mode == ARM_CPU_MODE_SVC) {
7479 env->regs[14] = env->xregs[18];
7480 env->regs[13] = env->xregs[19];
7481 } else {
7482 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
7483 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
7484 }
7485
7486 if (mode == ARM_CPU_MODE_ABT) {
7487 env->regs[14] = env->xregs[20];
7488 env->regs[13] = env->xregs[21];
7489 } else {
7490 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
7491 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
7492 }
7493
7494 if (mode == ARM_CPU_MODE_UND) {
7495 env->regs[14] = env->xregs[22];
7496 env->regs[13] = env->xregs[23];
7497 } else {
7498 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
7499 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
7500 }
7501
7502 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
7503 * mode, then we can copy to r8-r14. Otherwise, we copy to the
7504 * FIQ bank for r8-r14.
7505 */
7506 if (mode == ARM_CPU_MODE_FIQ) {
7507 for (i = 24; i < 31; i++) {
7508 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
7509 }
7510 } else {
7511 for (i = 24; i < 29; i++) {
7512 env->fiq_regs[i - 24] = env->xregs[i];
7513 }
7514 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
7515 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
7516 }
7517
7518 env->regs[15] = env->pc;
7519 }
7520
7521 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
7522 {
7523 ARMCPU *cpu = ARM_CPU(cs);
7524 CPUARMState *env = &cpu->env;
7525 uint32_t addr;
7526 uint32_t mask;
7527 int new_mode;
7528 uint32_t offset;
7529 uint32_t moe;
7530
7531 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
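/* (MOE encodings: 0b0001 breakpoint, 0b0011 BKPT instruction,
 * 0b0101 vector catch, 0b1010 watchpoint.)
 */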
7532 switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
7533 case EC_BREAKPOINT:
7534 case EC_BREAKPOINT_SAME_EL:
7535 moe = 1;
7536 break;
7537 case EC_WATCHPOINT:
7538 case EC_WATCHPOINT_SAME_EL:
7539 moe = 10;
7540 break;
7541 case EC_AA32_BKPT:
7542 moe = 3;
7543 break;
7544 case EC_VECTORCATCH:
7545 moe = 5;
7546 break;
7547 default:
7548 moe = 0;
7549 break;
7550 }
7551
7552 if (moe) {
7553 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
7554 }
7555
7556 /* TODO: Vectored interrupt controller. */
7557 switch (cs->exception_index) {
7558 case EXCP_UDEF:
7559 new_mode = ARM_CPU_MODE_UND;
7560 addr = 0x04;
7561 mask = CPSR_I;
7562 if (env->thumb)
7563 offset = 2;
7564 else
7565 offset = 4;
7566 break;
7567 case EXCP_SWI:
7568 new_mode = ARM_CPU_MODE_SVC;
7569 addr = 0x08;
7570 mask = CPSR_I;
7571 /* The PC already points to the next instruction. */
7572 offset = 0;
7573 break;
7574 case EXCP_BKPT:
7575 env->exception.fsr = 2;
7576 /* Fall through to prefetch abort. */
7577 case EXCP_PREFETCH_ABORT:
7578 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
7579 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
7580 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
7581 env->exception.fsr, (uint32_t)env->exception.vaddress);
7582 new_mode = ARM_CPU_MODE_ABT;
7583 addr = 0x0c;
7584 mask = CPSR_A | CPSR_I;
7585 offset = 4;
7586 break;
7587 case EXCP_DATA_ABORT:
7588 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
7589 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
7590 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
7591 env->exception.fsr,
7592 (uint32_t)env->exception.vaddress);
7593 new_mode = ARM_CPU_MODE_ABT;
7594 addr = 0x10;
7595 mask = CPSR_A | CPSR_I;
7596 offset = 8;
7597 break;
7598 case EXCP_IRQ:
7599 new_mode = ARM_CPU_MODE_IRQ;
7600 addr = 0x18;
7601 /* Disable IRQ and imprecise data aborts. */
7602 mask = CPSR_A | CPSR_I;
7603 offset = 4;
7604 if (env->cp15.scr_el3 & SCR_IRQ) {
7605 /* IRQ routed to monitor mode */
7606 new_mode = ARM_CPU_MODE_MON;
7607 mask |= CPSR_F;
7608 }
7609 break;
7610 case EXCP_FIQ:
7611 new_mode = ARM_CPU_MODE_FIQ;
7612 addr = 0x1c;
7613 /* Disable FIQ, IRQ and imprecise data aborts. */
7614 mask = CPSR_A | CPSR_I | CPSR_F;
7615 if (env->cp15.scr_el3 & SCR_FIQ) {
7616 /* FIQ routed to monitor mode */
7617 new_mode = ARM_CPU_MODE_MON;
7618 }
7619 offset = 4;
7620 break;
7621 case EXCP_VIRQ:
7622 new_mode = ARM_CPU_MODE_IRQ;
7623 addr = 0x18;
7624 /* Disable IRQ and imprecise data aborts. */
7625 mask = CPSR_A | CPSR_I;
7626 offset = 4;
7627 break;
7628 case EXCP_VFIQ:
7629 new_mode = ARM_CPU_MODE_FIQ;
7630 addr = 0x1c;
7631 /* Disable FIQ, IRQ and imprecise data aborts. */
7632 mask = CPSR_A | CPSR_I | CPSR_F;
7633 offset = 4;
7634 break;
7635 case EXCP_SMC:
7636 new_mode = ARM_CPU_MODE_MON;
7637 addr = 0x08;
7638 mask = CPSR_A | CPSR_I | CPSR_F;
7639 offset = 0;
7640 break;
7641 default:
7642 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
7643 return; /* Never happens. Keep compiler happy. */
7644 }
7645
7646 if (new_mode == ARM_CPU_MODE_MON) {
7647 addr += env->cp15.mvbar;
7648 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
7649 /* High vectors. When enabled, base address cannot be remapped. */
7650 addr += 0xffff0000;
7651 } else {
7652 /* ARM v7 architectures provide a vector base address register to remap
7653 * the interrupt vector table.
7654 * This register is only used in non-monitor mode, and is banked.
7655 * Note: only bits 31:5 are valid.
7656 */
7657 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
7658 }
7659
7660 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
7661 env->cp15.scr_el3 &= ~SCR_NS;
7662 }
7663
7664 switch_mode(env, new_mode);
7665 /* For exceptions taken to AArch32 we must clear the SS bit in both
7666 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
7667 */
7668 env->uncached_cpsr &= ~PSTATE_SS;
7669 env->spsr = cpsr_read(env);
7670 /* Clear IT bits. */
7671 env->condexec_bits = 0;
7672 /* Switch to the new mode, and to the correct instruction set. */
7673 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
7674 /* Set new mode endianness */
7675 env->uncached_cpsr &= ~CPSR_E;
7676 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
7677 env->uncached_cpsr |= CPSR_E;
7678 }
7679 env->daif |= mask;
7680 /* This is a lie, as there was no c1_sys on V4T/V5, but who cares
7681 * and we should just guard the Thumb mode on V4. */
7682 if (arm_feature(env, ARM_FEATURE_V4T)) {
7683 env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
7684 }
7685 env->regs[14] = env->regs[15] + offset;
7686 env->regs[15] = addr;
7687 }
7688
7689 /* Handle exception entry to a target EL which is using AArch64 */
7690 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
7691 {
7692 ARMCPU *cpu = ARM_CPU(cs);
7693 CPUARMState *env = &cpu->env;
7694 unsigned int new_el = env->exception.target_el;
7695 target_ulong addr = env->cp15.vbar_el[new_el];
7696 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
7697
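/* AArch64 vector table layout relative to VBAR_ELx: +0x000 current EL
 * with SP_EL0, +0x200 current EL with SP_ELx, +0x400 lower EL using
 * AArch64, +0x600 lower EL using AArch32; within each block, synchronous
 * exceptions are at +0x0, IRQ at +0x80 and FIQ at +0x100.
 */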
7698 if (arm_current_el(env) < new_el) {
7699 /* Entry vector offset depends on whether the implemented EL
7700 * immediately lower than the target level is using AArch32 or AArch64
7701 */
7702 bool is_aa64;
7703
7704 switch (new_el) {
7705 case 3:
7706 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
7707 break;
7708 case 2:
7709 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
7710 break;
7711 case 1:
7712 is_aa64 = is_a64(env);
7713 break;
7714 default:
7715 g_assert_not_reached();
7716 }
7717
7718 if (is_aa64) {
7719 addr += 0x400;
7720 } else {
7721 addr += 0x600;
7722 }
7723 } else if (pstate_read(env) & PSTATE_SP) {
7724 addr += 0x200;
7725 }
7726
7727 switch (cs->exception_index) {
7728 case EXCP_PREFETCH_ABORT:
7729 case EXCP_DATA_ABORT:
7730 env->cp15.far_el[new_el] = env->exception.vaddress;
7731 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
7732 env->cp15.far_el[new_el]);
7733 /* fall through */
7734 case EXCP_BKPT:
7735 case EXCP_UDEF:
7736 case EXCP_SWI:
7737 case EXCP_HVC:
7738 case EXCP_HYP_TRAP:
7739 case EXCP_SMC:
7740 env->cp15.esr_el[new_el] = env->exception.syndrome;
7741 break;
7742 case EXCP_IRQ:
7743 case EXCP_VIRQ:
7744 addr += 0x80;
7745 break;
7746 case EXCP_FIQ:
7747 case EXCP_VFIQ:
7748 addr += 0x100;
7749 break;
7750 case EXCP_SEMIHOST:
7751 qemu_log_mask(CPU_LOG_INT,
7752 "...handling as semihosting call 0x%" PRIx64 "\n",
7753 env->xregs[0]);
7754 env->xregs[0] = do_arm_semihosting(env);
7755 return;
7756 default:
7757 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
7758 }
7759
7760 if (is_a64(env)) {
7761 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
7762 aarch64_save_sp(env, arm_current_el(env));
7763 env->elr_el[new_el] = env->pc;
7764 } else {
7765 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
7766 env->elr_el[new_el] = env->regs[15];
7767
7768 aarch64_sync_32_to_64(env);
7769
7770 env->condexec_bits = 0;
7771 }
7772 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
7773 env->elr_el[new_el]);
7774
7775 pstate_write(env, PSTATE_DAIF | new_mode);
7776 env->aarch64 = 1;
7777 aarch64_restore_sp(env, new_el);
7778
7779 env->pc = addr;
7780
7781 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
7782 new_el, env->pc, pstate_read(env));
7783 }
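
/* Illustrative sketch (hypothetical helper) of the offset arithmetic above:
 * the AArch64 vector is VBAR_ELx plus a 0x200-byte slot chosen by where the
 * exception came from, plus an offset within the slot for IRQ/FIQ.
 */
static inline uint64_t example_aarch64_vector_addr(uint64_t vbar,
                                                   bool from_lower_el,
                                                   bool lower_el_is_aa64,
                                                   bool using_sp_elx,
                                                   uint64_t intra_slot)
{
    uint64_t addr = vbar;

    if (from_lower_el) {
        addr += lower_el_is_aa64 ? 0x400 : 0x600;
    } else if (using_sp_elx) {
        addr += 0x200;
    }
    return addr + intra_slot; /* 0x0 sync, 0x80 IRQ, 0x100 FIQ */
}
/* e.g. an IRQ taken from AArch32 EL0 to EL1: VBAR_EL1 + 0x600 + 0x80. */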
7784
7785 static inline bool check_for_semihosting(CPUState *cs)
7786 {
7787 /* Check whether this exception is a semihosting call; if so
7788 * then handle it and return true; otherwise return false.
7789 */
7790 ARMCPU *cpu = ARM_CPU(cs);
7791 CPUARMState *env = &cpu->env;
7792
7793 if (is_a64(env)) {
7794 if (cs->exception_index == EXCP_SEMIHOST) {
7795 /* This is always the 64-bit semihosting exception.
7796 * The "is this usermode" and "is semihosting enabled"
7797 * checks have been done at translate time.
7798 */
7799 qemu_log_mask(CPU_LOG_INT,
7800 "...handling as semihosting call 0x%" PRIx64 "\n",
7801 env->xregs[0]);
7802 env->xregs[0] = do_arm_semihosting(env);
7803 return true;
7804 }
7805 return false;
7806 } else {
7807 uint32_t imm;
7808
7809 /* Only intercept calls from privileged modes, to provide some
7810 * semblance of security.
7811 */
7812 if (cs->exception_index != EXCP_SEMIHOST &&
7813 (!semihosting_enabled() ||
7814 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
7815 return false;
7816 }
7817
7818 switch (cs->exception_index) {
7819 case EXCP_SEMIHOST:
7820 /* This is always a semihosting call; the "is this usermode"
7821 * and "is semihosting enabled" checks have been done at
7822 * translate time.
7823 */
7824 break;
7825 case EXCP_SWI:
7826 /* Check for semihosting interrupt. */
7827 if (env->thumb) {
7828 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
7829 & 0xff;
7830 if (imm == 0xab) {
7831 break;
7832 }
7833 } else {
7834 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
7835 & 0xffffff;
7836 if (imm == 0x123456) {
7837 break;
7838 }
7839 }
7840 return false;
7841 case EXCP_BKPT:
7842 /* See if this is a semihosting syscall. */
7843 if (env->thumb) {
7844 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
7845 & 0xff;
7846 if (imm == 0xab) {
7847 env->regs[15] += 2;
7848 break;
7849 }
7850 }
7851 return false;
7852 default:
7853 return false;
7854 }
7855
7856 qemu_log_mask(CPU_LOG_INT,
7857 "...handling as semihosting call 0x%x\n",
7858 env->regs[0]);
7859 env->regs[0] = do_arm_semihosting(env);
7860 return true;
7861 }
7862 }
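
/* Illustrative sketch (hypothetical helper): the immediate comparisons the
 * AArch32 path above performs. A Thumb "SVC 0xab" or an A32 "SVC 0x123456"
 * marks a semihosting request; BKPT uses immediate 0xab in Thumb state.
 */
static inline bool example_is_semihosting_imm(bool thumb, uint32_t imm)
{
    return thumb ? (imm & 0xff) == 0xab
                 : (imm & 0xffffff) == 0x123456;
}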
7863
7864 /* Handle a CPU exception for A and R profile CPUs.
7865 * Do any appropriate logging, handle PSCI calls, and then hand off
7866 * to the AArch64-entry or AArch32-entry function depending on the
7867 * target exception level's register width.
7868 */
7869 void arm_cpu_do_interrupt(CPUState *cs)
7870 {
7871 ARMCPU *cpu = ARM_CPU(cs);
7872 CPUARMState *env = &cpu->env;
7873 unsigned int new_el = env->exception.target_el;
7874
7875 assert(!arm_feature(env, ARM_FEATURE_M));
7876
7877 arm_log_exception(cs->exception_index);
7878 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
7879 new_el);
7880 if (qemu_loglevel_mask(CPU_LOG_INT)
7881 && !excp_is_internal(cs->exception_index)) {
7882 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
7883 env->exception.syndrome >> ARM_EL_EC_SHIFT,
7884 env->exception.syndrome);
7885 }
7886
7887 if (arm_is_psci_call(cpu, cs->exception_index)) {
7888 arm_handle_psci_call(cpu);
7889 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
7890 return;
7891 }
7892
7893 /* Semihosting semantics depend on the register width of the
7894 * code that caused the exception, not the target exception level,
7895 * so must be handled here.
7896 */
7897 if (check_for_semihosting(cs)) {
7898 return;
7899 }
7900
7901 assert(!excp_is_internal(cs->exception_index));
7902 if (arm_el_is_aa64(env, new_el)) {
7903 arm_cpu_do_interrupt_aarch64(cs);
7904 } else {
7905 arm_cpu_do_interrupt_aarch32(cs);
7906 }
7907
7908 /* Hooks may change global state, so the BQL should be held; it also
7909 * needs to be held for any modification of
7910 * cs->interrupt_request.
7911 */
7912 g_assert(qemu_mutex_iothread_locked());
7913
7914 arm_call_el_change_hook(cpu);
7915
7916 if (!kvm_enabled()) {
7917 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
7918 }
7919 }
7920
7921 /* Return the exception level which controls this address translation regime */
7922 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
7923 {
7924 switch (mmu_idx) {
7925 case ARMMMUIdx_S2NS:
7926 case ARMMMUIdx_S1E2:
7927 return 2;
7928 case ARMMMUIdx_S1E3:
7929 return 3;
7930 case ARMMMUIdx_S1SE0:
7931 return arm_el_is_aa64(env, 3) ? 1 : 3;
7932 case ARMMMUIdx_S1SE1:
7933 case ARMMMUIdx_S1NSE0:
7934 case ARMMMUIdx_S1NSE1:
7935 case ARMMMUIdx_MPrivNegPri:
7936 case ARMMMUIdx_MUserNegPri:
7937 case ARMMMUIdx_MPriv:
7938 case ARMMMUIdx_MUser:
7939 case ARMMMUIdx_MSPrivNegPri:
7940 case ARMMMUIdx_MSUserNegPri:
7941 case ARMMMUIdx_MSPriv:
7942 case ARMMMUIdx_MSUser:
7943 return 1;
7944 default:
7945 g_assert_not_reached();
7946 }
7947 }
7948
7949 /* Return the SCTLR value which controls this address translation regime */
7950 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
7951 {
7952 return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
7953 }
7954
7955 /* Return true if the specified stage of address translation is disabled */
7956 static inline bool regime_translation_disabled(CPUARMState *env,
7957 ARMMMUIdx mmu_idx)
7958 {
7959 if (arm_feature(env, ARM_FEATURE_M)) {
7960 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
7961 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
7962 case R_V7M_MPU_CTRL_ENABLE_MASK:
7963 /* Enabled, but not for HardFault and NMI */
7964 return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
7965 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
7966 /* Enabled for all cases */
7967 return false;
7968 case 0:
7969 default:
7970 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
7971 * we warned about that in armv7m_nvic.c when the guest set it.
7972 */
7973 return true;
7974 }
7975 }
7976
7977 if (mmu_idx == ARMMMUIdx_S2NS) {
7978 return (env->cp15.hcr_el2 & HCR_VM) == 0;
7979 }
7980 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
7981 }
7982
7983 static inline bool regime_translation_big_endian(CPUARMState *env,
7984 ARMMMUIdx mmu_idx)
7985 {
7986 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
7987 }
7988
7989 /* Return the TCR controlling this translation regime */
7990 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
7991 {
7992 if (mmu_idx == ARMMMUIdx_S2NS) {
7993 return &env->cp15.vtcr_el2;
7994 }
7995 return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
7996 }
7997
7998 /* Convert a possible stage1+2 MMU index into the appropriate
7999 * stage 1 MMU index
8000 */
8001 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
8002 {
8003 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8004 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
8005 }
8006 return mmu_idx;
8007 }
8008
8009 /* Return the TBI0 value for the current translation regime's EL */
8010 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
8011 {
8012 TCR *tcr;
8013 uint32_t el;
8014
8015 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
8016 * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
8017 */
8018 mmu_idx = stage_1_mmu_idx(mmu_idx);
8019
8020 tcr = regime_tcr(env, mmu_idx);
8021 el = regime_el(env, mmu_idx);
8022
8023 if (el > 1) {
8024 return extract64(tcr->raw_tcr, 20, 1);
8025 } else {
8026 return extract64(tcr->raw_tcr, 37, 1);
8027 }
8028 }
8029
8030 /* Return the TBI1 value for the current translation regime's EL */
8031 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
8032 {
8033 TCR *tcr;
8034 uint32_t el;
8035
8036 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
8037 * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
8038 */
8039 mmu_idx = stage_1_mmu_idx(mmu_idx);
8040
8041 tcr = regime_tcr(env, mmu_idx);
8042 el = regime_el(env, mmu_idx);
8043
8044 if (el > 1) {
8045 return 0;
8046 } else {
8047 return extract64(tcr->raw_tcr, 38, 1);
8048 }
8049 }
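
/* Worked example (values hypothetical): for an EL1 regime, TBI0 is bit 37
 * of TCR_EL1 and TBI1 is bit 38, exactly as extracted above.
 */
static inline void example_tbi_bits(uint64_t raw_tcr_el1,
                                    uint32_t *tbi0, uint32_t *tbi1)
{
    *tbi0 = extract64(raw_tcr_el1, 37, 1);
    *tbi1 = extract64(raw_tcr_el1, 38, 1);
}
/* e.g. raw_tcr_el1 = 1ULL << 37 gives tbi0 == 1 and tbi1 == 0: top-byte
 * ignore is enabled only for the low (TTBR0) half of the address space.
 */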
8050
8051 /* Return the TTBR associated with this translation regime */
8052 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
8053 int ttbrn)
8054 {
8055 if (mmu_idx == ARMMMUIdx_S2NS) {
8056 return env->cp15.vttbr_el2;
8057 }
8058 if (ttbrn == 0) {
8059 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
8060 } else {
8061 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
8062 }
8063 }
8064
8065 /* Return true if the translation regime is using LPAE format page tables */
8066 static inline bool regime_using_lpae_format(CPUARMState *env,
8067 ARMMMUIdx mmu_idx)
8068 {
8069 int el = regime_el(env, mmu_idx);
8070 if (el == 2 || arm_el_is_aa64(env, el)) {
8071 return true;
8072 }
8073 if (arm_feature(env, ARM_FEATURE_LPAE)
8074 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
8075 return true;
8076 }
8077 return false;
8078 }
8079
8080 /* Returns true if the stage 1 translation regime is using LPAE format page
8081 * tables. Used when raising alignment exceptions, whose FSR changes depending
8082 * on whether the long or short descriptor format is in use. */
8083 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
8084 {
8085 mmu_idx = stage_1_mmu_idx(mmu_idx);
8086
8087 return regime_using_lpae_format(env, mmu_idx);
8088 }
8089
8090 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
8091 {
8092 switch (mmu_idx) {
8093 case ARMMMUIdx_S1SE0:
8094 case ARMMMUIdx_S1NSE0:
8095 case ARMMMUIdx_MUser:
8096 case ARMMMUIdx_MSUser:
8097 case ARMMMUIdx_MUserNegPri:
8098 case ARMMMUIdx_MSUserNegPri:
8099 return true;
8100 default:
8101 return false;
8102 case ARMMMUIdx_S12NSE0:
8103 case ARMMMUIdx_S12NSE1:
8104 g_assert_not_reached();
8105 }
8106 }
8107
8108 /* Translate section/page access permissions to page
8109 * R/W protection flags
8110 *
8111 * @env: CPUARMState
8112 * @mmu_idx: MMU index indicating required translation regime
8113 * @ap: The 3-bit access permissions (AP[2:0])
8114 * @domain_prot: The 2-bit domain access permissions
8115 */
8116 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
8117 int ap, int domain_prot)
8118 {
8119 bool is_user = regime_is_user(env, mmu_idx);
8120
8121 if (domain_prot == 3) {
8122 return PAGE_READ | PAGE_WRITE;
8123 }
8124
8125 switch (ap) {
8126 case 0:
8127 if (arm_feature(env, ARM_FEATURE_V7)) {
8128 return 0;
8129 }
8130 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
8131 case SCTLR_S:
8132 return is_user ? 0 : PAGE_READ;
8133 case SCTLR_R:
8134 return PAGE_READ;
8135 default:
8136 return 0;
8137 }
8138 case 1:
8139 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8140 case 2:
8141 if (is_user) {
8142 return PAGE_READ;
8143 } else {
8144 return PAGE_READ | PAGE_WRITE;
8145 }
8146 case 3:
8147 return PAGE_READ | PAGE_WRITE;
8148 case 4: /* Reserved. */
8149 return 0;
8150 case 5:
8151 return is_user ? 0 : PAGE_READ;
8152 case 6:
8153 return PAGE_READ;
8154 case 7:
8155 if (!arm_feature(env, ARM_FEATURE_V6K)) {
8156 return 0;
8157 }
8158 return PAGE_READ;
8159 default:
8160 g_assert_not_reached();
8161 }
8162 }
8163
8164 /* Translate section/page access permissions to page
8165 * R/W protection flags.
8166 *
8167 * @ap: The 2-bit simple AP (AP[2:1])
8168 * @is_user: TRUE if accessing from PL0
8169 */
8170 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
8171 {
8172 switch (ap) {
8173 case 0:
8174 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8175 case 1:
8176 return PAGE_READ | PAGE_WRITE;
8177 case 2:
8178 return is_user ? 0 : PAGE_READ;
8179 case 3:
8180 return PAGE_READ;
8181 default:
8182 g_assert_not_reached();
8183 }
8184 }
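
/* Usage sketch (illustrative only): decoding the simple AP model above for
 * both privilege levels. AP == 2 is privileged-read-only, so a privileged
 * access gets PAGE_READ while an unprivileged one gets no access at all.
 */
static inline void example_simple_ap_decode(void)
{
    int priv_prot = simple_ap_to_rw_prot_is_user(2, false); /* PAGE_READ */
    int user_prot = simple_ap_to_rw_prot_is_user(2, true);  /* 0 */

    (void)priv_prot;
    (void)user_prot;
}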
8185
8186 static inline int
8187 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
8188 {
8189 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
8190 }
8191
8192 /* Translate S2 section/page access permissions to protection flags
8193 *
8194 * @env: CPUARMState
8195 * @s2ap: The 2-bit stage2 access permissions (S2AP)
8196 * @xn: XN (execute-never) bit
8197 */
8198 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
8199 {
8200 int prot = 0;
8201
8202 if (s2ap & 1) {
8203 prot |= PAGE_READ;
8204 }
8205 if (s2ap & 2) {
8206 prot |= PAGE_WRITE;
8207 }
8208 if (!xn) {
8209 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
8210 prot |= PAGE_EXEC;
8211 }
8212 }
8213 return prot;
8214 }
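
/* e.g. s2ap = 3 with xn = 0 yields PAGE_READ | PAGE_WRITE | PAGE_EXEC when
 * EL2 is AArch64; s2ap = 2 (write-only) with xn = 0 under an AArch32 EL2
 * yields only PAGE_WRITE, because stage 2 execute permission additionally
 * requires readability there.
 */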
8215
8216 /* Translate section/page access permissions to protection flags
8217 *
8218 * @env: CPUARMState
8219 * @mmu_idx: MMU index indicating required translation regime
8220 * @is_aa64: TRUE if AArch64
8221 * @ap: The 2-bit simple AP (AP[2:1])
8222 * @ns: NS (non-secure) bit
8223 * @xn: XN (execute-never) bit
8224 * @pxn: PXN (privileged execute-never) bit
8225 */
8226 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
8227 int ap, int ns, int xn, int pxn)
8228 {
8229 bool is_user = regime_is_user(env, mmu_idx);
8230 int prot_rw, user_rw;
8231 bool have_wxn;
8232 int wxn = 0;
8233
8234 assert(mmu_idx != ARMMMUIdx_S2NS);
8235
8236 user_rw = simple_ap_to_rw_prot_is_user(ap, true);
8237 if (is_user) {
8238 prot_rw = user_rw;
8239 } else {
8240 prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
8241 }
8242
8243 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
8244 return prot_rw;
8245 }
8246
8247 /* TODO have_wxn should be replaced with
8248 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
8249 * when ARM_FEATURE_EL2 starts getting set. For now we assume all
8250 * LPAE-compatible processors have EL2, which is required for [U]WXN.
8251 */
8252 have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
8253
8254 if (have_wxn) {
8255 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
8256 }
8257
8258 if (is_aa64) {
8259 switch (regime_el(env, mmu_idx)) {
8260 case 1:
8261 if (!is_user) {
8262 xn = pxn || (user_rw & PAGE_WRITE);
8263 }
8264 break;
8265 case 2:
8266 case 3:
8267 break;
8268 }
8269 } else if (arm_feature(env, ARM_FEATURE_V7)) {
8270 switch (regime_el(env, mmu_idx)) {
8271 case 1:
8272 case 3:
8273 if (is_user) {
8274 xn = xn || !(user_rw & PAGE_READ);
8275 } else {
8276 int uwxn = 0;
8277 if (have_wxn) {
8278 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
8279 }
8280 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
8281 (uwxn && (user_rw & PAGE_WRITE));
8282 }
8283 break;
8284 case 2:
8285 break;
8286 }
8287 } else {
8288 xn = wxn = 0;
8289 }
8290
8291 if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
8292 return prot_rw;
8293 }
8294 return prot_rw | PAGE_EXEC;
8295 }
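
/* Usage note for the WXN interaction above: with SCTLR.WXN set, any page
 * that is writable loses execute permission even if XN is clear, so e.g.
 * ap = 1 (RW at all privilege levels) with xn = 0 still yields
 * PAGE_READ | PAGE_WRITE without PAGE_EXEC.
 */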
8296
8297 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
8298 uint32_t *table, uint32_t address)
8299 {
8300 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
8301 TCR *tcr = regime_tcr(env, mmu_idx);
8302
8303 if (address & tcr->mask) {
8304 if (tcr->raw_tcr & TTBCR_PD1) {
8305 /* Translation table walk disabled for TTBR1 */
8306 return false;
8307 }
8308 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
8309 } else {
8310 if (tcr->raw_tcr & TTBCR_PD0) {
8311 /* Translation table walk disabled for TTBR0 */
8312 return false;
8313 }
8314 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
8315 }
8316 *table |= (address >> 18) & 0x3ffc;
8317 return true;
8318 }
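
/* Worked example (hypothetical values): each level 1 descriptor covers 1MB
 * and is 4 bytes wide, so the byte index is VA[31:20] * 4, which is what
 * "(address >> 18) & 0x3ffc" computes. With a table base of 0x80004000 and
 * address 0x00123456:
 *   byte index = (0x00123456 >> 18) & 0x3ffc = 0x4
 *   descriptor = 0x80004000 | 0x4 = 0x80004004
 */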
8319
8320 /* Translate a S1 pagetable walk through S2 if needed. */
8321 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
8322 hwaddr addr, MemTxAttrs txattrs,
8323 ARMMMUFaultInfo *fi)
8324 {
8325 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
8326 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
8327 target_ulong s2size;
8328 hwaddr s2pa;
8329 int s2prot;
8330 int ret;
8331
8332 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
8333 &txattrs, &s2prot, &s2size, fi, NULL);
8334 if (ret) {
8335 assert(fi->type != ARMFault_None);
8336 fi->s2addr = addr;
8337 fi->stage2 = true;
8338 fi->s1ptw = true;
8339 return ~0;
8340 }
8341 addr = s2pa;
8342 }
8343 return addr;
8344 }
8345
8346 /* All loads done in the course of a page table walk go through here.
8347 * TODO: rather than ignoring errors from physical memory reads (which
8348 * are external aborts in ARM terminology) we should propagate this
8349 * error out so that we can turn it into a Data Abort if this walk
8350 * was being done for a CPU load/store or an address translation instruction
8351 * (but not if it was for a debug access).
8352 */
8353 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
8354 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
8355 {
8356 ARMCPU *cpu = ARM_CPU(cs);
8357 CPUARMState *env = &cpu->env;
8358 MemTxAttrs attrs = {};
8359 MemTxResult result = MEMTX_OK;
8360 AddressSpace *as;
8361 uint32_t data;
8362
8363 attrs.secure = is_secure;
8364 as = arm_addressspace(cs, attrs);
8365 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
8366 if (fi->s1ptw) {
8367 return 0;
8368 }
8369 if (regime_translation_big_endian(env, mmu_idx)) {
8370 data = address_space_ldl_be(as, addr, attrs, &result);
8371 } else {
8372 data = address_space_ldl_le(as, addr, attrs, &result);
8373 }
8374 if (result == MEMTX_OK) {
8375 return data;
8376 }
8377 fi->type = ARMFault_SyncExternalOnWalk;
8378 fi->ea = arm_extabort_type(result);
8379 return 0;
8380 }
8381
8382 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
8383 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
8384 {
8385 ARMCPU *cpu = ARM_CPU(cs);
8386 CPUARMState *env = &cpu->env;
8387 MemTxAttrs attrs = {};
8388 MemTxResult result = MEMTX_OK;
8389 AddressSpace *as;
8390 uint64_t data;
8391
8392 attrs.secure = is_secure;
8393 as = arm_addressspace(cs, attrs);
8394 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
8395 if (fi->s1ptw) {
8396 return 0;
8397 }
8398 if (regime_translation_big_endian(env, mmu_idx)) {
8399 data = address_space_ldq_be(as, addr, attrs, &result);
8400 } else {
8401 data = address_space_ldq_le(as, addr, attrs, &result);
8402 }
8403 if (result == MEMTX_OK) {
8404 return data;
8405 }
8406 fi->type = ARMFault_SyncExternalOnWalk;
8407 fi->ea = arm_extabort_type(result);
8408 return 0;
8409 }
8410
8411 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
8412 MMUAccessType access_type, ARMMMUIdx mmu_idx,
8413 hwaddr *phys_ptr, int *prot,
8414 target_ulong *page_size,
8415 ARMMMUFaultInfo *fi)
8416 {
8417 CPUState *cs = CPU(arm_env_get_cpu(env));
8418 int level = 1;
8419 uint32_t table;
8420 uint32_t desc;
8421 int type;
8422 int ap;
8423 int domain = 0;
8424 int domain_prot;
8425 hwaddr phys_addr;
8426 uint32_t dacr;
8427
8428 /* Pagetable walk. */
8429 /* Lookup l1 descriptor. */
8430 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
8431 /* Section translation fault if page walk is disabled by PD0 or PD1 */
8432 fi->type = ARMFault_Translation;
8433 goto do_fault;
8434 }
8435 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
8436 mmu_idx, fi);
8437 if (fi->type != ARMFault_None) {
8438 goto do_fault;
8439 }
8440 type = (desc & 3);
8441 domain = (desc >> 5) & 0x0f;
8442 if (regime_el(env, mmu_idx) == 1) {
8443 dacr = env->cp15.dacr_ns;
8444 } else {
8445 dacr = env->cp15.dacr_s;
8446 }
8447 domain_prot = (dacr >> (domain * 2)) & 3;
8448 if (type == 0) {
8449 /* Section translation fault. */
8450 fi->type = ARMFault_Translation;
8451 goto do_fault;
8452 }
8453 if (type != 2) {
8454 level = 2;
8455 }
8456 if (domain_prot == 0 || domain_prot == 2) {
8457 fi->type = ARMFault_Domain;
8458 goto do_fault;
8459 }
8460 if (type == 2) {
8461 /* 1Mb section. */
8462 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
8463 ap = (desc >> 10) & 3;
8464 *page_size = 1024 * 1024;
8465 } else {
8466 /* Lookup l2 entry. */
8467 if (type == 1) {
8468 /* Coarse pagetable. */
8469 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
8470 } else {
8471 /* Fine pagetable. */
8472 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
8473 }
8474 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
8475 mmu_idx, fi);
8476 if (fi->type != ARMFault_None) {
8477 goto do_fault;
8478 }
8479 switch (desc & 3) {
8480 case 0: /* Page translation fault. */
8481 fi->type = ARMFault_Translation;
8482 goto do_fault;
8483 case 1: /* 64k page. */
8484 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
8485 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
8486 *page_size = 0x10000;
8487 break;
8488 case 2: /* 4k page. */
8489 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
8490 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
8491 *page_size = 0x1000;
8492 break;
8493 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
8494 if (type == 1) {
8495 /* ARMv6/XScale extended small page format */
8496 if (arm_feature(env, ARM_FEATURE_XSCALE)
8497 || arm_feature(env, ARM_FEATURE_V6)) {
8498 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
8499 *page_size = 0x1000;
8500 } else {
8501 /* UNPREDICTABLE in ARMv5; we choose to take a
8502 * page translation fault.
8503 */
8504 fi->type = ARMFault_Translation;
8505 goto do_fault;
8506 }
8507 } else {
8508 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
8509 *page_size = 0x400;
8510 }
8511 ap = (desc >> 4) & 3;
8512 break;
8513 default:
8514 /* Never happens, but compiler isn't smart enough to tell. */
8515 abort();
8516 }
8517 }
8518 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
8519 *prot |= *prot ? PAGE_EXEC : 0;
8520 if (!(*prot & (1 << access_type))) {
8521 /* Access permission fault. */
8522 fi->type = ARMFault_Permission;
8523 goto do_fault;
8524 }
8525 *phys_ptr = phys_addr;
8526 return false;
8527 do_fault:
8528 fi->domain = domain;
8529 fi->level = level;
8530 return true;
8531 }
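
/* Worked example (hypothetical values) of the 1MB section case above: with
 * desc = 0x40200c1e (type = desc & 3 = 2, a section) and
 * address = 0x00112234,
 *   phys_addr = (0x40200c1e & 0xfff00000) | (0x00112234 & 0x000fffff)
 *             = 0x40200000 | 0x12234 = 0x40212234
 *   ap        = (desc >> 10) & 3 = 3   (read/write at any privilege)
 * and *page_size is 1MB, so the whole section goes into the TLB at once.
 */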
8532
8533 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
8534 MMUAccessType access_type, ARMMMUIdx mmu_idx,
8535 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
8536 target_ulong *page_size, ARMMMUFaultInfo *fi)
8537 {
8538 CPUState *cs = CPU(arm_env_get_cpu(env));
8539 int level = 1;
8540 uint32_t table;
8541 uint32_t desc;
8542 uint32_t xn;
8543 uint32_t pxn = 0;
8544 int type;
8545 int ap;
8546 int domain = 0;
8547 int domain_prot;
8548 hwaddr phys_addr;
8549 uint32_t dacr;
8550 bool ns;
8551
8552 /* Pagetable walk. */
8553 /* Lookup l1 descriptor. */
8554 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
8555 /* Section translation fault if page walk is disabled by PD0 or PD1 */
8556 fi->type = ARMFault_Translation;
8557 goto do_fault;
8558 }
8559 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
8560 mmu_idx, fi);
8561 if (fi->type != ARMFault_None) {
8562 goto do_fault;
8563 }
8564 type = (desc & 3);
8565 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
8566 /* Section translation fault, or attempt to use the encoding
8567 * which is Reserved on implementations without PXN.
8568 */
8569 fi->type = ARMFault_Translation;
8570 goto do_fault;
8571 }
8572 if ((type == 1) || !(desc & (1 << 18))) {
8573 /* Page or Section. */
8574 domain = (desc >> 5) & 0x0f;
8575 }
8576 if (regime_el(env, mmu_idx) == 1) {
8577 dacr = env->cp15.dacr_ns;
8578 } else {
8579 dacr = env->cp15.dacr_s;
8580 }
8581 if (type == 1) {
8582 level = 2;
8583 }
8584 domain_prot = (dacr >> (domain * 2)) & 3;
8585 if (domain_prot == 0 || domain_prot == 2) {
8586 /* Section or Page domain fault */
8587 fi->type = ARMFault_Domain;
8588 goto do_fault;
8589 }
8590 if (type != 1) {
8591 if (desc & (1 << 18)) {
8592 /* Supersection. */
8593 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
8594 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
8595 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
8596 *page_size = 0x1000000;
8597 } else {
8598 /* Section. */
8599 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
8600 *page_size = 0x100000;
8601 }
8602 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
8603 xn = desc & (1 << 4);
8604 pxn = desc & 1;
8605 ns = extract32(desc, 19, 1);
8606 } else {
8607 if (arm_feature(env, ARM_FEATURE_PXN)) {
8608 pxn = (desc >> 2) & 1;
8609 }
8610 ns = extract32(desc, 3, 1);
8611 /* Lookup l2 entry. */
8612 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
8613 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
8614 mmu_idx, fi);
8615 if (fi->type != ARMFault_None) {
8616 goto do_fault;
8617 }
8618 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
8619 switch (desc & 3) {
8620 case 0: /* Page translation fault. */
8621 fi->type = ARMFault_Translation;
8622 goto do_fault;
8623 case 1: /* 64k page. */
8624 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
8625 xn = desc & (1 << 15);
8626 *page_size = 0x10000;
8627 break;
8628 case 2: case 3: /* 4k page. */
8629 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
8630 xn = desc & 1;
8631 *page_size = 0x1000;
8632 break;
8633 default:
8634 /* Never happens, but compiler isn't smart enough to tell. */
8635 abort();
8636 }
8637 }
8638 if (domain_prot == 3) {
8639 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
8640 } else {
8641 if (pxn && !regime_is_user(env, mmu_idx)) {
8642 xn = 1;
8643 }
8644 if (xn && access_type == MMU_INST_FETCH) {
8645 fi->type = ARMFault_Permission;
8646 goto do_fault;
8647 }
8648
8649 if (arm_feature(env, ARM_FEATURE_V6K) &&
8650 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
8651 /* The simplified model uses AP[0] as an access control bit. */
8652 if ((ap & 1) == 0) {
8653 /* Access flag fault. */
8654 fi->type = ARMFault_AccessFlag;
8655 goto do_fault;
8656 }
8657 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
8658 } else {
8659 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
8660 }
8661 if (*prot && !xn) {
8662 *prot |= PAGE_EXEC;
8663 }
8664 if (!(*prot & (1 << access_type))) {
8665 /* Access permission fault. */
8666 fi->type = ARMFault_Permission;
8667 goto do_fault;
8668 }
8669 }
8670 if (ns) {
8671 /* The NS bit will (as required by the architecture) have no effect if
8672 * the CPU doesn't support TZ or this is a non-secure translation
8673 * regime, because the attribute will already be non-secure.
8674 */
8675 attrs->secure = false;
8676 }
8677 *phys_ptr = phys_addr;
8678 return false;
8679 do_fault:
8680 fi->domain = domain;
8681 fi->level = level;
8682 return true;
8683 }
8684
8685 /*
8686 * check_s2_mmu_setup
8687 * @cpu: ARMCPU
8688 * @is_aa64: True if the translation regime is in AArch64 state
8689 * @level: Suggested starting level
8690 * @inputsize: Bitsize of IPAs
8691 * @stride: Page-table stride (See the ARM ARM)
8692 *
8693 * Returns true if the suggested S2 translation parameters are OK and
8694 * false otherwise.
8695 */
8696 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
8697 int inputsize, int stride)
8698 {
8699 const int grainsize = stride + 3;
8700 int startsizecheck;
8701
8702 /* Negative levels are never allowed. */
8703 if (level < 0) {
8704 return false;
8705 }
8706
8707 startsizecheck = inputsize - ((3 - level) * stride + grainsize);
8708 if (startsizecheck < 1 || startsizecheck > stride + 4) {
8709 return false;
8710 }
8711
8712 if (is_aa64) {
8713 CPUARMState *env = &cpu->env;
8714 unsigned int pamax = arm_pamax(cpu);
8715
8716 switch (stride) {
8717 case 13: /* 64KB Pages. */
8718 if (level == 0 || (level == 1 && pamax <= 42)) {
8719 return false;
8720 }
8721 break;
8722 case 11: /* 16KB Pages. */
8723 if (level == 0 || (level == 1 && pamax <= 40)) {
8724 return false;
8725 }
8726 break;
8727 case 9: /* 4KB Pages. */
8728 if (level == 0 && pamax <= 42) {
8729 return false;
8730 }
8731 break;
8732 default:
8733 g_assert_not_reached();
8734 }
8735
8736 /* Inputsize checks. */
8737 if (inputsize > pamax &&
8738 (arm_el_is_aa64(env, 1) || inputsize > 40)) {
8739 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
8740 return false;
8741 }
8742 } else {
8743 /* AArch32 only supports 4KB pages. Assert on that. */
8744 assert(stride == 9);
8745
8746 if (level == 0) {
8747 return false;
8748 }
8749 }
8750 return true;
8751 }
8752
8753 /* Translate from the 4-bit stage 2 representation of
8754 * memory attributes (without cache-allocation hints) to
8755 * the 8-bit representation of the stage 1 MAIR registers
8756 * (which includes allocation hints).
8757 *
8758 * ref: shared/translation/attrs/S2AttrDecode()
8759 * .../S2ConvertAttrsHints()
8760 */
8761 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
8762 {
8763 uint8_t hiattr = extract32(s2attrs, 2, 2);
8764 uint8_t loattr = extract32(s2attrs, 0, 2);
8765 uint8_t hihint = 0, lohint = 0;
8766
8767 if (hiattr != 0) { /* normal memory */
8768 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
8769 hiattr = loattr = 1; /* non-cacheable */
8770 } else {
8771 if (hiattr != 1) { /* Write-through or write-back */
8772 hihint = 3; /* RW allocate */
8773 }
8774 if (loattr != 1) { /* Write-through or write-back */
8775 lohint = 3; /* RW allocate */
8776 }
8777 }
8778 }
8779
8780 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
8781 }
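
/* Worked example (hypothetical value): s2attrs = 0xf is Normal memory,
 * Write-Back for both halves (hiattr = loattr = 3). With caches enabled
 * the conversion above adds RW-allocate hints in both hint fields:
 *   (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xff
 * which is the usual Normal Write-Back Inner/Outer MAIR encoding.
 */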
8782
8783 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
8784 MMUAccessType access_type, ARMMMUIdx mmu_idx,
8785 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
8786 target_ulong *page_size_ptr,
8787 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
8788 {
8789 ARMCPU *cpu = arm_env_get_cpu(env);
8790 CPUState *cs = CPU(cpu);
8791 /* Read an LPAE long-descriptor translation table. */
8792 ARMFaultType fault_type = ARMFault_Translation;
8793 uint32_t level;
8794 uint32_t epd = 0;
8795 int32_t t0sz, t1sz;
8796 uint32_t tg;
8797 uint64_t ttbr;
8798 int ttbr_select;
8799 hwaddr descaddr, indexmask, indexmask_grainsize;
8800 uint32_t tableattrs;
8801 target_ulong page_size;
8802 uint32_t attrs;
8803 int32_t stride = 9;
8804 int32_t addrsize;
8805 int inputsize;
8806 int32_t tbi = 0;
8807 TCR *tcr = regime_tcr(env, mmu_idx);
8808 int ap, ns, xn, pxn;
8809 uint32_t el = regime_el(env, mmu_idx);
8810 bool ttbr1_valid = true;
8811 uint64_t descaddrmask;
8812 bool aarch64 = arm_el_is_aa64(env, el);
8813
8814 /* TODO:
8815 * This code does not handle the different format TCR for VTCR_EL2.
8816 * This code also does not support shareability levels.
8817 * Attribute and permission bit handling should also be checked when adding
8818 * support for those page table walks.
8819 */
8820 if (aarch64) {
8821 level = 0;
8822 addrsize = 64;
8823 if (el > 1) {
8824 if (mmu_idx != ARMMMUIdx_S2NS) {
8825 tbi = extract64(tcr->raw_tcr, 20, 1);
8826 }
8827 } else {
8828 if (extract64(address, 55, 1)) {
8829 tbi = extract64(tcr->raw_tcr, 38, 1);
8830 } else {
8831 tbi = extract64(tcr->raw_tcr, 37, 1);
8832 }
8833 }
8834 tbi *= 8;
8835
8836 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
8837 * invalid.
8838 */
8839 if (el > 1) {
8840 ttbr1_valid = false;
8841 }
8842 } else {
8843 level = 1;
8844 addrsize = 32;
8845 /* There is no TTBR1 for EL2 */
8846 if (el == 2) {
8847 ttbr1_valid = false;
8848 }
8849 }
8850
8851 /* Determine whether this address is in the region controlled by
8852 * TTBR0 or TTBR1 (or if it is in neither region and should fault).
8853 * This is a Non-secure PL0/1 stage 1 translation, so controlled by
8854 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
8855 */
8856 if (aarch64) {
8857 /* AArch64 translation. */
8858 t0sz = extract32(tcr->raw_tcr, 0, 6);
8859 t0sz = MIN(t0sz, 39);
8860 t0sz = MAX(t0sz, 16);
8861 } else if (mmu_idx != ARMMMUIdx_S2NS) {
8862 /* AArch32 stage 1 translation. */
8863 t0sz = extract32(tcr->raw_tcr, 0, 3);
8864 } else {
8865 /* AArch32 stage 2 translation. */
8866 bool sext = extract32(tcr->raw_tcr, 4, 1);
8867 bool sign = extract32(tcr->raw_tcr, 3, 1);
8868 /* Address size is 40-bit for a stage 2 translation,
8869 * and t0sz can be negative (from -8 to 7),
8870 * so we need to adjust it to use the TTBR selecting logic below.
8871 */
8872 addrsize = 40;
8873 t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
8874
8875 /* If the sign-extend bit is not the same as t0sz[3], the result
8876 * is unpredictable. Flag this as a guest error. */
8877 if (sign != sext) {
8878 qemu_log_mask(LOG_GUEST_ERROR,
8879 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
8880 }
8881 }
8882 t1sz = extract32(tcr->raw_tcr, 16, 6);
8883 if (aarch64) {
8884 t1sz = MIN(t1sz, 39);
8885 t1sz = MAX(t1sz, 16);
8886 }
8887 if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
8888 /* there is a ttbr0 region and we are in it (high bits all zero) */
8889 ttbr_select = 0;
8890 } else if (ttbr1_valid && t1sz &&
8891 !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
8892 /* there is a ttbr1 region and we are in it (high bits all one) */
8893 ttbr_select = 1;
8894 } else if (!t0sz) {
8895 /* ttbr0 region is "everything not in the ttbr1 region" */
8896 ttbr_select = 0;
8897 } else if (!t1sz && ttbr1_valid) {
8898 /* ttbr1 region is "everything not in the ttbr0 region" */
8899 ttbr_select = 1;
8900 } else {
8901 /* in the gap between the two regions, this is a Translation fault */
8902 fault_type = ARMFault_Translation;
8903 goto do_fault;
8904 }
8905
8906 /* Note that QEMU ignores shareability and cacheability attributes,
8907 * so we don't need to do anything with the SH, ORGN, IRGN fields
8908 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
8909 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
8910 * implement any ASID-like capability so we can ignore it (instead
8911 * we will always flush the TLB any time the ASID is changed).
8912 */
8913 if (ttbr_select == 0) {
8914 ttbr = regime_ttbr(env, mmu_idx, 0);
8915 if (el < 2) {
8916 epd = extract32(tcr->raw_tcr, 7, 1);
8917 }
8918 inputsize = addrsize - t0sz;
8919
8920 tg = extract32(tcr->raw_tcr, 14, 2);
8921 if (tg == 1) { /* 64KB pages */
8922 stride = 13;
8923 }
8924 if (tg == 2) { /* 16KB pages */
8925 stride = 11;
8926 }
8927 } else {
8928 /* We should only be here if TTBR1 is valid */
8929 assert(ttbr1_valid);
8930
8931 ttbr = regime_ttbr(env, mmu_idx, 1);
8932 epd = extract32(tcr->raw_tcr, 23, 1);
8933 inputsize = addrsize - t1sz;
8934
8935 tg = extract32(tcr->raw_tcr, 30, 2);
8936 if (tg == 3) { /* 64KB pages */
8937 stride = 13;
8938 }
8939 if (tg == 1) { /* 16KB pages */
8940 stride = 11;
8941 }
8942 }
8943
8944 /* Here we should have set up all the parameters for the translation:
8945 * inputsize, ttbr, epd, stride, tbi
8946 */
8947
8948 if (epd) {
8949 /* Translation table walk disabled => Translation fault on TLB miss
8950 * Note: This is always 0 on 64-bit EL2 and EL3.
8951 */
8952 goto do_fault;
8953 }
8954
8955 if (mmu_idx != ARMMMUIdx_S2NS) {
8956 /* The starting level depends on the virtual address size (which can
8957 * be up to 48 bits) and the translation granule size. It indicates
8958 * the number of strides (stride bits at a time) needed to
8959 * consume the bits of the input address. In the pseudocode this is:
8960 * level = 4 - RoundUp((inputsize - grainsize) / stride)
8961 * where their 'inputsize' is our 'inputsize', 'grainsize' is
8962 * our 'stride + 3' and 'stride' is our 'stride'.
8963 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
8964 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
8965 * = 4 - (inputsize - 4) / stride;
8966 */
8967 level = 4 - (inputsize - 4) / stride;
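/* Worked example: a 48-bit inputsize with 4KB granules (stride 9)
 * gives level = 4 - 44 / 9 = 4 - 4 = 0, i.e. a full four-level walk,
 * while 64KB granules (stride 13) give 4 - 44 / 13 = 1.
 */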
8968 } else {
8969 /* For stage 2 translations the starting level is specified by the
8970 * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
8971 */
8972 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
8973 uint32_t startlevel;
8974 bool ok;
8975
8976 if (!aarch64 || stride == 9) {
8977 /* AArch32 or 4KB pages */
8978 startlevel = 2 - sl0;
8979 } else {
8980 /* 16KB or 64KB pages */
8981 startlevel = 3 - sl0;
8982 }
8983
8984 /* Check that the starting level is valid. */
8985 ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
8986 inputsize, stride);
8987 if (!ok) {
8988 fault_type = ARMFault_Translation;
8989 goto do_fault;
8990 }
8991 level = startlevel;
8992 }
8993
8994 indexmask_grainsize = (1ULL << (stride + 3)) - 1;
8995 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
8996
8997 /* Now we can extract the actual base address from the TTBR */
8998 descaddr = extract64(ttbr, 0, 48);
8999 descaddr &= ~indexmask;
9000
9001 /* The address field in the descriptor goes up to bit 39 for ARMv7 but
9002 * up to bit 47 for ARMv8. However, we only use descaddrmask up to bit
9003 * 39 for AArch32, because in that case we don't need the other bits to
9004 * construct the next descriptor address (they should all be zeroes).
9005 */
9006 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
9007 ~indexmask_grainsize;
9008
9009 /* Secure accesses start with the page table in secure memory and
9010 * can be downgraded to non-secure at any step. Non-secure accesses
9011 * remain non-secure. We implement this by just ORing in the NSTable/NS
9012 * bits at each step.
9013 */
9014 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
9015 for (;;) {
9016 uint64_t descriptor;
9017 bool nstable;
9018
9019 descaddr |= (address >> (stride * (4 - level))) & indexmask;
9020 descaddr &= ~7ULL;
9021 nstable = extract32(tableattrs, 4, 1);
9022 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
9023 if (fi->type != ARMFault_None) {
9024 goto do_fault;
9025 }
9026
9027 if (!(descriptor & 1) ||
9028 (!(descriptor & 2) && (level == 3))) {
9029 /* Invalid, or the Reserved level 3 encoding */
9030 goto do_fault;
9031 }
9032 descaddr = descriptor & descaddrmask;
9033
9034 if ((descriptor & 2) && (level < 3)) {
9035 /* Table entry. The top five bits are attributes which may
9036 * propagate down through lower levels of the table (and
9037 * which are all arranged so that 0 means "no effect", so
9038 * we can gather them up by ORing in the bits at each level).
9039 */
9040 tableattrs |= extract64(descriptor, 59, 5);
9041 level++;
9042 indexmask = indexmask_grainsize;
9043 continue;
9044 }
9045 /* Block entry at level 1 or 2, or page entry at level 3.
9046 * These are basically the same thing, although the number
9047 * of bits we pull in from the vaddr varies.
9048 */
9049 page_size = (1ULL << ((stride * (4 - level)) + 3));
9050 descaddr |= (address & (page_size - 1));
9051 /* Extract attributes from the descriptor */
9052 attrs = extract64(descriptor, 2, 10)
9053 | (extract64(descriptor, 52, 12) << 10);
9054
9055 if (mmu_idx == ARMMMUIdx_S2NS) {
9056 /* Stage 2 table descriptors do not include any attribute fields */
9057 break;
9058 }
9059 /* Merge in attributes from table descriptors */
9060 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
9061 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
9062 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
9063 * means "force PL1 access only", which means forcing AP[1] to 0.
9064 */
9065 if (extract32(tableattrs, 2, 1)) {
9066 attrs &= ~(1 << 4);
9067 }
9068 attrs |= nstable << 3; /* NS */
9069 break;
9070 }
9071 /* Here descaddr is the final physical address, and attributes
9072 * are all in attrs.
9073 */
9074 fault_type = ARMFault_AccessFlag;
9075 if ((attrs & (1 << 8)) == 0) {
9076 /* Access flag */
9077 goto do_fault;
9078 }
9079
9080 ap = extract32(attrs, 4, 2);
9081 xn = extract32(attrs, 12, 1);
9082
9083 if (mmu_idx == ARMMMUIdx_S2NS) {
9084 ns = true;
9085 *prot = get_S2prot(env, ap, xn);
9086 } else {
9087 ns = extract32(attrs, 3, 1);
9088 pxn = extract32(attrs, 11, 1);
9089 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
9090 }
9091
9092 fault_type = ARMFault_Permission;
9093 if (!(*prot & (1 << access_type))) {
9094 goto do_fault;
9095 }
9096
9097 if (ns) {
9098 /* The NS bit will (as required by the architecture) have no effect if
9099 * the CPU doesn't support TZ or this is a non-secure translation
9100 * regime, because the attribute will already be non-secure.
9101 */
9102 txattrs->secure = false;
9103 }
9104
9105 if (cacheattrs != NULL) {
9106 if (mmu_idx == ARMMMUIdx_S2NS) {
9107 cacheattrs->attrs = convert_stage2_attrs(env,
9108 extract32(attrs, 0, 4));
9109 } else {
9110 /* Index into MAIR registers for cache attributes */
9111 uint8_t attrindx = extract32(attrs, 0, 3);
9112 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
9113 assert(attrindx <= 7);
9114 cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
9115 }
9116 cacheattrs->shareability = extract32(attrs, 6, 2);
9117 }
9118
9119 *phys_ptr = descaddr;
9120 *page_size_ptr = page_size;
9121 return false;
9122
9123 do_fault:
9124 fi->type = fault_type;
9125 fi->level = level;
9126 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
9127 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
9128 return true;
9129 }
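
/* Worked example (hypothetical parameters) of the masks used in the walk
 * above: with a 4KB granule (stride 9), inputsize 40 and starting level 1,
 *   indexmask_grainsize = (1ULL << 12) - 1 = 0xfff
 *   indexmask           = (1ULL << (40 - 9 * 3)) - 1 = 0x1fff
 * so the level 1 step ORs in (address >> 27) & 0x1fff and then clears the
 * low 3 bits, i.e. it indexes the table by VA[39:30] in 8-byte units, and
 * levels 2 and 3 each consume 9 further bits of the address.
 */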
9130
9131 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
9132 ARMMMUIdx mmu_idx,
9133 int32_t address, int *prot)
9134 {
9135 if (!arm_feature(env, ARM_FEATURE_M)) {
9136 *prot = PAGE_READ | PAGE_WRITE;
9137 switch (address) {
9138 case 0xF0000000 ... 0xFFFFFFFF:
9139 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
9140 /* hivecs execing is ok */
9141 *prot |= PAGE_EXEC;
9142 }
9143 break;
9144 case 0x00000000 ... 0x7FFFFFFF:
9145 *prot |= PAGE_EXEC;
9146 break;
9147 }
9148 } else {
9149 /* Default system address map for M profile cores.
9150 * The architecture specifies which regions are execute-never;
9151 * at the MPU level no other checks are defined.
9152 */
9153 switch (address) {
9154 case 0x00000000 ... 0x1fffffff: /* ROM */
9155 case 0x20000000 ... 0x3fffffff: /* SRAM */
9156 case 0x60000000 ... 0x7fffffff: /* RAM */
9157 case 0x80000000 ... 0x9fffffff: /* RAM */
9158 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9159 break;
9160 case 0x40000000 ... 0x5fffffff: /* Peripheral */
9161 case 0xa0000000 ... 0xbfffffff: /* Device */
9162 case 0xc0000000 ... 0xdfffffff: /* Device */
9163 case 0xe0000000 ... 0xffffffff: /* System */
9164 *prot = PAGE_READ | PAGE_WRITE;
9165 break;
9166 default:
9167 g_assert_not_reached();
9168 }
9169 }
9170 }
9171
9172 static bool pmsav7_use_background_region(ARMCPU *cpu,
9173 ARMMMUIdx mmu_idx, bool is_user)
9174 {
9175 /* Return true if we should use the default memory map as a
9176 * "background" region if there are no hits against any MPU regions.
9177 */
9178 CPUARMState *env = &cpu->env;
9179
9180 if (is_user) {
9181 return false;
9182 }
9183
9184 if (arm_feature(env, ARM_FEATURE_M)) {
9185 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
9186 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
9187 } else {
9188 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
9189 }
9190 }
9191
9192 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
9193 {
9194 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
9195 return arm_feature(env, ARM_FEATURE_M) &&
9196 extract32(address, 20, 12) == 0xe00;
9197 }
9198
9199 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
9200 {
9201 /* True if address is in the M profile system region
9202 * 0xe0000000 - 0xffffffff
9203 */
9204 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
9205 }
9206
9207 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
9208 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9209 hwaddr *phys_ptr, int *prot,
9210 ARMMMUFaultInfo *fi)
9211 {
9212 ARMCPU *cpu = arm_env_get_cpu(env);
9213 int n;
9214 bool is_user = regime_is_user(env, mmu_idx);
9215
9216 *phys_ptr = address;
9217 *prot = 0;
9218
9219 if (regime_translation_disabled(env, mmu_idx) ||
9220 m_is_ppb_region(env, address)) {
9221 /* MPU disabled or M profile PPB access: use default memory map.
9222 * The other case which uses the default memory map in the
9223 * v7M ARM ARM pseudocode is exception vector reads from the vector
9224 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
9225 * which always does a direct read using address_space_ldl(), rather
9226 * than going via this function, so we don't need to check that here.
9227 */
9228 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
9229 } else { /* MPU enabled */
9230 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
9231 /* region search */
9232 uint32_t base = env->pmsav7.drbar[n];
9233 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
9234 uint32_t rmask;
9235 bool srdis = false;
9236
9237 if (!(env->pmsav7.drsr[n] & 0x1)) {
9238 continue;
9239 }
9240
9241 if (!rsize) {
9242 qemu_log_mask(LOG_GUEST_ERROR,
9243 "DRSR[%d]: Rsize field cannot be 0\n", n);
9244 continue;
9245 }
9246 rsize++;
9247 rmask = (1ull << rsize) - 1;
9248
9249 if (base & rmask) {
9250 qemu_log_mask(LOG_GUEST_ERROR,
9251 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
9252 "to DRSR region size, mask = 0x%" PRIx32 "\n",
9253 n, base, rmask);
9254 continue;
9255 }
9256
9257 if (address < base || address > base + rmask) {
9258 continue;
9259 }
9260
9261 /* Region matched */
9262
9263 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
9264 int i, snd;
9265 uint32_t srdis_mask;
9266
9267 rsize -= 3; /* sub region size (power of 2) */
9268 snd = ((address - base) >> rsize) & 0x7;
9269 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
9270
9271 srdis_mask = srdis ? 0x3 : 0x0;
9272 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
9273 /* This will check, in groups of 2, 4 and then 8, whether
9274 * the subregion bits are consistent. rsize is incremented
9275 * back up to give the region size, considering consistent
9276 * adjacent subregions as one region. Stop testing if rsize
9277 * is already big enough for an entire QEMU page.
9278 */
9279 int snd_rounded = snd & ~(i - 1);
9280 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
9281 snd_rounded + 8, i);
9282 if (srdis_mask ^ srdis_multi) {
9283 break;
9284 }
9285 srdis_mask = (srdis_mask << i) | srdis_mask;
9286 rsize++;
9287 }
9288 }
9289 if (rsize < TARGET_PAGE_BITS) {
9290 qemu_log_mask(LOG_UNIMP,
9291 "DRSR[%d]: No support for MPU (sub)region "
9292 "alignment of %" PRIu32 " bits. Minimum is %d\n",
9293 n, rsize, TARGET_PAGE_BITS);
9294 continue;
9295 }
9296 if (srdis) {
9297 continue;
9298 }
9299 break;
9300 }
9301
9302 if (n == -1) { /* no hits */
9303 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
9304 /* background fault */
9305 fi->type = ARMFault_Background;
9306 return true;
9307 }
9308 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
9309 } else { /* an MPU hit! */
9310 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
9311 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
9312
9313 if (m_is_system_region(env, address)) {
9314 /* System space is always execute never */
9315 xn = 1;
9316 }
9317
9318 if (is_user) { /* User mode AP bit decoding */
9319 switch (ap) {
9320 case 0:
9321 case 1:
9322 case 5:
9323 break; /* no access */
9324 case 3:
9325 *prot |= PAGE_WRITE;
9326 /* fall through */
9327 case 2:
9328 case 6:
9329 *prot |= PAGE_READ | PAGE_EXEC;
9330 break;
9331 case 7:
9332 /* for v7M, same as 6; for R profile a reserved value */
9333 if (arm_feature(env, ARM_FEATURE_M)) {
9334 *prot |= PAGE_READ | PAGE_EXEC;
9335 break;
9336 }
9337 /* fall through */
9338 default:
9339 qemu_log_mask(LOG_GUEST_ERROR,
9340 "DRACR[%d]: Bad value for AP bits: 0x%"
9341 PRIx32 "\n", n, ap);
9342 }
9343 } else { /* Priv. mode AP bits decoding */
9344 switch (ap) {
9345 case 0:
9346 break; /* no access */
9347 case 1:
9348 case 2:
9349 case 3:
9350 *prot |= PAGE_WRITE;
9351 /* fall through */
9352 case 5:
9353 case 6:
9354 *prot |= PAGE_READ | PAGE_EXEC;
9355 break;
9356 case 7:
9357 /* for v7M, same as 6; for R profile a reserved value */
9358 if (arm_feature(env, ARM_FEATURE_M)) {
9359 *prot |= PAGE_READ | PAGE_EXEC;
9360 break;
9361 }
9362 /* fall through */
9363 default:
9364 qemu_log_mask(LOG_GUEST_ERROR,
9365 "DRACR[%d]: Bad value for AP bits: 0x%"
9366 PRIx32 "\n", n, ap);
9367 }
9368 }
9369
9370 /* execute never */
9371 if (xn) {
9372 *prot &= ~PAGE_EXEC;
9373 }
9374 }
9375 }
9376
9377 fi->type = ARMFault_Permission;
9378 fi->level = 1;
9379 return !(*prot & (1 << access_type));
9380 }
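
/* Worked example (hypothetical register values) of the region maths above:
 * a DRSR.Rsize field of 11 encodes a 2^(11+1) = 4KB region, so after the
 * rsize++ we get rmask = (1ull << 12) - 1 = 0xfff, and a DRBAR base of
 * 0x20001000 covers 0x20001000..0x20001fff. With subregions enabled the
 * region splits into eight 512-byte chunks, and
 * "snd = ((address - base) >> 9) & 0x7" selects the subregion disable bit.
 */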
9381
9382 static bool v8m_is_sau_exempt(CPUARMState *env,
9383 uint32_t address, MMUAccessType access_type)
9384 {
9385 /* The architecture specifies that certain address ranges are
9386 * exempt from v8M SAU/IDAU checks.
9387 */
9388 return
9389 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
9390 (address >= 0xe0000000 && address <= 0xe0002fff) ||
9391 (address >= 0xe000e000 && address <= 0xe000efff) ||
9392 (address >= 0xe002e000 && address <= 0xe002efff) ||
9393 (address >= 0xe0040000 && address <= 0xe0041fff) ||
9394 (address >= 0xe00ff000 && address <= 0xe00fffff);
9395 }
9396
9397 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
9398 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9399 V8M_SAttributes *sattrs)
9400 {
9401 /* Look up the security attributes for this address. Compare the
9402 * pseudocode SecurityCheck() function.
9403 * We assume the caller has zero-initialized *sattrs.
9404 */
9405 ARMCPU *cpu = arm_env_get_cpu(env);
9406 int r;
9407
9408 /* TODO: implement IDAU */
9409
9410 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
9411 /* 0xf0000000..0xffffffff is always S for insn fetches */
9412 return;
9413 }
9414
9415 if (v8m_is_sau_exempt(env, address, access_type)) {
9416 sattrs->ns = !regime_is_secure(env, mmu_idx);
9417 return;
9418 }
9419
9420 switch (env->sau.ctrl & 3) {
9421 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
9422 break;
9423 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
9424 sattrs->ns = true;
9425 break;
9426 default: /* SAU.ENABLE == 1 */
9427 for (r = 0; r < cpu->sau_sregion; r++) {
9428 if (env->sau.rlar[r] & 1) {
9429 uint32_t base = env->sau.rbar[r] & ~0x1f;
9430 uint32_t limit = env->sau.rlar[r] | 0x1f;
9431
9432 if (base <= address && limit >= address) {
9433 if (sattrs->srvalid) {
9434 /* If we hit in more than one region then we must report
9435 * as Secure, not NS-Callable, with no valid region
9436 * number info.
9437 */
9438 sattrs->ns = false;
9439 sattrs->nsc = false;
9440 sattrs->sregion = 0;
9441 sattrs->srvalid = false;
9442 break;
9443 } else {
9444 if (env->sau.rlar[r] & 2) {
9445 sattrs->nsc = true;
9446 } else {
9447 sattrs->ns = true;
9448 }
9449 sattrs->srvalid = true;
9450 sattrs->sregion = r;
9451 }
9452 }
9453 }
9454 }
9455
9456 /* TODO when we support the IDAU then it may override the result here */
9457 break;
9458 }
9459 }
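
/* Worked example (hypothetical SAU region): SAU_RBAR and SAU_RLAR hold
 * bits [31:5] of the base and limit, so with rbar = 0x20000000 and
 * rlar = 0x2000ffe1 (enable bit 0 set, NSC bit 1 clear) the range checked
 * above is
 *   base  = 0x20000000 & ~0x1f = 0x20000000
 *   limit = 0x2000ffe1 |  0x1f = 0x2000ffff
 * and the region marks matching addresses as Non-secure rather than
 * NS-Callable.
 */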
9460
9461 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
9462 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9463 hwaddr *phys_ptr, MemTxAttrs *txattrs,
9464 int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion)
9465 {
9466 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
9467 * that a full virt-to-phys translation does).
9468 * mregion is (if not NULL) set to the region number which matched,
9469 * or -1 if no region number is returned (MPU off, address did not
9470 * hit a region, address hit in multiple regions).
9471 */
9472 ARMCPU *cpu = arm_env_get_cpu(env);
9473 bool is_user = regime_is_user(env, mmu_idx);
9474 uint32_t secure = regime_is_secure(env, mmu_idx);
9475 int n;
9476 int matchregion = -1;
9477 bool hit = false;
9478
9479 *phys_ptr = address;
9480 *prot = 0;
9481 if (mregion) {
9482 *mregion = -1;
9483 }
9484
9485 /* Unlike the ARM ARM pseudocode, we don't need to check whether this
9486 * was an exception vector read from the vector table (which is always
9487 * done using the default system address map), because those accesses
9488 * are done in arm_v7m_load_vector(), which always does a direct
9489 * read using address_space_ldl(), rather than going via this function.
9490 */
9491 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
9492 hit = true;
9493 } else if (m_is_ppb_region(env, address)) {
9494 hit = true;
9495 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
9496 hit = true;
9497 } else {
9498 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
9499 /* region search */
9500 /* Note that the base address is bits [31:5] from the register
9501 * with bits [4:0] all zeroes, but the limit address is bits
9502 * [31:5] from the register with bits [4:0] all ones.
9503 */
9504 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
9505 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
9506
9507 if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
9508 /* Region disabled */
9509 continue;
9510 }
9511
9512 if (address < base || address > limit) {
9513 continue;
9514 }
9515
9516 if (hit) {
9517 /* Multiple regions match -- always a failure (unlike
9518 * PMSAv7 where highest-numbered-region wins)
9519 */
9520 fi->type = ARMFault_Permission;
9521 fi->level = 1;
9522 return true;
9523 }
9524
9525 matchregion = n;
9526 hit = true;
9527
9528 if (base & ~TARGET_PAGE_MASK) {
9529 qemu_log_mask(LOG_UNIMP,
9530 "MPU_RBAR[%d]: No support for MPU region base"
9531 "address of 0x%" PRIx32 ". Minimum alignment is "
9532 "%d\n",
9533 n, base, TARGET_PAGE_BITS);
9534 continue;
9535 }
9536 if ((limit + 1) & ~TARGET_PAGE_MASK) {
9537 qemu_log_mask(LOG_UNIMP,
9538 "MPU_RBAR[%d]: No support for MPU region limit"
9539 "address of 0x%" PRIx32 ". Minimum alignment is "
9540 "%d\n",
9541 n, limit, TARGET_PAGE_BITS);
9542 continue;
9543 }
9544 }
9545 }
9546
9547 if (!hit) {
9548 /* background fault */
9549 fi->type = ARMFault_Background;
9550 return true;
9551 }
9552
9553 if (matchregion == -1) {
9554 /* hit using the background region */
9555 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
9556 } else {
9557 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
9558 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
9559
9560 if (m_is_system_region(env, address)) {
9561 /* System space is always execute never */
9562 xn = 1;
9563 }
9564
9565 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
9566 if (*prot && !xn) {
9567 *prot |= PAGE_EXEC;
9568 }
9569 /* We don't need to look the attribute up in the MAIR0/MAIR1
9570 * registers because that only tells us about cacheability.
9571 */
9572 if (mregion) {
9573 *mregion = matchregion;
9574 }
9575 }
9576
9577 fi->type = ARMFault_Permission;
9578 fi->level = 1;
9579 return !(*prot & (1 << access_type));
9580 }
9581
9582
9583 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
9584 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9585 hwaddr *phys_ptr, MemTxAttrs *txattrs,
9586 int *prot, ARMMMUFaultInfo *fi)
9587 {
9588 uint32_t secure = regime_is_secure(env, mmu_idx);
9589 V8M_SAttributes sattrs = {};
9590
9591 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
9592 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
9593 if (access_type == MMU_INST_FETCH) {
9594 /* Instruction fetches always use the MMU bank and the
9595 * transaction attribute determined by the fetch address,
9596 * regardless of CPU state. This is painful for QEMU
9597 * to handle, because it would mean we need to encode
9598 * into the mmu_idx not just the (user, negpri) information
9599 * for the current security state but also that for the
9600 * other security state, which would balloon the number
9601 * of mmu_idx values needed alarmingly.
9602 * Fortunately we can avoid this because it's not actually
9603 * possible to arbitrarily execute code from memory with
9604 * the wrong security attribute: it will always generate
9605 * an exception of some kind or another, apart from the
9606 * special case of an NS CPU executing an SG instruction
9607 * in S&NSC memory. So we always just fail the translation
9608 * here and sort things out in the exception handler
9609 * (including possibly emulating an SG instruction).
9610 */
9611 if (sattrs.ns != !secure) {
9612 if (sattrs.nsc) {
9613 fi->type = ARMFault_QEMU_NSCExec;
9614 } else {
9615 fi->type = ARMFault_QEMU_SFault;
9616 }
9617 *phys_ptr = address;
9618 *prot = 0;
9619 return true;
9620 }
9621 } else {
9622 /* For data accesses we always use the MMU bank indicated
9623 * by the current CPU state, but the security attributes
9624 * might downgrade a secure access to nonsecure.
9625 */
9626 if (sattrs.ns) {
9627 txattrs->secure = false;
9628 } else if (!secure) {
9629 /* NS access to S memory must fault.
9630 * Architecturally we should first check whether the
9631 * MPU information for this address indicates that we
9632 * are doing an unaligned access to Device memory, which
9633 * should generate a UsageFault instead. QEMU does not
9634 * currently check for that kind of unaligned access though.
9635 * If we added it we would need to do so as a special case
9636 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
9637 */
9638 fi->type = ARMFault_QEMU_SFault;
9639 *phys_ptr = address;
9640 *prot = 0;
9641 return true;
9642 }
9643 }
9644 }
9645
9646 return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
9647 txattrs, prot, fi, NULL);
9648 }
9649
9650 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
9651 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9652 hwaddr *phys_ptr, int *prot,
9653 ARMMMUFaultInfo *fi)
9654 {
9655 int n;
9656 uint32_t mask;
9657 uint32_t base;
9658 bool is_user = regime_is_user(env, mmu_idx);
9659
9660 if (regime_translation_disabled(env, mmu_idx)) {
9661 /* MPU disabled. */
9662 *phys_ptr = address;
9663 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9664 return false;
9665 }
9666
9667 *phys_ptr = address;
9668 for (n = 7; n >= 0; n--) {
9669 base = env->cp15.c6_region[n];
9670 if ((base & 1) == 0) {
9671 continue;
9672 }
9673 mask = 1 << ((base >> 1) & 0x1f);
9674 /* Keep this shift separate from the above to avoid an
9675 (undefined) << 32. */
9676 mask = (mask << 1) - 1;
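/* Illustrative: a size field of 11 in bits [5:1] of the region register
 * yields mask = 0xfff, i.e. a 4KB region (size = 2^(field + 1) bytes).
 */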
9677 if (((base ^ address) & ~mask) == 0) {
9678 break;
9679 }
9680 }
9681 if (n < 0) {
9682 fi->type = ARMFault_Background;
9683 return true;
9684 }
9685
9686 if (access_type == MMU_INST_FETCH) {
9687 mask = env->cp15.pmsav5_insn_ap;
9688 } else {
9689 mask = env->cp15.pmsav5_data_ap;
9690 }
9691 mask = (mask >> (n * 4)) & 0xf;
9692 switch (mask) {
9693 case 0:
9694 fi->type = ARMFault_Permission;
9695 fi->level = 1;
9696 return true;
9697 case 1:
9698 if (is_user) {
9699 fi->type = ARMFault_Permission;
9700 fi->level = 1;
9701 return true;
9702 }
9703 *prot = PAGE_READ | PAGE_WRITE;
9704 break;
9705 case 2:
9706 *prot = PAGE_READ;
9707 if (!is_user) {
9708 *prot |= PAGE_WRITE;
9709 }
9710 break;
9711 case 3:
9712 *prot = PAGE_READ | PAGE_WRITE;
9713 break;
9714 case 5:
9715 if (is_user) {
9716 fi->type = ARMFault_Permission;
9717 fi->level = 1;
9718 return true;
9719 }
9720 *prot = PAGE_READ;
9721 break;
9722 case 6:
9723 *prot = PAGE_READ;
9724 break;
9725 default:
9726 /* Bad permission. */
9727 fi->type = ARMFault_Permission;
9728 fi->level = 1;
9729 return true;
9730 }
9731 *prot |= PAGE_EXEC;
9732 return false;
9733 }
9734
9735 /* Combine either inner or outer cacheability attributes for normal
9736 * memory, according to table D4-42 and pseudocode procedure
9737 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
9738 *
9739 * NB: only stage 1 includes allocation hints (RW bits), leading to
9740 * some asymmetry.
9741 */
9742 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
9743 {
9744 if (s1 == 4 || s2 == 4) {
9745 /* non-cacheable has precedence */
9746 return 4;
9747 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
9748 /* stage 1 write-through takes precedence */
9749 return s1;
9750 } else if (extract32(s2, 2, 2) == 2) {
9751 /* stage 2 write-through takes precedence, but the allocation hint
9752 * is still taken from stage 1
9753 */
9754 return (2 << 2) | extract32(s1, 0, 2);
9755 } else { /* write-back */
9756 return s1;
9757 }
9758 }
9759
9760 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
9761 * and CombineS1S2Desc()
9762 *
9763 * @s1: Attributes from stage 1 walk
9764 * @s2: Attributes from stage 2 walk
9765 */
9766 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
9767 {
9768 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
9769 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
9770 ARMCacheAttrs ret;
9771
9772 /* Combine shareability attributes (table D4-43) */
9773 if (s1.shareability == 2 || s2.shareability == 2) {
9774 /* if either are outer-shareable, the result is outer-shareable */
9775 ret.shareability = 2;
9776 } else if (s1.shareability == 3 || s2.shareability == 3) {
9777 /* if either are inner-shareable, the result is inner-shareable */
9778 ret.shareability = 3;
9779 } else {
9780 /* both non-shareable */
9781 ret.shareability = 0;
9782 }
9783
9784 /* Combine memory type and cacheability attributes */
9785 if (s1hi == 0 || s2hi == 0) {
9786 /* Device has precedence over normal */
9787 if (s1lo == 0 || s2lo == 0) {
9788 /* nGnRnE has precedence over anything */
9789 ret.attrs = 0;
9790 } else if (s1lo == 4 || s2lo == 4) {
9791 /* non-Reordering has precedence over Reordering */
9792 ret.attrs = 4; /* nGnRE */
9793 } else if (s1lo == 8 || s2lo == 8) {
9794 /* non-Gathering has precedence over Gathering */
9795 ret.attrs = 8; /* nGRE */
9796 } else {
9797 ret.attrs = 0xc; /* GRE */
9798 }
9799
9800 /* Any location for which the resultant memory type is any
9801 * type of Device memory is always treated as Outer Shareable.
9802 */
9803 ret.shareability = 2;
9804 } else { /* Normal memory */
9805 /* Outer/inner cacheability combine independently */
9806 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
9807 | combine_cacheattr_nibble(s1lo, s2lo);
9808
9809 if (ret.attrs == 0x44) {
9810 /* Any location for which the resultant memory type is Normal
9811 * Inner Non-cacheable, Outer Non-cacheable is always treated
9812 * as Outer Shareable.
9813 */
9814 ret.shareability = 2;
9815 }
9816 }
9817
9818 return ret;
9819 }
9820
9821
9822 /* get_phys_addr - get the physical address for this virtual address
9823 *
9824 * Find the physical address corresponding to the given virtual address,
9825 * by doing a translation table walk on MMU based systems or using the
9826 * MPU state on MPU based systems.
9827 *
9828 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
9829 * prot and page_size may not be filled in, and the populated fault info
9830 * in @fi provides information on why the translation aborted, in the format of a
9831 * DFSR/IFSR fault register, with the following caveats:
9832 * * we honour the short vs long DFSR format differences.
9833 * * the WnR bit is never set (the caller must do this).
9834 * for PMSAv5 based systems we don't bother to return a full FSR format
9835 * value.
9836 *
9837 * @env: CPUARMState
9838 * @address: virtual address to get physical address for
9839 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
9840 * @mmu_idx: MMU index indicating required translation regime
9841 * @phys_ptr: set to the physical address corresponding to the virtual address
9842 * @attrs: set to the memory transaction attributes to use
9843 * @prot: set to the permissions for the page containing phys_ptr
9844 * @page_size: set to the size of the page containing phys_ptr
9845 * @fi: set to fault info if the translation fails
9846 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
9847 */
9848 static bool get_phys_addr(CPUARMState *env, target_ulong address,
9849 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9850 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
9851 target_ulong *page_size,
9852 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
9853 {
9854 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
9855 /* Call ourselves recursively to do the stage 1 and then stage 2
9856 * translations.
9857 */
9858 if (arm_feature(env, ARM_FEATURE_EL2)) {
9859 hwaddr ipa;
9860 int s2_prot;
9861 int ret;
9862 ARMCacheAttrs cacheattrs2 = {};
9863
9864 ret = get_phys_addr(env, address, access_type,
9865 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
9866 prot, page_size, fi, cacheattrs);
9867
9868 /* If S1 fails or S2 is disabled, return early. */
9869 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
9870 *phys_ptr = ipa;
9871 return ret;
9872 }
9873
9874 /* S1 is done. Now do S2 translation. */
9875 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
9876 phys_ptr, attrs, &s2_prot,
9877 page_size, fi,
9878 cacheattrs != NULL ? &cacheattrs2 : NULL);
9879 fi->s2addr = ipa;
9880 /* Combine the S1 and S2 perms. */
9881 *prot &= s2_prot;
9882
9883 /* Combine the S1 and S2 cache attributes, if needed */
9884 if (!ret && cacheattrs != NULL) {
9885 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
9886 }
9887
9888 return ret;
9889 } else {
9890 /*
9891 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
9892 */
9893 mmu_idx = stage_1_mmu_idx(mmu_idx);
9894 }
9895 }
9896
9897 /* The page table entries may downgrade secure to non-secure, but
9898 * cannot upgrade a non-secure translation regime's attributes
9899 * to secure.
9900 */
9901 attrs->secure = regime_is_secure(env, mmu_idx);
9902 attrs->user = regime_is_user(env, mmu_idx);
9903
9904 /* Fast Context Switch Extension. This doesn't exist at all in v8.
9905 * In v7 and earlier it affects all stage 1 translations.
9906 */
9907 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
9908 && !arm_feature(env, ARM_FEATURE_V8)) {
9909 if (regime_el(env, mmu_idx) == 3) {
9910 address += env->cp15.fcseidr_s;
9911 } else {
9912 address += env->cp15.fcseidr_ns;
9913 }
9914 }
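/* Illustrative FCSE translation (hypothetical values): with FCSEIDR
 * holding PID 0x0a000000, a VA of 0x00001000 inside the 32MB window
 * becomes MVA 0x0a001000 before the table walk.
 */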
9915
9916 if (arm_feature(env, ARM_FEATURE_PMSA)) {
9917 bool ret;
9918 *page_size = TARGET_PAGE_SIZE;
9919
9920 if (arm_feature(env, ARM_FEATURE_V8)) {
9921 /* PMSAv8 */
9922 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
9923 phys_ptr, attrs, prot, fi);
9924 } else if (arm_feature(env, ARM_FEATURE_V7)) {
9925 /* PMSAv7 */
9926 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
9927 phys_ptr, prot, fi);
9928 } else {
9929 /* Pre-v7 MPU */
9930 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
9931 phys_ptr, prot, fi);
9932 }
9933 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
9934 " mmu_idx %u -> %s (prot %c%c%c)\n",
9935 access_type == MMU_DATA_LOAD ? "reading" :
9936 (access_type == MMU_DATA_STORE ? "writing" : "executing"),
9937 (uint32_t)address, mmu_idx,
9938 ret ? "Miss" : "Hit",
9939 *prot & PAGE_READ ? 'r' : '-',
9940 *prot & PAGE_WRITE ? 'w' : '-',
9941 *prot & PAGE_EXEC ? 'x' : '-');
9942
9943 return ret;
9944 }
9945
9946 /* Definitely a real MMU, not an MPU */
9947
9948 if (regime_translation_disabled(env, mmu_idx)) {
9949 /* MMU disabled. */
9950 *phys_ptr = address;
9951 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9952 *page_size = TARGET_PAGE_SIZE;
9953 return false;
9954 }
9955
9956 if (regime_using_lpae_format(env, mmu_idx)) {
9957 return get_phys_addr_lpae(env, address, access_type, mmu_idx,
9958 phys_ptr, attrs, prot, page_size,
9959 fi, cacheattrs);
9960 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
9961 return get_phys_addr_v6(env, address, access_type, mmu_idx,
9962 phys_ptr, attrs, prot, page_size, fi);
9963 } else {
9964 return get_phys_addr_v5(env, address, access_type, mmu_idx,
9965 phys_ptr, prot, page_size, fi);
9966 }
9967 }
9968
9969 /* Walk the page table and (if the mapping exists) add the page
9970 * to the TLB. Return false on success, or true on failure. Populate
9971 * fsr with ARM DFSR/IFSR fault register format value on failure.
9972 */
9973 bool arm_tlb_fill(CPUState *cs, vaddr address,
9974 MMUAccessType access_type, int mmu_idx,
9975 ARMMMUFaultInfo *fi)
9976 {
9977 ARMCPU *cpu = ARM_CPU(cs);
9978 CPUARMState *env = &cpu->env;
9979 hwaddr phys_addr;
9980 target_ulong page_size;
9981 int prot;
9982 int ret;
9983 MemTxAttrs attrs = {};
9984
9985 ret = get_phys_addr(env, address, access_type,
9986 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
9987 &attrs, &prot, &page_size, fi, NULL);
9988 if (!ret) {
9989 /* Map a single [sub]page. */
9990 phys_addr &= TARGET_PAGE_MASK;
9991 address &= TARGET_PAGE_MASK;
9992 tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
9993 prot, mmu_idx, page_size);
9994 return false;
9995 }
9996
9997 return ret;
9998 }
9999
10000 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
10001 MemTxAttrs *attrs)
10002 {
10003 ARMCPU *cpu = ARM_CPU(cs);
10004 CPUARMState *env = &cpu->env;
10005 hwaddr phys_addr;
10006 target_ulong page_size;
10007 int prot;
10008 bool ret;
10009 ARMMMUFaultInfo fi = {};
10010 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
10011
10012 *attrs = (MemTxAttrs) {};
10013
10014 ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
10015 attrs, &prot, &page_size, &fi, NULL);
10016
10017 if (ret) {
10018 return -1;
10019 }
10020 return phys_addr;
10021 }
10022
10023 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
10024 {
10025 uint32_t mask;
10026 unsigned el = arm_current_el(env);
10027
10028 /* First handle registers which unprivileged code can read */
10029
10030 switch (reg) {
10031 case 0 ... 7: /* xPSR sub-fields */
10032 mask = 0;
10033 if ((reg & 1) && el) {
10034 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
10035 }
10036 if (!(reg & 4)) {
10037 mask |= XPSR_NZCV | XPSR_Q; /* APSR */
10038 }
10039 /* EPSR reads as zero */
10040 return xpsr_read(env) & mask;
10042 case 20: /* CONTROL */
10043 return env->v7m.control[env->v7m.secure];
10044 case 0x94: /* CONTROL_NS */
10045 /* We have to handle this here because unprivileged Secure code
10046 * can read the NS CONTROL register.
10047 */
10048 if (!env->v7m.secure) {
10049 return 0;
10050 }
10051 return env->v7m.control[M_REG_NS];
10052 }
10053
10054 if (el == 0) {
10055 return 0; /* unprivileged reads others as zero */
10056 }
10057
10058 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
10059 switch (reg) {
10060 case 0x88: /* MSP_NS */
10061 if (!env->v7m.secure) {
10062 return 0;
10063 }
10064 return env->v7m.other_ss_msp;
10065 case 0x89: /* PSP_NS */
10066 if (!env->v7m.secure) {
10067 return 0;
10068 }
10069 return env->v7m.other_ss_psp;
10070 case 0x90: /* PRIMASK_NS */
10071 if (!env->v7m.secure) {
10072 return 0;
10073 }
10074 return env->v7m.primask[M_REG_NS];
10075 case 0x91: /* BASEPRI_NS */
10076 if (!env->v7m.secure) {
10077 return 0;
10078 }
10079 return env->v7m.basepri[M_REG_NS];
10080 case 0x93: /* FAULTMASK_NS */
10081 if (!env->v7m.secure) {
10082 return 0;
10083 }
10084 return env->v7m.faultmask[M_REG_NS];
10085 case 0x98: /* SP_NS */
10086 {
10087 /* This gives the non-secure SP selected based on whether we're
10088 * currently in handler mode or not, using the NS CONTROL.SPSEL.
10089 */
10090 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
10091
10092 if (!env->v7m.secure) {
10093 return 0;
10094 }
10095 if (!arm_v7m_is_handler_mode(env) && spsel) {
10096 return env->v7m.other_ss_psp;
10097 } else {
10098 return env->v7m.other_ss_msp;
10099 }
10100 }
10101 default:
10102 break;
10103 }
10104 }
10105
10106 switch (reg) {
10107 case 8: /* MSP */
10108 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
10109 case 9: /* PSP */
10110 return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
10111 case 16: /* PRIMASK */
10112 return env->v7m.primask[env->v7m.secure];
10113 case 17: /* BASEPRI */
10114 case 18: /* BASEPRI_MAX */
10115 return env->v7m.basepri[env->v7m.secure];
10116 case 19: /* FAULTMASK */
10117 return env->v7m.faultmask[env->v7m.secure];
10118 default:
10119 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
10120 " register %d\n", reg);
10121 return 0;
10122 }
10123 }
10124
10125 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
10126 {
10127 /* We're passed bits [11..0] of the instruction; extract
10128 * SYSm and the mask bits.
10129 * Invalid combinations of SYSm and mask are UNPREDICTABLE;
10130 * we choose to treat them as if the mask bits were valid.
10131 * NB that the pseudocode 'mask' variable is bits [11..10],
10132 * whereas ours is [11..8].
10133 */
10134 uint32_t mask = extract32(maskreg, 8, 4);
10135 uint32_t reg = extract32(maskreg, 0, 8);
10136
10137 if (arm_current_el(env) == 0 && reg > 7) {
10138 /* only xPSR sub-fields may be written by unprivileged */
10139 return;
10140 }
10141
10142 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
10143 switch (reg) {
10144 case 0x88: /* MSP_NS */
10145 if (!env->v7m.secure) {
10146 return;
10147 }
10148 env->v7m.other_ss_msp = val;
10149 return;
10150 case 0x89: /* PSP_NS */
10151 if (!env->v7m.secure) {
10152 return;
10153 }
10154 env->v7m.other_ss_psp = val;
10155 return;
10156 case 0x90: /* PRIMASK_NS */
10157 if (!env->v7m.secure) {
10158 return;
10159 }
10160 env->v7m.primask[M_REG_NS] = val & 1;
10161 return;
10162 case 0x91: /* BASEPRI_NS */
10163 if (!env->v7m.secure) {
10164 return;
10165 }
10166 env->v7m.basepri[M_REG_NS] = val & 0xff;
10167 return;
10168 case 0x93: /* FAULTMASK_NS */
10169 if (!env->v7m.secure) {
10170 return;
10171 }
10172 env->v7m.faultmask[M_REG_NS] = val & 1;
10173 return;
10174 case 0x98: /* SP_NS */
10175 {
10176 /* This gives the non-secure SP selected based on whether we're
10177 * currently in handler mode or not, using the NS CONTROL.SPSEL.
10178 */
10179 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
10180
10181 if (!env->v7m.secure) {
10182 return;
10183 }
10184 if (!arm_v7m_is_handler_mode(env) && spsel) {
10185 env->v7m.other_ss_psp = val;
10186 } else {
10187 env->v7m.other_ss_msp = val;
10188 }
10189 return;
10190 }
10191 default:
10192 break;
10193 }
10194 }
10195
10196 switch (reg) {
10197 case 0 ... 7: /* xPSR sub-fields */
10198 /* only APSR is actually writable */
10199 if (!(reg & 4)) {
10200 uint32_t apsrmask = 0;
10201
10202 if (mask & 8) {
10203 apsrmask |= XPSR_NZCV | XPSR_Q;
10204 }
10205 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
10206 apsrmask |= XPSR_GE;
10207 }
10208 xpsr_write(env, val, apsrmask);
10209 }
10210 break;
10211 case 8: /* MSP */
10212 if (v7m_using_psp(env)) {
10213 env->v7m.other_sp = val;
10214 } else {
10215 env->regs[13] = val;
10216 }
10217 break;
10218 case 9: /* PSP */
10219 if (v7m_using_psp(env)) {
10220 env->regs[13] = val;
10221 } else {
10222 env->v7m.other_sp = val;
10223 }
10224 break;
10225 case 16: /* PRIMASK */
10226 env->v7m.primask[env->v7m.secure] = val & 1;
10227 break;
10228 case 17: /* BASEPRI */
10229 env->v7m.basepri[env->v7m.secure] = val & 0xff;
10230 break;
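/* BASEPRI_MAX writes only ever make BASEPRI more restrictive.
 * Illustrative: with BASEPRI currently 0x40, writing 0x20 via
 * BASEPRI_MAX updates it to 0x20, while writing 0x60 is ignored;
 * a nonzero write also takes effect when BASEPRI is 0 (masking disabled).
 */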
10231 case 18: /* BASEPRI_MAX */
10232 val &= 0xff;
10233 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
10234 || env->v7m.basepri[env->v7m.secure] == 0)) {
10235 env->v7m.basepri[env->v7m.secure] = val;
10236 }
10237 break;
10238 case 19: /* FAULTMASK */
10239 env->v7m.faultmask[env->v7m.secure] = val & 1;
10240 break;
10241 case 20: /* CONTROL */
10242 /* Writing to the SPSEL bit only has an effect if we are in
10243 * thread mode; other bits can be updated by any privileged code.
10244 * write_v7m_control_spsel() deals with updating the SPSEL bit in
10245 * env->v7m.control, so we only need update the others.
10246 * For v7M, we must just ignore explicit writes to SPSEL in handler
10247 * mode; for v8M the write is permitted but will have no effect.
10248 */
10249 if (arm_feature(env, ARM_FEATURE_V8) ||
10250 !arm_v7m_is_handler_mode(env)) {
10251 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
10252 }
10253 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
10254 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
10255 break;
10256 default:
10257 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
10258 " register %d\n", reg);
10259 return;
10260 }
10261 }
10262
10263 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
10264 {
10265 /* Implement the TT instruction. op is bits [7:6] of the insn. */
10266 bool forceunpriv = op & 1;
10267 bool alt = op & 2;
10268 V8M_SAttributes sattrs = {};
10269 uint32_t tt_resp;
10270 bool r, rw, nsr, nsrw, mrvalid;
10271 int prot;
10272 ARMMMUFaultInfo fi = {};
10273 MemTxAttrs attrs = {};
10274 hwaddr phys_addr;
10275 ARMMMUIdx mmu_idx;
10276 uint32_t mregion;
10277 bool targetpriv;
10278 bool targetsec = env->v7m.secure;
10279
10280 /* Work out which security state and privilege level we're
10281 * interested in...
10282 */
10283 if (alt) {
10284 targetsec = !targetsec;
10285 }
10286
10287 if (forceunpriv) {
10288 targetpriv = false;
10289 } else {
10290 targetpriv = arm_v7m_is_handler_mode(env) ||
10291 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
10292 }
10293
10294 /* ...and then figure out which MMU index this is */
10295 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
10296
10297 /* We know that the MPU and SAU don't care about the access type
10298 * for our purposes beyond that we don't want to claim to be
10299 * an insn fetch, so we arbitrarily call this a read.
10300 */
10301
10302 /* MPU region info only available for privileged or if
10303 * inspecting the other MPU state.
10304 */
10305 if (arm_current_el(env) != 0 || alt) {
10306 /* We can ignore the return value as prot is always set */
10307 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
10308 &phys_addr, &attrs, &prot, &fi, &mregion);
10309 if (mregion == -1) {
10310 mrvalid = false;
10311 mregion = 0;
10312 } else {
10313 mrvalid = true;
10314 }
10315 r = prot & PAGE_READ;
10316 rw = prot & PAGE_WRITE;
10317 } else {
10318 r = false;
10319 rw = false;
10320 mrvalid = false;
10321 mregion = 0;
10322 }
10323
10324 if (env->v7m.secure) {
10325 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
10326 nsr = sattrs.ns && r;
10327 nsrw = sattrs.ns && rw;
10328 } else {
10329 sattrs.ns = true;
10330 nsr = false;
10331 nsrw = false;
10332 }
10333
10334 tt_resp = (sattrs.iregion << 24) |
10335 (sattrs.irvalid << 23) |
10336 ((!sattrs.ns) << 22) |
10337 (nsrw << 21) |
10338 (nsr << 20) |
10339 (rw << 19) |
10340 (r << 18) |
10341 (sattrs.srvalid << 17) |
10342 (mrvalid << 16) |
10343 (sattrs.sregion << 8) |
10344 mregion;
10345
10346 return tt_resp;
10347 }
10348
10349 #endif
10350
10351 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
10352 {
10353 /* Implement DC ZVA, which zeroes a fixed-length block of memory.
10354 * Note that we do not implement the (architecturally mandated)
10355 * alignment fault for attempts to use this on Device memory
10356 * (which matches the usual QEMU behaviour of not implementing either
10357 * alignment faults or any memory attribute handling).
10358 */
10359
10360 ARMCPU *cpu = arm_env_get_cpu(env);
10361 uint64_t blocklen = 4 << cpu->dcz_blocksize;
10362 uint64_t vaddr = vaddr_in & ~(blocklen - 1);
10363
10364 #ifndef CONFIG_USER_ONLY
10365 {
10366 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
10367 * the block size so we might have to do more than one TLB lookup.
10368 * We know that in fact for any v8 CPU the page size is at least 4K
10369 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
10370 * 1K as an artefact of legacy v5 subpage support being present in the
10371 * same QEMU executable.
10372 */
10373 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
10374 void *hostaddr[maxidx];
10375 int try, i;
10376 unsigned mmu_idx = cpu_mmu_index(env, false);
10377 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
10378
10379 for (try = 0; try < 2; try++) {
10380
10381 for (i = 0; i < maxidx; i++) {
10382 hostaddr[i] = tlb_vaddr_to_host(env,
10383 vaddr + TARGET_PAGE_SIZE * i,
10384 1, mmu_idx);
10385 if (!hostaddr[i]) {
10386 break;
10387 }
10388 }
10389 if (i == maxidx) {
10390 /* If it's all in the TLB it's fair game for just writing to;
10391 * we know we don't need to update dirty status, etc.
10392 */
10393 for (i = 0; i < maxidx - 1; i++) {
10394 memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
10395 }
10396 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
10397 return;
10398 }
10399 /* OK, try a store and see if we can populate the tlb. This
10400 * might cause an exception if the memory isn't writable,
10401 * in which case we will longjmp out of here. We must for
10402 * this purpose use the actual register value passed to us
10403 * so that we get the fault address right.
10404 */
10405 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
10406 /* Now we can populate the other TLB entries, if any */
10407 for (i = 0; i < maxidx; i++) {
10408 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
10409 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
10410 helper_ret_stb_mmu(env, va, 0, oi, GETPC());
10411 }
10412 }
10413 }
10414
10415 /* Slow path (probably attempt to do this to an I/O device or
10416 * similar, or clearing of a block of code we have translations
10417 * cached for). Just do a series of byte writes as the architecture
10418 * demands. It's not worth trying to use a cpu_physical_memory_map(),
10419 * memset(), unmap() sequence here because:
10420 * + we'd need to account for the blocksize being larger than a page
10421 * + the direct-RAM access case is almost always going to be dealt
10422 * with in the fastpath code above, so there's no speed benefit
10423 * + we would have to deal with the map returning NULL because the
10424 * bounce buffer was in use
10425 */
10426 for (i = 0; i < blocklen; i++) {
10427 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
10428 }
10429 }
10430 #else
10431 memset(g2h(vaddr), 0, blocklen);
10432 #endif
10433 }
10434
10435 /* Note that signed overflow is undefined in C. The following routines are
10436 careful to use unsigned types where modulo arithmetic is required.
10437 Failure to do so _will_ break on newer gcc. */
10438
10439 /* Signed saturating arithmetic. */
10440
10441 /* Perform 16-bit signed saturating addition. */
10442 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
10443 {
10444 uint16_t res;
10445
10446 res = a + b;
10447 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
10448 if (a & 0x8000)
10449 res = 0x8000;
10450 else
10451 res = 0x7fff;
10452 }
10453 return res;
10454 }
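/* Illustrative: add16_sat(0x7000, 0x2000) overflows positive and
 * saturates to 0x7fff; add16_sat(0x9000, 0xa000) overflows negative
 * and saturates to 0x8000.
 */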
10455
10456 /* Perform 8-bit signed saturating addition. */
10457 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
10458 {
10459 uint8_t res;
10460
10461 res = a + b;
10462 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
10463 if (a & 0x80)
10464 res = 0x80;
10465 else
10466 res = 0x7f;
10467 }
10468 return res;
10469 }
10470
10471 /* Perform 16-bit signed saturating subtraction. */
10472 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
10473 {
10474 uint16_t res;
10475
10476 res = a - b;
10477 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
10478 if (a & 0x8000)
10479 res = 0x8000;
10480 else
10481 res = 0x7fff;
10482 }
10483 return res;
10484 }
10485
10486 /* Perform 8-bit signed saturating subtraction. */
10487 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
10488 {
10489 uint8_t res;
10490
10491 res = a - b;
10492 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
10493 if (a & 0x80)
10494 res = 0x80;
10495 else
10496 res = 0x7f;
10497 }
10498 return res;
10499 }
10500
10501 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
10502 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
10503 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
10504 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
10505 #define PFX q
10506
10507 #include "op_addsub.h"
10508
10509 /* Unsigned saturating arithmetic. */
10510 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
10511 {
10512 uint16_t res;
10513 res = a + b;
10514 if (res < a)
10515 res = 0xffff;
10516 return res;
10517 }
10518
10519 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
10520 {
10521 if (a > b)
10522 return a - b;
10523 else
10524 return 0;
10525 }
10526
10527 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
10528 {
10529 uint8_t res;
10530 res = a + b;
10531 if (res < a)
10532 res = 0xff;
10533 return res;
10534 }
10535
10536 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
10537 {
10538 if (a > b)
10539 return a - b;
10540 else
10541 return 0;
10542 }
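/* Illustrative: add16_usat(0xfff0, 0x0020) wraps below its first operand
 * and so saturates to 0xffff, while sub16_usat(5, 9) clamps to 0.
 */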
10543
10544 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
10545 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
10546 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
10547 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
10548 #define PFX uq
10549
10550 #include "op_addsub.h"
10551
10552 /* Signed modulo arithmetic. */
10553 #define SARITH16(a, b, n, op) do { \
10554 int32_t sum; \
10555 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
10556 RESULT(sum, n, 16); \
10557 if (sum >= 0) \
10558 ge |= 3 << (n * 2); \
10559 } while(0)
10560
10561 #define SARITH8(a, b, n, op) do { \
10562 int32_t sum; \
10563 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
10564 RESULT(sum, n, 8); \
10565 if (sum >= 0) \
10566 ge |= 1 << n; \
10567 } while(0)
10568
10569
10570 #define ADD16(a, b, n) SARITH16(a, b, n, +)
10571 #define SUB16(a, b, n) SARITH16(a, b, n, -)
10572 #define ADD8(a, b, n) SARITH8(a, b, n, +)
10573 #define SUB8(a, b, n) SARITH8(a, b, n, -)
10574 #define PFX s
10575 #define ARITH_GE
10576
10577 #include "op_addsub.h"
10578
10579 /* Unsigned modulo arithmetic. */
10580 #define ADD16(a, b, n) do { \
10581 uint32_t sum; \
10582 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
10583 RESULT(sum, n, 16); \
10584 if ((sum >> 16) == 1) \
10585 ge |= 3 << (n * 2); \
10586 } while(0)
10587
10588 #define ADD8(a, b, n) do { \
10589 uint32_t sum; \
10590 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
10591 RESULT(sum, n, 8); \
10592 if ((sum >> 8) == 1) \
10593 ge |= 1 << n; \
10594 } while(0)
10595
10596 #define SUB16(a, b, n) do { \
10597 uint32_t sum; \
10598 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
10599 RESULT(sum, n, 16); \
10600 if ((sum >> 16) == 0) \
10601 ge |= 3 << (n * 2); \
10602 } while(0)
10603
10604 #define SUB8(a, b, n) do { \
10605 uint32_t sum; \
10606 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
10607 RESULT(sum, n, 8); \
10608 if ((sum >> 8) == 0) \
10609 ge |= 1 << n; \
10610 } while(0)
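/* Illustrative GE behaviour for the unsigned modulo ops: a UADD16 lane
 * computing 0x8000 + 0x8000 carries out (sum >> 16 == 1), so both GE
 * bits for that lane are set; for USUB16 the GE bits are set when the
 * lane does not borrow.
 */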
10611
10612 #define PFX u
10613 #define ARITH_GE
10614
10615 #include "op_addsub.h"
10616
10617 /* Halved signed arithmetic. */
10618 #define ADD16(a, b, n) \
10619 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
10620 #define SUB16(a, b, n) \
10621 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
10622 #define ADD8(a, b, n) \
10623 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
10624 #define SUB8(a, b, n) \
10625 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
10626 #define PFX sh
10627
10628 #include "op_addsub.h"
10629
10630 /* Halved unsigned arithmetic. */
10631 #define ADD16(a, b, n) \
10632 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10633 #define SUB16(a, b, n) \
10634 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10635 #define ADD8(a, b, n) \
10636 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10637 #define SUB8(a, b, n) \
10638 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10639 #define PFX uh
10640
10641 #include "op_addsub.h"
10642
10643 static inline uint8_t do_usad(uint8_t a, uint8_t b)
10644 {
10645 if (a > b)
10646 return a - b;
10647 else
10648 return b - a;
10649 }
10650
10651 /* Unsigned sum of absolute byte differences. */
10652 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
10653 {
10654 uint32_t sum;
10655 sum = do_usad(a, b);
10656 sum += do_usad(a >> 8, b >> 8);
10657 sum += do_usad(a >> 16, b >> 16);
10658 sum += do_usad(a >> 24, b >> 24);
10659 return sum;
10660 }
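/* Illustrative: usad8(0x01020304, 0x04030201) sums the bytewise
 * differences |1-4| + |2-3| + |3-2| + |4-1| = 8.
 */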
10661
10662 /* For ARMv6 SEL instruction. */
10663 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
10664 {
10665 uint32_t mask;
10666
10667 mask = 0;
10668 if (flags & 1)
10669 mask |= 0xff;
10670 if (flags & 2)
10671 mask |= 0xff00;
10672 if (flags & 4)
10673 mask |= 0xff0000;
10674 if (flags & 8)
10675 mask |= 0xff000000;
10676 return (a & mask) | (b & ~mask);
10677 }
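/* Illustrative: sel_flags(0x5, a, b) builds mask = 0x00ff00ff, taking
 * bytes 0 and 2 from a and bytes 1 and 3 from b, e.g.
 * sel_flags(0x5, 0xaabbccdd, 0x11223344) == 0x11bb33dd.
 */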
10678
10679 /* VFP support. We follow the convention used for VFP instructions:
10680 Single precision routines have a "s" suffix, double precision a
10681 "d" suffix. */
10682
10683 /* Convert host exception flags to vfp form. */
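/* The returned bits are the FPSCR cumulative exception flags:
 * IOC (bit 0), DZC (1), OFC (2), UFC (3), IXC (4) and IDC (7).
 */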
10684 static inline int vfp_exceptbits_from_host(int host_bits)
10685 {
10686 int target_bits = 0;
10687
10688 if (host_bits & float_flag_invalid)
10689 target_bits |= 1;
10690 if (host_bits & float_flag_divbyzero)
10691 target_bits |= 2;
10692 if (host_bits & float_flag_overflow)
10693 target_bits |= 4;
10694 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
10695 target_bits |= 8;
10696 if (host_bits & float_flag_inexact)
10697 target_bits |= 0x10;
10698 if (host_bits & float_flag_input_denormal)
10699 target_bits |= 0x80;
10700 return target_bits;
10701 }
10702
10703 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
10704 {
10705 int i;
10706 uint32_t fpscr;
10707
10708 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
10709 | (env->vfp.vec_len << 16)
10710 | (env->vfp.vec_stride << 20);
10711 i = get_float_exception_flags(&env->vfp.fp_status);
10712 i |= get_float_exception_flags(&env->vfp.standard_fp_status);
10713 fpscr |= vfp_exceptbits_from_host(i);
10714 return fpscr;
10715 }
10716
10717 uint32_t vfp_get_fpscr(CPUARMState *env)
10718 {
10719 return HELPER(vfp_get_fpscr)(env);
10720 }
10721
10722 /* Convert vfp exception flags to target form. */
10723 static inline int vfp_exceptbits_to_host(int target_bits)
10724 {
10725 int host_bits = 0;
10726
10727 if (target_bits & 1)
10728 host_bits |= float_flag_invalid;
10729 if (target_bits & 2)
10730 host_bits |= float_flag_divbyzero;
10731 if (target_bits & 4)
10732 host_bits |= float_flag_overflow;
10733 if (target_bits & 8)
10734 host_bits |= float_flag_underflow;
10735 if (target_bits & 0x10)
10736 host_bits |= float_flag_inexact;
10737 if (target_bits & 0x80)
10738 host_bits |= float_flag_input_denormal;
10739 return host_bits;
10740 }
10741
10742 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
10743 {
10744 int i;
10745 uint32_t changed;
10746
10747 changed = env->vfp.xregs[ARM_VFP_FPSCR];
10748 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
10749 env->vfp.vec_len = (val >> 16) & 7;
10750 env->vfp.vec_stride = (val >> 20) & 3;
10751
10752 changed ^= val;
10753 if (changed & (3 << 22)) {
10754 i = (val >> 22) & 3;
10755 switch (i) {
10756 case FPROUNDING_TIEEVEN:
10757 i = float_round_nearest_even;
10758 break;
10759 case FPROUNDING_POSINF:
10760 i = float_round_up;
10761 break;
10762 case FPROUNDING_NEGINF:
10763 i = float_round_down;
10764 break;
10765 case FPROUNDING_ZERO:
10766 i = float_round_to_zero;
10767 break;
10768 }
10769 set_float_rounding_mode(i, &env->vfp.fp_status);
10770 }
10771 if (changed & (1 << 24)) {
10772 set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
10773 set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
10774 }
10775 if (changed & (1 << 25))
10776 set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
10777
10778 i = vfp_exceptbits_to_host(val);
10779 set_float_exception_flags(i, &env->vfp.fp_status);
10780 set_float_exception_flags(0, &env->vfp.standard_fp_status);
10781 }
10782
10783 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
10784 {
10785 HELPER(vfp_set_fpscr)(env, val);
10786 }
10787
10788 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
10789
10790 #define VFP_BINOP(name) \
10791 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
10792 { \
10793 float_status *fpst = fpstp; \
10794 return float32_ ## name(a, b, fpst); \
10795 } \
10796 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
10797 { \
10798 float_status *fpst = fpstp; \
10799 return float64_ ## name(a, b, fpst); \
10800 }
10801 VFP_BINOP(add)
10802 VFP_BINOP(sub)
10803 VFP_BINOP(mul)
10804 VFP_BINOP(div)
10805 VFP_BINOP(min)
10806 VFP_BINOP(max)
10807 VFP_BINOP(minnum)
10808 VFP_BINOP(maxnum)
10809 #undef VFP_BINOP
10810
10811 float32 VFP_HELPER(neg, s)(float32 a)
10812 {
10813 return float32_chs(a);
10814 }
10815
10816 float64 VFP_HELPER(neg, d)(float64 a)
10817 {
10818 return float64_chs(a);
10819 }
10820
10821 float32 VFP_HELPER(abs, s)(float32 a)
10822 {
10823 return float32_abs(a);
10824 }
10825
10826 float64 VFP_HELPER(abs, d)(float64 a)
10827 {
10828 return float64_abs(a);
10829 }
10830
10831 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
10832 {
10833 return float32_sqrt(a, &env->vfp.fp_status);
10834 }
10835
10836 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
10837 {
10838 return float64_sqrt(a, &env->vfp.fp_status);
10839 }
10840
10841 /* XXX: check quiet/signaling case */
10842 #define DO_VFP_cmp(p, type) \
10843 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
10844 { \
10845 uint32_t flags; \
10846 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
10847 case 0: flags = 0x6; break; \
10848 case -1: flags = 0x8; break; \
10849 case 1: flags = 0x2; break; \
10850 default: case 2: flags = 0x3; break; \
10851 } \
10852 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
10853 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
10854 } \
10855 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
10856 { \
10857 uint32_t flags; \
10858 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
10859 case 0: flags = 0x6; break; \
10860 case -1: flags = 0x8; break; \
10861 case 1: flags = 0x2; break; \
10862 default: case 2: flags = 0x3; break; \
10863 } \
10864 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
10865 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
10866 }
10867 DO_VFP_cmp(s, float32)
10868 DO_VFP_cmp(d, float64)
10869 #undef DO_VFP_cmp
10870
10871 /* Integer to float and float to integer conversions */
10872
10873 #define CONV_ITOF(name, fsz, sign) \
10874 float##fsz HELPER(name)(uint32_t x, void *fpstp) \
10875 { \
10876 float_status *fpst = fpstp; \
10877 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
10878 }
10879
10880 #define CONV_FTOI(name, fsz, sign, round) \
10881 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
10882 { \
10883 float_status *fpst = fpstp; \
10884 if (float##fsz##_is_any_nan(x)) { \
10885 float_raise(float_flag_invalid, fpst); \
10886 return 0; \
10887 } \
10888 return float##fsz##_to_##sign##int32##round(x, fpst); \
10889 }
10890
10891 #define FLOAT_CONVS(name, p, fsz, sign) \
10892 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
10893 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
10894 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
10895
10896 FLOAT_CONVS(si, s, 32, )
10897 FLOAT_CONVS(si, d, 64, )
10898 FLOAT_CONVS(ui, s, 32, u)
10899 FLOAT_CONVS(ui, d, 64, u)
10900
10901 #undef CONV_ITOF
10902 #undef CONV_FTOI
10903 #undef FLOAT_CONVS
10904
10905 /* floating point conversion */
10906 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
10907 {
10908 float64 r = float32_to_float64(x, &env->vfp.fp_status);
10909 /* ARM requires that S<->D conversion of any kind of NaN generates
10910 * a quiet NaN by forcing the most significant frac bit to 1.
10911 */
10912 return float64_maybe_silence_nan(r, &env->vfp.fp_status);
10913 }
10914
10915 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
10916 {
10917 float32 r = float64_to_float32(x, &env->vfp.fp_status);
10918 /* ARM requires that S<->D conversion of any kind of NaN generates
10919 * a quiet NaN by forcing the most significant frac bit to 1.
10920 */
10921 return float32_maybe_silence_nan(r, &env->vfp.fp_status);
10922 }
10923
10924 /* VFP3 fixed point conversion. */
10925 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
10926 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
10927 void *fpstp) \
10928 { \
10929 float_status *fpst = fpstp; \
10930 float##fsz tmp; \
10931 tmp = itype##_to_##float##fsz(x, fpst); \
10932 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
10933 }
10934
10935 /* Notice that we want only input-denormal exception flags from the
10936 * scalbn operation: the other possible flags (overflow+inexact if
10937 * we overflow to infinity, output-denormal) aren't correct for the
10938 * complete scale-and-convert operation.
10939 */
10940 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
10941 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
10942 uint32_t shift, \
10943 void *fpstp) \
10944 { \
10945 float_status *fpst = fpstp; \
10946 int old_exc_flags = get_float_exception_flags(fpst); \
10947 float##fsz tmp; \
10948 if (float##fsz##_is_any_nan(x)) { \
10949 float_raise(float_flag_invalid, fpst); \
10950 return 0; \
10951 } \
10952 tmp = float##fsz##_scalbn(x, shift, fpst); \
10953 old_exc_flags |= get_float_exception_flags(fpst) \
10954 & float_flag_input_denormal; \
10955 set_float_exception_flags(old_exc_flags, fpst); \
10956 return float##fsz##_to_##itype##round(tmp, fpst); \
10957 }
10958
10959 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \
10960 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
10961 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
10962 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
10963
10964 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
10965 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
10966 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
10967
10968 VFP_CONV_FIX(sh, d, 64, 64, int16)
10969 VFP_CONV_FIX(sl, d, 64, 64, int32)
10970 VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
10971 VFP_CONV_FIX(uh, d, 64, 64, uint16)
10972 VFP_CONV_FIX(ul, d, 64, 64, uint32)
10973 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
10974 VFP_CONV_FIX(sh, s, 32, 32, int16)
10975 VFP_CONV_FIX(sl, s, 32, 32, int32)
10976 VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
10977 VFP_CONV_FIX(uh, s, 32, 32, uint16)
10978 VFP_CONV_FIX(ul, s, 32, 32, uint32)
10979 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
10980 #undef VFP_CONV_FIX
10981 #undef VFP_CONV_FIX_FLOAT
10982 #undef VFP_CONV_FLOAT_FIX_ROUND
10983
10984 /* Set the current fp rounding mode and return the old one.
10985 * The argument is a softfloat float_round_ value.
10986 */
10987 uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
10988 {
10989 float_status *fp_status = &env->vfp.fp_status;
10990
10991 uint32_t prev_rmode = get_float_rounding_mode(fp_status);
10992 set_float_rounding_mode(rmode, fp_status);
10993
10994 return prev_rmode;
10995 }
10996
10997 /* Set the current fp rounding mode in the standard fp status and return
10998 * the old one. This is for NEON instructions that need to change the
10999 * rounding mode but wish to use the standard FPSCR values for everything
11000 * else. Always set the rounding mode back to the correct value after
11001 * modifying it.
11002 * The argument is a softfloat float_round_ value.
11003 */
11004 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
11005 {
11006 float_status *fp_status = &env->vfp.standard_fp_status;
11007
11008 uint32_t prev_rmode = get_float_rounding_mode(fp_status);
11009 set_float_rounding_mode(rmode, fp_status);
11010
11011 return prev_rmode;
11012 }
11013
11014 /* Half precision conversions. */
11015 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
11016 {
11017 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
11018 float32 r = float16_to_float32(make_float16(a), ieee, s);
11019 if (ieee) {
11020 return float32_maybe_silence_nan(r, s);
11021 }
11022 return r;
11023 }
11024
11025 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
11026 {
11027 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
11028 float16 r = float32_to_float16(a, ieee, s);
11029 if (ieee) {
11030 r = float16_maybe_silence_nan(r, s);
11031 }
11032 return float16_val(r);
11033 }
11034
11035 float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
11036 {
11037 return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
11038 }
11039
11040 uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
11041 {
11042 return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
11043 }
11044
11045 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
11046 {
11047 return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
11048 }
11049
11050 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
11051 {
11052 return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
11053 }
11054
11055 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
11056 {
11057 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
11058 float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
11059 if (ieee) {
11060 return float64_maybe_silence_nan(r, &env->vfp.fp_status);
11061 }
11062 return r;
11063 }
11064
11065 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
11066 {
11067 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
11068 float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
11069 if (ieee) {
11070 r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
11071 }
11072 return float16_val(r);
11073 }
11074
11075 #define float32_two make_float32(0x40000000)
11076 #define float32_three make_float32(0x40400000)
11077 #define float32_one_point_five make_float32(0x3fc00000)
11078
11079 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
11080 {
11081 float_status *s = &env->vfp.standard_fp_status;
11082 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
11083 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
11084 if (!(float32_is_zero(a) || float32_is_zero(b))) {
11085 float_raise(float_flag_input_denormal, s);
11086 }
11087 return float32_two;
11088 }
11089 return float32_sub(float32_two, float32_mul(a, b, s), s);
11090 }
11091
11092 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
11093 {
11094 float_status *s = &env->vfp.standard_fp_status;
11095 float32 product;
11096 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
11097 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
11098 if (!(float32_is_zero(a) || float32_is_zero(b))) {
11099 float_raise(float_flag_input_denormal, s);
11100 }
11101 return float32_one_point_five;
11102 }
11103 product = float32_mul(a, b, s);
11104 return float32_div(float32_sub(float32_three, product, s), float32_two, s);
11105 }
11106
11107 /* NEON helpers. */
11108
11109 /* Constants 256 and 512 are used in some helpers; we avoid relying on
11110 * int->float conversions at run-time. */
11111 #define float64_256 make_float64(0x4070000000000000LL)
11112 #define float64_512 make_float64(0x4080000000000000LL)
11113 #define float32_maxnorm make_float32(0x7f7fffff)
11114 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
11115
11116 /* Reciprocal functions
11117 *
11118 * The algorithm that must be used to calculate the estimate
11119 * is specified by the ARM ARM, see FPRecipEstimate()
11120 */
11121
11122 static float64 recip_estimate(float64 a, float_status *real_fp_status)
11123 {
11124 /* These calculations mustn't set any fp exception flags,
11125 * so we use a local copy of the fp_status.
11126 */
11127 float_status dummy_status = *real_fp_status;
11128 float_status *s = &dummy_status;
11129 /* q = (int)(a * 512.0) */
11130 float64 q = float64_mul(float64_512, a, s);
11131 int64_t q_int = float64_to_int64_round_to_zero(q, s);
11132
11133 /* r = 1.0 / (((double)q + 0.5) / 512.0) */
11134 q = int64_to_float64(q_int, s);
11135 q = float64_add(q, float64_half, s);
11136 q = float64_div(q, float64_512, s);
11137 q = float64_div(float64_one, q, s);
11138
11139 /* s = (int)(256.0 * r + 0.5) */
11140 q = float64_mul(q, float64_256, s);
11141 q = float64_add(q, float64_half, s);
11142 q_int = float64_to_int64_round_to_zero(q, s);
11143
11144 /* return (double)s / 256.0 */
11145 return float64_div(int64_to_float64(q_int, s), float64_256, s);
11146 }
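/* Worked example: for a = 0.5 the steps above give q_int = 256,
 * r = 512/256.5 ~= 1.9961, s = (int)(256 * r + 0.5) = 511, and the
 * returned estimate is 511/256 ~= 1.9961.
 */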
11147
11148 /* Common wrapper to call recip_estimate */
11149 static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
11150 {
11151 uint64_t val64 = float64_val(num);
11152 uint64_t frac = extract64(val64, 0, 52);
11153 int64_t exp = extract64(val64, 52, 11);
11154 uint64_t sbit;
11155 float64 scaled, estimate;
11156
11157 /* Generate the scaled number for the estimate function */
11158 if (exp == 0) {
11159 if (extract64(frac, 51, 1) == 0) {
11160 exp = -1;
11161 frac = extract64(frac, 0, 50) << 2;
11162 } else {
11163 frac = extract64(frac, 0, 51) << 1;
11164 }
11165 }
11166
11167 /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
11168 scaled = make_float64((0x3feULL << 52)
11169 | extract64(frac, 44, 8) << 44);
11170
11171 estimate = recip_estimate(scaled, fpst);
11172
11173 /* Build new result */
11174 val64 = float64_val(estimate);
11175 sbit = 0x8000000000000000ULL & val64;
11176 exp = off - exp;
11177 frac = extract64(val64, 0, 52);
11178
11179 if (exp == 0) {
11180 frac = 1ULL << 51 | extract64(frac, 1, 51);
11181 } else if (exp == -1) {
11182 frac = 1ULL << 50 | extract64(frac, 2, 50);
11183 exp = 0;
11184 }
11185
11186 return make_float64(sbit | (exp << 52) | frac);
11187 }
11188
11189 static bool round_to_inf(float_status *fpst, bool sign_bit)
11190 {
11191 switch (fpst->float_rounding_mode) {
11192 case float_round_nearest_even: /* Round to Nearest */
11193 return true;
11194 case float_round_up: /* Round to +Inf */
11195 return !sign_bit;
11196 case float_round_down: /* Round to -Inf */
11197 return sign_bit;
11198 case float_round_to_zero: /* Round to Zero */
11199 return false;
11200 }
11201
11202 g_assert_not_reached();
11203 }
11204
11205 float32 HELPER(recpe_f32)(float32 input, void *fpstp)
11206 {
11207 float_status *fpst = fpstp;
11208 float32 f32 = float32_squash_input_denormal(input, fpst);
11209 uint32_t f32_val = float32_val(f32);
11210 uint32_t f32_sbit = 0x80000000ULL & f32_val;
11211 int32_t f32_exp = extract32(f32_val, 23, 8);
11212 uint32_t f32_frac = extract32(f32_val, 0, 23);
11213 float64 f64, r64;
11214 uint64_t r64_val;
11215 int64_t r64_exp;
11216 uint64_t r64_frac;
11217
11218 if (float32_is_any_nan(f32)) {
11219 float32 nan = f32;
11220 if (float32_is_signaling_nan(f32, fpst)) {
11221 float_raise(float_flag_invalid, fpst);
11222 nan = float32_maybe_silence_nan(f32, fpst);
11223 }
11224 if (fpst->default_nan_mode) {
11225 nan = float32_default_nan(fpst);
11226 }
11227 return nan;
11228 } else if (float32_is_infinity(f32)) {
11229 return float32_set_sign(float32_zero, float32_is_neg(f32));
11230 } else if (float32_is_zero(f32)) {
11231 float_raise(float_flag_divbyzero, fpst);
11232 return float32_set_sign(float32_infinity, float32_is_neg(f32));
11233 } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
11234 /* Abs(value) < 2.0^-128 */
11235 float_raise(float_flag_overflow | float_flag_inexact, fpst);
11236 if (round_to_inf(fpst, f32_sbit)) {
11237 return float32_set_sign(float32_infinity, float32_is_neg(f32));
11238 } else {
11239 return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
11240 }
11241 } else if (f32_exp >= 253 && fpst->flush_to_zero) {
11242 float_raise(float_flag_underflow, fpst);
11243 return float32_set_sign(float32_zero, float32_is_neg(f32));
11244 }
11245
11246
11247 f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
11248 r64 = call_recip_estimate(f64, 253, fpst);
11249 r64_val = float64_val(r64);
11250 r64_exp = extract64(r64_val, 52, 11);
11251 r64_frac = extract64(r64_val, 0, 52);
11252
11253 /* result = sign : result_exp<7:0> : fraction<51:29>; */
11254 return make_float32(f32_sbit |
11255 (r64_exp & 0xff) << 23 |
11256 extract64(r64_frac, 29, 24));
11257 }
11258
11259 float64 HELPER(recpe_f64)(float64 input, void *fpstp)
11260 {
11261 float_status *fpst = fpstp;
11262 float64 f64 = float64_squash_input_denormal(input, fpst);
11263 uint64_t f64_val = float64_val(f64);
11264 uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
11265 int64_t f64_exp = extract64(f64_val, 52, 11);
11266 float64 r64;
11267 uint64_t r64_val;
11268 int64_t r64_exp;
11269 uint64_t r64_frac;
11270
11271 /* Deal with any special cases */
11272 if (float64_is_any_nan(f64)) {
11273 float64 nan = f64;
11274 if (float64_is_signaling_nan(f64, fpst)) {
11275 float_raise(float_flag_invalid, fpst);
11276 nan = float64_maybe_silence_nan(f64, fpst);
11277 }
11278 if (fpst->default_nan_mode) {
11279 nan = float64_default_nan(fpst);
11280 }
11281 return nan;
11282 } else if (float64_is_infinity(f64)) {
11283 return float64_set_sign(float64_zero, float64_is_neg(f64));
11284 } else if (float64_is_zero(f64)) {
11285 float_raise(float_flag_divbyzero, fpst);
11286 return float64_set_sign(float64_infinity, float64_is_neg(f64));
11287 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
11288 /* Abs(value) < 2.0^-1024 */
11289 float_raise(float_flag_overflow | float_flag_inexact, fpst);
11290 if (round_to_inf(fpst, f64_sbit)) {
11291 return float64_set_sign(float64_infinity, float64_is_neg(f64));
11292 } else {
11293 return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
11294 }
11295 } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
11296 float_raise(float_flag_underflow, fpst);
11297 return float64_set_sign(float64_zero, float64_is_neg(f64));
11298 }
11299
11300 r64 = call_recip_estimate(f64, 2045, fpst);
11301 r64_val = float64_val(r64);
11302 r64_exp = extract64(r64_val, 52, 11);
11303 r64_frac = extract64(r64_val, 0, 52);
11304
11305 /* result = sign : result_exp<10:0> : fraction<51:0> */
11306 return make_float64(f64_sbit |
11307 ((r64_exp & 0x7ff) << 52) |
11308 r64_frac);
11309 }
11310
11311 /* The algorithm that must be used to calculate the estimate
11312 * is specified by the ARM ARM.
11313 */
11314 static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
11315 {
11316 /* These calculations mustn't set any fp exception flags,
11317 * so we use a local copy of the fp_status.
11318 */
11319 float_status dummy_status = *real_fp_status;
11320 float_status *s = &dummy_status;
11321 float64 q;
11322 int64_t q_int;
11323
11324 if (float64_lt(a, float64_half, s)) {
11325 /* range 0.25 <= a < 0.5 */
11326
11327 /* a in units of 1/512 rounded down */
11328 /* q0 = (int)(a * 512.0); */
11329 q = float64_mul(float64_512, a, s);
11330 q_int = float64_to_int64_round_to_zero(q, s);
11331
11332 /* reciprocal root r */
11333 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
11334 q = int64_to_float64(q_int, s);
11335 q = float64_add(q, float64_half, s);
11336 q = float64_div(q, float64_512, s);
11337 q = float64_sqrt(q, s);
11338 q = float64_div(float64_one, q, s);
11339 } else {
11340 /* range 0.5 <= a < 1.0 */
11341
11342 /* a in units of 1/256 rounded down */
11343 /* q1 = (int)(a * 256.0); */
11344 q = float64_mul(float64_256, a, s);
11345 int64_t q_int = float64_to_int64_round_to_zero(q, s);
11346
11347 /* reciprocal root r */
11348 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
11349 q = int64_to_float64(q_int, s);
11350 q = float64_add(q, float64_half, s);
11351 q = float64_div(q, float64_256, s);
11352 q = float64_sqrt(q, s);
11353 q = float64_div(float64_one, q, s);
11354 }
11355 /* r in units of 1/256 rounded to nearest */
11356 /* s = (int)(256.0 * r + 0.5); */
11357
11358 q = float64_mul(q, float64_256, s);
11359 q = float64_add(q, float64_half, s);
11360 q_int = float64_to_int64_round_to_zero(q, s);
11361
11362 /* return (double)s / 256.0;*/
11363 return float64_div(int64_to_float64(q_int, s), float64_256, s);
11364 }
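/* Worked example: for a = 0.25 the first branch gives q_int = 128,
 * r = 1/sqrt(128.5/512) ~= 1.9961, s = 511, and the returned estimate
 * 511/256 ~= 1.9961 approximates 1/sqrt(0.25) = 2.0.
 */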
11365
11366 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
11367 {
11368 float_status *s = fpstp;
11369 float32 f32 = float32_squash_input_denormal(input, s);
11370 uint32_t val = float32_val(f32);
11371 uint32_t f32_sbit = 0x80000000 & val;
11372 int32_t f32_exp = extract32(val, 23, 8);
11373 uint32_t f32_frac = extract32(val, 0, 23);
11374 uint64_t f64_frac;
11375 uint64_t val64;
11376 int result_exp;
11377 float64 f64;
11378
11379 if (float32_is_any_nan(f32)) {
11380 float32 nan = f32;
11381 if (float32_is_signaling_nan(f32, s)) {
11382 float_raise(float_flag_invalid, s);
11383 nan = float32_maybe_silence_nan(f32, s);
11384 }
11385 if (s->default_nan_mode) {
11386 nan = float32_default_nan(s);
11387 }
11388 return nan;
11389 } else if (float32_is_zero(f32)) {
11390 float_raise(float_flag_divbyzero, s);
11391 return float32_set_sign(float32_infinity, float32_is_neg(f32));
11392 } else if (float32_is_neg(f32)) {
11393 float_raise(float_flag_invalid, s);
11394 return float32_default_nan(s);
11395 } else if (float32_is_infinity(f32)) {
11396 return float32_zero;
11397 }
11398
11399 /* Scale and normalize to a double-precision value between 0.25 and 1.0,
11400 * preserving the parity of the exponent. */
11401
11402 f64_frac = ((uint64_t) f32_frac) << 29;
11403 if (f32_exp == 0) {
11404 while (extract64(f64_frac, 51, 1) == 0) {
11405 f64_frac = f64_frac << 1;
11406 f32_exp = f32_exp - 1;
11407 }
11408 f64_frac = extract64(f64_frac, 0, 51) << 1;
11409 }
11410
11411 if (extract64(f32_exp, 0, 1) == 0) {
11412 f64 = make_float64(((uint64_t) f32_sbit) << 32
11413 | (0x3feULL << 52)
11414 | f64_frac);
11415 } else {
11416 f64 = make_float64(((uint64_t) f32_sbit) << 32
11417 | (0x3fdULL << 52)
11418 | f64_frac);
11419 }
11420
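/* 380 == 3 * 127 - 1, where 127 is the single-precision exponent bias;
 * this is the result-exponent calculation from the ARM ARM's
 * FPRSqrtEstimate() pseudocode. The f64 helper below uses the
 * double-precision analogue 3068 == 3 * 1023 - 1.
 */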
11421 result_exp = (380 - f32_exp) / 2;
11422
11423 f64 = recip_sqrt_estimate(f64, s);
11424
11425 val64 = float64_val(f64);
11426
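/* No sign bit is OR'ed in below: negative inputs already returned the
 * default NaN above, so the result here is always positive.
 */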
11427 val = ((result_exp & 0xff) << 23)
11428 | ((val64 >> 29) & 0x7fffff);
11429 return make_float32(val);
11430 }
11431
11432 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
11433 {
11434 float_status *s = fpstp;
11435 float64 f64 = float64_squash_input_denormal(input, s);
11436 uint64_t val = float64_val(f64);
11437 uint64_t f64_sbit = 0x8000000000000000ULL & val;
11438 int64_t f64_exp = extract64(val, 52, 11);
11439 uint64_t f64_frac = extract64(val, 0, 52);
11440 int64_t result_exp;
11441 uint64_t result_frac;
11442
11443 if (float64_is_any_nan(f64)) {
11444 float64 nan = f64;
11445 if (float64_is_signaling_nan(f64, s)) {
11446 float_raise(float_flag_invalid, s);
11447 nan = float64_maybe_silence_nan(f64, s);
11448 }
11449 if (s->default_nan_mode) {
11450 nan = float64_default_nan(s);
11451 }
11452 return nan;
11453 } else if (float64_is_zero(f64)) {
11454 float_raise(float_flag_divbyzero, s);
11455 return float64_set_sign(float64_infinity, float64_is_neg(f64));
11456 } else if (float64_is_neg(f64)) {
11457 float_raise(float_flag_invalid, s);
11458 return float64_default_nan(s);
11459 } else if (float64_is_infinity(f64)) {
11460 return float64_zero;
11461 }
11462
11463 /* Scale and normalize to a double-precision value between 0.25 and 1.0,
11464 * preserving the parity of the exponent. */
11465
11466 if (f64_exp == 0) {
11467 while (extract64(f64_frac, 51, 1) == 0) {
11468 f64_frac = f64_frac << 1;
11469 f64_exp = f64_exp - 1;
11470 }
11471 f64_frac = extract64(f64_frac, 0, 51) << 1;
11472 }
11473
11474 if (extract64(f64_exp, 0, 1) == 0) {
11475 f64 = make_float64(f64_sbit
11476 | (0x3feULL << 52)
11477 | f64_frac);
11478 } else {
11479 f64 = make_float64(f64_sbit
11480 | (0x3fdULL << 52)
11481 | f64_frac);
11482 }
11483
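/* 3068 == 3 * 1023 - 1: see the note on 380 in rsqrte_f32 above. */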
11484 result_exp = (3068 - f64_exp) / 2;
11485
11486 f64 = recip_sqrt_estimate(f64, s);
11487
11488 result_frac = extract64(float64_val(f64), 0, 52);
11489
11490 return make_float64(f64_sbit |
11491 ((result_exp & 0x7ff) << 52) |
11492 result_frac);
11493 }
11494
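/* URECPE: 'a' is a fixed-point fraction with the binary point after
 * bit 31. Values below 0.5 (bit 31 clear) saturate to all-ones.
 * Otherwise bits 30..0 are repacked as the top fraction bits of a
 * double in [0.5, 1.0) (biased exponent 0x3fe), run through
 * recip_estimate(), and the top 31 fraction bits of the estimate are
 * returned beneath an always-set bit 31.
 */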
11495 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
11496 {
11497 float_status *s = fpstp;
11498 float64 f64;
11499
11500 if ((a & 0x80000000) == 0) {
11501 return 0xffffffff;
11502 }
11503
11504 f64 = make_float64((0x3feULL << 52)
11505 | ((int64_t)(a & 0x7fffffff) << 21));
11506
11507 f64 = recip_estimate(f64, s);
11508
11509 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
11510 }
11511
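/* URSQRTE: as above, but values below 0.25 (bits 31:30 clear) saturate,
 * and the two leading bits pick a biased exponent of 0x3fe or 0x3fd so
 * the repacked double lands in [0.25, 1.0) with the input's exponent
 * parity preserved for recip_sqrt_estimate().
 */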
11512 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
11513 {
11514 float_status *fpst = fpstp;
11515 float64 f64;
11516
11517 if ((a & 0xc0000000) == 0) {
11518 return 0xffffffff;
11519 }
11520
11521 if (a & 0x80000000) {
11522 f64 = make_float64((0x3feULL << 52)
11523 | ((uint64_t)(a & 0x7fffffff) << 21));
11524 } else { /* bits 31-30 == '01' */
11525 f64 = make_float64((0x3fdULL << 52)
11526 | ((uint64_t)(a & 0x3fffffff) << 22));
11527 }
11528
11529 f64 = recip_sqrt_estimate(f64, fpst);
11530
11531 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
11532 }
11533
11534 /* VFPv4 fused multiply-accumulate */
11535 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
11536 {
11537 float_status *fpst = fpstp;
11538 return float32_muladd(a, b, c, 0, fpst);
11539 }
11540
11541 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
11542 {
11543 float_status *fpst = fpstp;
11544 return float64_muladd(a, b, c, 0, fpst);
11545 }
11546
11547 /* ARMv8 round to integral */
11548 float32 HELPER(rints_exact)(float32 x, void *fp_status)
11549 {
11550 return float32_round_to_int(x, fp_status);
11551 }
11552
11553 float64 HELPER(rintd_exact)(float64 x, void *fp_status)
11554 {
11555 return float64_round_to_int(x, fp_status);
11556 }
11557
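/* Unlike the _exact helpers above (used for FRINTX, which must signal
 * Inexact), these suppress any Inexact flag raised by the rounding,
 * since the other FRINT* flavours do not report it.
 */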
11558 float32 HELPER(rints)(float32 x, void *fp_status)
11559 {
11560 int old_flags = get_float_exception_flags(fp_status), new_flags;
11561 float32 ret;
11562
11563 ret = float32_round_to_int(x, fp_status);
11564
11565 /* Suppress any inexact exceptions the conversion produced */
11566 if (!(old_flags & float_flag_inexact)) {
11567 new_flags = get_float_exception_flags(fp_status);
11568 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
11569 }
11570
11571 return ret;
11572 }
11573
11574 float64 HELPER(rintd)(float64 x, void *fp_status)
11575 {
11576 int old_flags = get_float_exception_flags(fp_status), new_flags;
11577 float64 ret;
11578
11579 ret = float64_round_to_int(x, fp_status);
11580
11583 /* Suppress any inexact exceptions the conversion produced */
11584 if (!(old_flags & float_flag_inexact)) {
11585 new_flags = get_float_exception_flags(fp_status);
11586 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
11587 }
11588
11589 return ret;
11590 }
11591
11592 /* Convert ARM rounding mode to softfloat */
11593 int arm_rmode_to_sf(int rmode)
11594 {
11595 switch (rmode) {
11596 case FPROUNDING_TIEAWAY:
11597 rmode = float_round_ties_away;
11598 break;
11599 case FPROUNDING_ODD:
11600 /* FIXME: add support for ODD */
11601 qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
11602 rmode);
/* fall through: treat round-to-odd as round-to-nearest-even for now */
11603 case FPROUNDING_TIEEVEN:
11604 default:
11605 rmode = float_round_nearest_even;
11606 break;
11607 case FPROUNDING_POSINF:
11608 rmode = float_round_up;
11609 break;
11610 case FPROUNDING_NEGINF:
11611 rmode = float_round_down;
11612 break;
11613 case FPROUNDING_ZERO:
11614 rmode = float_round_to_zero;
11615 break;
11616 }
11617 return rmode;
11618 }
11619
11620 /* CRC helpers.
11621 * The upper bytes of val (above the number specified by 'bytes') must have
11622 * been zeroed out by the caller.
11623 */
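/* Note that 'bytes' can be at most 4 here: val is staged through a
 * 4-byte buffer before being handed to the CRC routine.
 */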
11624 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
11625 {
11626 uint8_t buf[4];
11627
11628 stl_le_p(buf, val);
11629
11630 /* zlib crc32 converts the accumulator and output to one's complement. */
11631 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
11632 }
11633
11634 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
11635 {
11636 uint8_t buf[4];
11637
11638 stl_le_p(buf, val);
11639
11640 /* Linux crc32c converts the output to one's complement. */
11641 return crc32c(acc, buf, bytes) ^ 0xffffffff;
11642 }
11643
11644 /* Return the exception level to which FP-disabled exceptions should
11645 * be taken, or 0 if FP is enabled.
11646 */
11647 static inline int fp_exception_el(CPUARMState *env)
11648 {
11649 #ifndef CONFIG_USER_ONLY
11650 int fpen;
11651 int cur_el = arm_current_el(env);
11652
11653 /* CPACR and the CPTR registers don't exist before v6, so FP is
11654 * always accessible
11655 */
11656 if (!arm_feature(env, ARM_FEATURE_V6)) {
11657 return 0;
11658 }
11659
11660 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
11661 * 0, 2 : trap EL0 and EL1/PL1 accesses
11662 * 1 : trap only EL0 accesses
11663 * 3 : trap no accesses
11664 */
11665 fpen = extract32(env->cp15.cpacr_el1, 20, 2);
11666 switch (fpen) {
11667 case 0:
11668 case 2:
11669 if (cur_el == 0 || cur_el == 1) {
11670 /* Trap to PL1, which might be EL1 or EL3 */
11671 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
11672 return 3;
11673 }
11674 return 1;
11675 }
11676 if (cur_el == 3 && !is_a64(env)) {
11677 /* Secure PL1 running at EL3 */
11678 return 3;
11679 }
11680 break;
11681 case 1:
11682 if (cur_el == 0) {
11683 return 1;
11684 }
11685 break;
11686 case 3:
11687 break;
11688 }
11689
11690 /* For the CPTR registers we don't need to guard with an ARM_FEATURE
11691 * check because zero bits in the registers mean "don't trap".
11692 */
11693
11694 /* CPTR_EL2 : present in v7VE or v8 */
11695 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
11696 && !arm_is_secure_below_el3(env)) {
11697 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
11698 return 2;
11699 }
11700
11701 /* CPTR_EL3 : present in v8 */
11702 if (extract32(env->cp15.cptr_el[3], 10, 1)) {
11703 /* Trap all FP ops to EL3 */
11704 return 3;
11705 }
11706 #endif
11707 return 0;
11708 }
11709
11710 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
11711 target_ulong *cs_base, uint32_t *pflags)
11712 {
11713 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
11714 uint32_t flags;
11715
11716 if (is_a64(env)) {
11717 *pc = env->pc;
11718 flags = ARM_TBFLAG_AARCH64_STATE_MASK;
11719 /* Get control bits for tagged addresses */
11720 flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
11721 flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
11722 } else {
11723 *pc = env->regs[15];
11724 flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
11725 | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
11726 | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
11727 | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
11728 | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
11729 if (!(access_secure_reg(env))) {
11730 flags |= ARM_TBFLAG_NS_MASK;
11731 }
11732 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
11733 || arm_el_is_aa64(env, 1)) {
11734 flags |= ARM_TBFLAG_VFPEN_MASK;
11735 }
11736 flags |= (extract32(env->cp15.c15_cpar, 0, 2)
11737 << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
11738 }
11739
11740 flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
11741
11742 /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
11743 * states defined in the ARM ARM for software singlestep:
11744 * SS_ACTIVE PSTATE.SS State
11745 * 0 x Inactive (the TB flag for SS is always 0)
11746 * 1 0 Active-pending
11747 * 1 1 Active-not-pending
11748 */
11749 if (arm_singlestep_active(env)) {
11750 flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
11751 if (is_a64(env)) {
11752 if (env->pstate & PSTATE_SS) {
11753 flags |= ARM_TBFLAG_PSTATE_SS_MASK;
11754 }
11755 } else {
11756 if (env->uncached_cpsr & PSTATE_SS) {
11757 flags |= ARM_TBFLAG_PSTATE_SS_MASK;
11758 }
11759 }
11760 }
11761 if (arm_cpu_data_is_big_endian(env)) {
11762 flags |= ARM_TBFLAG_BE_DATA_MASK;
11763 }
11764 flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
11765
11766 if (arm_v7m_is_handler_mode(env)) {
11767 flags |= ARM_TBFLAG_HANDLER_MASK;
11768 }
11769
11770 *pflags = flags;
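/* cs_base carries no information on ARM; it is always 0. */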
11771 *cs_base = 0;
11772 }