git.proxmox.com Git - mirror_qemu.git/blob - target/arm/helper.c
target/arm: Correctly implement ACTLR2, HACTLR2
1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "target/arm/idau.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/gdbstub.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/host-utils.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "hw/semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/tcg.h"
29 #include "qemu/range.h"
30 #include "qapi/qapi-commands-machine-target.h"
31 #include "qapi/error.h"
32 #include "qemu/guest-random.h"
33 #ifdef CONFIG_TCG
34 #include "arm_ldst.h"
35 #include "exec/cpu_ldst.h"
36 #endif
37
38 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
39
40 #ifndef CONFIG_USER_ONLY
41
42 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
43 MMUAccessType access_type, ARMMMUIdx mmu_idx,
44 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
45 target_ulong *page_size_ptr,
46 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
47 #endif
48
49 static void switch_mode(CPUARMState *env, int mode);
50
51 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
52 {
53 int nregs;
54
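/* GDB register layout implemented below: D0..D15 (or D0..D31 with VFPv3),
 * then 16 Q-register aliases when NEON is present, then FPSID, FPSCR, FPEXC.
 */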
55 /* VFP data registers are always little-endian. */
56 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
57 if (reg < nregs) {
58 stq_le_p(buf, *aa32_vfp_dreg(env, reg));
59 return 8;
60 }
61 if (arm_feature(env, ARM_FEATURE_NEON)) {
62 /* Aliases for Q regs. */
63 nregs += 16;
64 if (reg < nregs) {
65 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
66 stq_le_p(buf, q[0]);
67 stq_le_p(buf + 8, q[1]);
68 return 16;
69 }
70 }
71 switch (reg - nregs) {
72 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
73 case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
74 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
75 }
76 return 0;
77 }
78
79 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
80 {
81 int nregs;
82
83 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
84 if (reg < nregs) {
85 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
86 return 8;
87 }
88 if (arm_feature(env, ARM_FEATURE_NEON)) {
89 nregs += 16;
90 if (reg < nregs) {
91 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
92 q[0] = ldq_le_p(buf);
93 q[1] = ldq_le_p(buf + 8);
94 return 16;
95 }
96 }
97 switch (reg - nregs) {
98 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
99 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
100 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
101 }
102 return 0;
103 }
104
105 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
106 {
107 switch (reg) {
108 case 0 ... 31:
109 /* 128 bit FP register */
110 {
111 uint64_t *q = aa64_vfp_qreg(env, reg);
112 stq_le_p(buf, q[0]);
113 stq_le_p(buf + 8, q[1]);
114 return 16;
115 }
116 case 32:
117 /* FPSR */
118 stl_p(buf, vfp_get_fpsr(env));
119 return 4;
120 case 33:
121 /* FPCR */
122 stl_p(buf, vfp_get_fpcr(env));
123 return 4;
124 default:
125 return 0;
126 }
127 }
128
129 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
130 {
131 switch (reg) {
132 case 0 ... 31:
133 /* 128 bit FP register */
134 {
135 uint64_t *q = aa64_vfp_qreg(env, reg);
136 q[0] = ldq_le_p(buf);
137 q[1] = ldq_le_p(buf + 8);
138 return 16;
139 }
140 case 32:
141 /* FPSR */
142 vfp_set_fpsr(env, ldl_p(buf));
143 return 4;
144 case 33:
145 /* FPCR */
146 vfp_set_fpcr(env, ldl_p(buf));
147 return 4;
148 default:
149 return 0;
150 }
151 }
152
153 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
154 {
155 assert(ri->fieldoffset);
156 if (cpreg_field_is_64bit(ri)) {
157 return CPREG_FIELD64(env, ri);
158 } else {
159 return CPREG_FIELD32(env, ri);
160 }
161 }
162
163 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
164 uint64_t value)
165 {
166 assert(ri->fieldoffset);
167 if (cpreg_field_is_64bit(ri)) {
168 CPREG_FIELD64(env, ri) = value;
169 } else {
170 CPREG_FIELD32(env, ri) = value;
171 }
172 }
173
174 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
175 {
176 return (char *)env + ri->fieldoffset;
177 }
178
179 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
180 {
181 /* Raw read of a coprocessor register (as needed for migration, etc). */
182 if (ri->type & ARM_CP_CONST) {
183 return ri->resetvalue;
184 } else if (ri->raw_readfn) {
185 return ri->raw_readfn(env, ri);
186 } else if (ri->readfn) {
187 return ri->readfn(env, ri);
188 } else {
189 return raw_read(env, ri);
190 }
191 }
192
193 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
194 uint64_t v)
195 {
196 /* Raw write of a coprocessor register (as needed for migration, etc).
197 * Note that constant registers are treated as write-ignored; the
198 * caller should check for success by whether a readback gives the
199 * value written.
200 */
201 if (ri->type & ARM_CP_CONST) {
202 return;
203 } else if (ri->raw_writefn) {
204 ri->raw_writefn(env, ri, v);
205 } else if (ri->writefn) {
206 ri->writefn(env, ri, v);
207 } else {
208 raw_write(env, ri, v);
209 }
210 }
211
212 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
213 {
214 ARMCPU *cpu = env_archcpu(env);
215 const ARMCPRegInfo *ri;
216 uint32_t key;
217
218 key = cpu->dyn_xml.cpregs_keys[reg];
219 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
220 if (ri) {
221 if (cpreg_field_is_64bit(ri)) {
222 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
223 } else {
224 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
225 }
226 }
227 return 0;
228 }
229
230 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
231 {
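/* Writing system registers from the gdbstub is not supported; returning 0
 * reports the register as not handled (no bytes consumed).
 */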
232 return 0;
233 }
234
235 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
236 {
237 /* Return true if the regdef would cause an assertion if you called
238 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
239 * program bug for it not to have the NO_RAW flag).
240 * NB that returning false here doesn't necessarily mean that calling
241 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
242 * read/write access functions which are safe for raw use" from "has
243 * read/write access functions which have side effects but has forgotten
244 * to provide raw access functions".
245 * The tests here line up with the conditions in read/write_raw_cp_reg()
246 * and assertions in raw_read()/raw_write().
247 */
248 if ((ri->type & ARM_CP_CONST) ||
249 ri->fieldoffset ||
250 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
251 return false;
252 }
253 return true;
254 }
255
256 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
257 {
258 /* Write the coprocessor state from cpu->env to the (index,value) list. */
259 int i;
260 bool ok = true;
261
262 for (i = 0; i < cpu->cpreg_array_len; i++) {
263 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
264 const ARMCPRegInfo *ri;
265 uint64_t newval;
266
267 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
268 if (!ri) {
269 ok = false;
270 continue;
271 }
272 if (ri->type & ARM_CP_NO_RAW) {
273 continue;
274 }
275
276 newval = read_raw_cp_reg(&cpu->env, ri);
277 if (kvm_sync) {
278 /*
279 * Only sync if the previous list->cpustate sync succeeded.
280 * Rather than tracking the success/failure state for every
281 * item in the list, we just recheck "does the raw write we must
282 * have made in write_list_to_cpustate() read back OK" here.
283 */
284 uint64_t oldval = cpu->cpreg_values[i];
285
286 if (oldval == newval) {
287 continue;
288 }
289
290 write_raw_cp_reg(&cpu->env, ri, oldval);
291 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
292 continue;
293 }
294
295 write_raw_cp_reg(&cpu->env, ri, newval);
296 }
297 cpu->cpreg_values[i] = newval;
298 }
299 return ok;
300 }
301
302 bool write_list_to_cpustate(ARMCPU *cpu)
303 {
304 int i;
305 bool ok = true;
306
307 for (i = 0; i < cpu->cpreg_array_len; i++) {
308 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
309 uint64_t v = cpu->cpreg_values[i];
310 const ARMCPRegInfo *ri;
311
312 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
313 if (!ri) {
314 ok = false;
315 continue;
316 }
317 if (ri->type & ARM_CP_NO_RAW) {
318 continue;
319 }
320 /* Write value and confirm it reads back as written
321 * (to catch read-only registers and partially read-only
322 * registers where the incoming migration value doesn't match)
323 */
324 write_raw_cp_reg(&cpu->env, ri, v);
325 if (read_raw_cp_reg(&cpu->env, ri) != v) {
326 ok = false;
327 }
328 }
329 return ok;
330 }
331
332 static void add_cpreg_to_list(gpointer key, gpointer opaque)
333 {
334 ARMCPU *cpu = opaque;
335 uint64_t regidx;
336 const ARMCPRegInfo *ri;
337
338 regidx = *(uint32_t *)key;
339 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
340
341 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
342 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
343 /* The value array need not be initialized at this point */
344 cpu->cpreg_array_len++;
345 }
346 }
347
348 static void count_cpreg(gpointer key, gpointer opaque)
349 {
350 ARMCPU *cpu = opaque;
351 uint64_t regidx;
352 const ARMCPRegInfo *ri;
353
354 regidx = *(uint32_t *)key;
355 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
356
357 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
358 cpu->cpreg_array_len++;
359 }
360 }
361
362 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
363 {
364 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
365 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
366
367 if (aidx > bidx) {
368 return 1;
369 }
370 if (aidx < bidx) {
371 return -1;
372 }
373 return 0;
374 }
375
376 void init_cpreg_list(ARMCPU *cpu)
377 {
378 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
379 * Note that we require cpreg_tuples[] to be sorted by key ID.
380 */
381 GList *keys;
382 int arraylen;
383
384 keys = g_hash_table_get_keys(cpu->cp_regs);
385 keys = g_list_sort(keys, cpreg_key_compare);
386
387 cpu->cpreg_array_len = 0;
388
389 g_list_foreach(keys, count_cpreg, cpu);
390
391 arraylen = cpu->cpreg_array_len;
392 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
393 cpu->cpreg_values = g_new(uint64_t, arraylen);
394 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
395 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
396 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
397 cpu->cpreg_array_len = 0;
398
399 g_list_foreach(keys, add_cpreg_to_list, cpu);
400
401 assert(cpu->cpreg_array_len == arraylen);
402
403 g_list_free(keys);
404 }
405
406 /*
407 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
408 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
409 *
410 * access_el3_aa32ns: Used to check AArch32 register views.
411 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
412 */
413 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
414 const ARMCPRegInfo *ri,
415 bool isread)
416 {
417 bool secure = arm_is_secure_below_el3(env);
418
419 assert(!arm_el_is_aa64(env, 3));
420 if (secure) {
421 return CP_ACCESS_TRAP_UNCATEGORIZED;
422 }
423 return CP_ACCESS_OK;
424 }
425
426 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
427 const ARMCPRegInfo *ri,
428 bool isread)
429 {
430 if (!arm_el_is_aa64(env, 3)) {
431 return access_el3_aa32ns(env, ri, isread);
432 }
433 return CP_ACCESS_OK;
434 }
435
436 /* Some secure-only AArch32 registers trap to EL3 if used from
437 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
438 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
439 * We assume that the .access field is set to PL1_RW.
440 */
441 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
442 const ARMCPRegInfo *ri,
443 bool isread)
444 {
445 if (arm_current_el(env) == 3) {
446 return CP_ACCESS_OK;
447 }
448 if (arm_is_secure_below_el3(env)) {
449 return CP_ACCESS_TRAP_EL3;
450 }
451 /* This will be EL1 NS and EL2 NS, which just UNDEF */
452 return CP_ACCESS_TRAP_UNCATEGORIZED;
453 }
454
455 /* Check for traps to "powerdown debug" registers, which are controlled
456 * by MDCR.TDOSA
457 */
458 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
459 bool isread)
460 {
461 int el = arm_current_el(env);
462 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
463 (env->cp15.mdcr_el2 & MDCR_TDE) ||
464 (arm_hcr_el2_eff(env) & HCR_TGE);
465
466 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
467 return CP_ACCESS_TRAP_EL2;
468 }
469 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
470 return CP_ACCESS_TRAP_EL3;
471 }
472 return CP_ACCESS_OK;
473 }
474
475 /* Check for traps to "debug ROM" registers, which are controlled
476 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
477 */
478 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
479 bool isread)
480 {
481 int el = arm_current_el(env);
482 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
483 (env->cp15.mdcr_el2 & MDCR_TDE) ||
484 (arm_hcr_el2_eff(env) & HCR_TGE);
485
486 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
487 return CP_ACCESS_TRAP_EL2;
488 }
489 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
490 return CP_ACCESS_TRAP_EL3;
491 }
492 return CP_ACCESS_OK;
493 }
494
495 /* Check for traps to general debug registers, which are controlled
496 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
497 */
498 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
499 bool isread)
500 {
501 int el = arm_current_el(env);
502 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
503 (env->cp15.mdcr_el2 & MDCR_TDE) ||
504 (arm_hcr_el2_eff(env) & HCR_TGE);
505
506 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
507 return CP_ACCESS_TRAP_EL2;
508 }
509 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
510 return CP_ACCESS_TRAP_EL3;
511 }
512 return CP_ACCESS_OK;
513 }
514
515 /* Check for traps to performance monitor registers, which are controlled
516 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
517 */
518 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
519 bool isread)
520 {
521 int el = arm_current_el(env);
522
523 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
524 && !arm_is_secure_below_el3(env)) {
525 return CP_ACCESS_TRAP_EL2;
526 }
527 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
528 return CP_ACCESS_TRAP_EL3;
529 }
530 return CP_ACCESS_OK;
531 }
532
533 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
534 {
535 ARMCPU *cpu = env_archcpu(env);
536
537 raw_write(env, ri, value);
538 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
539 }
540
541 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
542 {
543 ARMCPU *cpu = env_archcpu(env);
544
545 if (raw_read(env, ri) != value) {
546 /* Unlike real hardware the qemu TLB uses virtual addresses,
547 * not modified virtual addresses, so this causes a TLB flush.
548 */
549 tlb_flush(CPU(cpu));
550 raw_write(env, ri, value);
551 }
552 }
553
554 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
555 uint64_t value)
556 {
557 ARMCPU *cpu = env_archcpu(env);
558
559 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
560 && !extended_addresses_enabled(env)) {
561 /* For VMSA (when not using the LPAE long descriptor page table
562 * format) this register includes the ASID, so do a TLB flush.
563 * For PMSA it is purely a process ID and no action is needed.
564 */
565 tlb_flush(CPU(cpu));
566 }
567 raw_write(env, ri, value);
568 }
569
570 /* IS variants of TLB operations must affect all cores */
571 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
572 uint64_t value)
573 {
574 CPUState *cs = env_cpu(env);
575
576 tlb_flush_all_cpus_synced(cs);
577 }
578
579 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
580 uint64_t value)
581 {
582 CPUState *cs = env_cpu(env);
583
584 tlb_flush_all_cpus_synced(cs);
585 }
586
587 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
588 uint64_t value)
589 {
590 CPUState *cs = env_cpu(env);
591
592 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
593 }
594
595 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
596 uint64_t value)
597 {
598 CPUState *cs = env_cpu(env);
599
600 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
601 }
602
603 /*
604 * Non-IS variants of TLB operations are upgraded to
605 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
606 * force broadcast of these operations.
607 */
608 static bool tlb_force_broadcast(CPUARMState *env)
609 {
610 return (env->cp15.hcr_el2 & HCR_FB) &&
611 arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
612 }
613
614 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
615 uint64_t value)
616 {
617 /* Invalidate all (TLBIALL) */
618 CPUState *cs = env_cpu(env);
619
620 if (tlb_force_broadcast(env)) {
621 tlb_flush_all_cpus_synced(cs);
622 } else {
623 tlb_flush(cs);
624 }
625 }
626
627 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
628 uint64_t value)
629 {
630 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
631 CPUState *cs = env_cpu(env);
632
633 value &= TARGET_PAGE_MASK;
634 if (tlb_force_broadcast(env)) {
635 tlb_flush_page_all_cpus_synced(cs, value);
636 } else {
637 tlb_flush_page(cs, value);
638 }
639 }
640
641 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
642 uint64_t value)
643 {
644 /* Invalidate by ASID (TLBIASID) */
645 CPUState *cs = env_cpu(env);
646
647 if (tlb_force_broadcast(env)) {
648 tlb_flush_all_cpus_synced(cs);
649 } else {
650 tlb_flush(cs);
651 }
652 }
653
654 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
655 uint64_t value)
656 {
657 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
658 CPUState *cs = env_cpu(env);
659
660 value &= TARGET_PAGE_MASK;
661 if (tlb_force_broadcast(env)) {
662 tlb_flush_page_all_cpus_synced(cs, value);
663 } else {
664 tlb_flush_page(cs, value);
665 }
666 }
667
668 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
669 uint64_t value)
670 {
671 CPUState *cs = env_cpu(env);
672
673 tlb_flush_by_mmuidx(cs,
674 ARMMMUIdxBit_E10_1 |
675 ARMMMUIdxBit_E10_1_PAN |
676 ARMMMUIdxBit_E10_0 |
677 ARMMMUIdxBit_Stage2);
678 }
679
680 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
681 uint64_t value)
682 {
683 CPUState *cs = env_cpu(env);
684
685 tlb_flush_by_mmuidx_all_cpus_synced(cs,
686 ARMMMUIdxBit_E10_1 |
687 ARMMMUIdxBit_E10_1_PAN |
688 ARMMMUIdxBit_E10_0 |
689 ARMMMUIdxBit_Stage2);
690 }
691
692 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
693 uint64_t value)
694 {
695 /* Invalidate by IPA. This has to invalidate any structures that
696 * contain only stage 2 translation information, but does not need
697 * to apply to structures that contain combined stage 1 and stage 2
698 * translation information.
699 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
700 */
701 CPUState *cs = env_cpu(env);
702 uint64_t pageaddr;
703
704 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
705 return;
706 }
707
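/* The register value carries IPA[39:12] in its low bits; shift it back up
 * to form the address used for the stage 2 page flush.
 */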
708 pageaddr = sextract64(value << 12, 0, 40);
709
710 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
711 }
712
713 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
714 uint64_t value)
715 {
716 CPUState *cs = env_cpu(env);
717 uint64_t pageaddr;
718
719 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
720 return;
721 }
722
723 pageaddr = sextract64(value << 12, 0, 40);
724
725 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
726 ARMMMUIdxBit_Stage2);
727 }
728
729 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
730 uint64_t value)
731 {
732 CPUState *cs = env_cpu(env);
733
734 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
735 }
736
737 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
738 uint64_t value)
739 {
740 CPUState *cs = env_cpu(env);
741
742 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
743 }
744
745 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
746 uint64_t value)
747 {
748 CPUState *cs = env_cpu(env);
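/* Mask off bits [11:0] to get the page-aligned VA for the EL2 flush */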
749 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
750
751 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
752 }
753
754 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
755 uint64_t value)
756 {
757 CPUState *cs = env_cpu(env);
758 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
759
760 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
761 ARMMMUIdxBit_E2);
762 }
763
764 static const ARMCPRegInfo cp_reginfo[] = {
765 /* Define the secure and non-secure FCSE identifier CP registers
766 * separately because there is no secure bank in V8 (no _EL3). This allows
767 * the secure register to be properly reset and migrated. There is also no
768 * v8 EL1 version of the register so the non-secure instance stands alone.
769 */
770 { .name = "FCSEIDR",
771 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
772 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
773 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
774 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
775 { .name = "FCSEIDR_S",
776 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
777 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
778 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
779 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
780 /* Define the secure and non-secure context identifier CP registers
781 * separately because there is no secure bank in V8 (no _EL3). This allows
782 * the secure register to be properly reset and migrated. In the
783 * non-secure case, the 32-bit register will have reset and migration
784 * disabled during registration as it is handled by the 64-bit instance.
785 */
786 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
787 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
788 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
789 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
790 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
791 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
792 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
793 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
794 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
795 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
796 REGINFO_SENTINEL
797 };
798
799 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
800 /* NB: Some of these registers exist in v8 but with more precise
801 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
802 */
803 /* MMU Domain access control / MPU write buffer control */
804 { .name = "DACR",
805 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
806 .access = PL1_RW, .resetvalue = 0,
807 .writefn = dacr_write, .raw_writefn = raw_write,
808 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
809 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
810 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
811 * For v6 and v5, these mappings are overly broad.
812 */
813 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
814 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
815 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
816 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
817 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
818 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
819 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
820 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
821 /* Cache maintenance ops; some of this space may be overridden later. */
822 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
823 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
824 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
825 REGINFO_SENTINEL
826 };
827
828 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
829 /* Not all pre-v6 cores implemented this WFI, so this is slightly
830 * over-broad.
831 */
832 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
833 .access = PL1_W, .type = ARM_CP_WFI },
834 REGINFO_SENTINEL
835 };
836
837 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
838 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
839 * is UNPREDICTABLE; we choose to NOP as most implementations do).
840 */
841 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
842 .access = PL1_W, .type = ARM_CP_WFI },
843 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
844 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
845 * OMAPCP will override this space.
846 */
847 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
848 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
849 .resetvalue = 0 },
850 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
851 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
852 .resetvalue = 0 },
853 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
854 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
855 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
856 .resetvalue = 0 },
857 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
858 * implementing it as RAZ means the "debug architecture version" bits
859 * will read as a reserved value, which should cause Linux to not try
860 * to use the debug hardware.
861 */
862 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
863 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
864 /* MMU TLB control. Note that the wildcarding means we cover not just
865 * the unified TLB ops but also the dside/iside/inner-shareable variants.
866 */
867 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
868 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
869 .type = ARM_CP_NO_RAW },
870 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
871 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
872 .type = ARM_CP_NO_RAW },
873 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
874 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
875 .type = ARM_CP_NO_RAW },
876 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
877 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
878 .type = ARM_CP_NO_RAW },
879 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
880 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
881 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
882 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
883 REGINFO_SENTINEL
884 };
885
886 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
887 uint64_t value)
888 {
889 uint32_t mask = 0;
890
891 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
892 if (!arm_feature(env, ARM_FEATURE_V8)) {
893 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
894 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
895 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
896 */
897 if (arm_feature(env, ARM_FEATURE_VFP)) {
898 /* VFP coprocessor: cp10 & cp11 [23:20] */
899 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
900
901 if (!arm_feature(env, ARM_FEATURE_NEON)) {
902 /* ASEDIS [31] bit is RAO/WI */
903 value |= (1 << 31);
904 }
905
906 /* VFPv3 and upwards with NEON implement 32 double precision
907 * registers (D0-D31).
908 */
909 if (!arm_feature(env, ARM_FEATURE_NEON) ||
910 !arm_feature(env, ARM_FEATURE_VFP3)) {
911 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
912 value |= (1 << 30);
913 }
914 }
915 value &= mask;
916 }
917
918 /*
919 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
920 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
921 */
922 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
923 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
924 value &= ~(0xf << 20);
925 value |= env->cp15.cpacr_el1 & (0xf << 20);
926 }
927
928 env->cp15.cpacr_el1 = value;
929 }
930
931 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
932 {
933 /*
934 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
935 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
936 */
937 uint64_t value = env->cp15.cpacr_el1;
938
939 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
940 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
941 value &= ~(0xf << 20);
942 }
943 return value;
944 }
945
946
947 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
948 {
949 /* Call cpacr_write() so that we reset with the correct RAO bits set
950 * for our CPU features.
951 */
952 cpacr_write(env, ri, 0);
953 }
954
955 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
956 bool isread)
957 {
958 if (arm_feature(env, ARM_FEATURE_V8)) {
959 /* Check if CPACR accesses are to be trapped to EL2 */
960 if (arm_current_el(env) == 1 &&
961 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
962 return CP_ACCESS_TRAP_EL2;
963 /* Check if CPACR accesses are to be trapped to EL3 */
964 } else if (arm_current_el(env) < 3 &&
965 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
966 return CP_ACCESS_TRAP_EL3;
967 }
968 }
969
970 return CP_ACCESS_OK;
971 }
972
973 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
974 bool isread)
975 {
976 /* Check if CPTR accesses are set to trap to EL3 */
977 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
978 return CP_ACCESS_TRAP_EL3;
979 }
980
981 return CP_ACCESS_OK;
982 }
983
984 static const ARMCPRegInfo v6_cp_reginfo[] = {
985 /* prefetch by MVA in v6, NOP in v7 */
986 { .name = "MVA_prefetch",
987 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
988 .access = PL1_W, .type = ARM_CP_NOP },
989 /* We need to break the TB after ISB to execute self-modifying code
990 * correctly and also to take any pending interrupts immediately.
991 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
992 */
993 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
994 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
995 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
996 .access = PL0_W, .type = ARM_CP_NOP },
997 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
998 .access = PL0_W, .type = ARM_CP_NOP },
999 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
1000 .access = PL1_RW,
1001 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1002 offsetof(CPUARMState, cp15.ifar_ns) },
1003 .resetvalue = 0, },
1004 /* Watchpoint Fault Address Register : should actually only be present
1005 * for 1136, 1176, 11MPCore.
1006 */
1007 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1008 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1009 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1010 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1011 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1012 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1013 REGINFO_SENTINEL
1014 };
1015
1016 /* Definitions for the PMU registers */
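/* PMCR layout: N is bits [15:11] (number of event counters); LC, DP, X, D,
 * C, P and E are bits 6 down to 0.
 */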
1017 #define PMCRN_MASK 0xf800
1018 #define PMCRN_SHIFT 11
1019 #define PMCRLC 0x40
1020 #define PMCRDP 0x20
1021 #define PMCRX 0x10
1022 #define PMCRD 0x8
1023 #define PMCRC 0x4
1024 #define PMCRP 0x2
1025 #define PMCRE 0x1
1026 /*
1027 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
1028 * which can be written as 1 to trigger behaviour but which stay RAZ).
1029 */
1030 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1031
1032 #define PMXEVTYPER_P 0x80000000
1033 #define PMXEVTYPER_U 0x40000000
1034 #define PMXEVTYPER_NSK 0x20000000
1035 #define PMXEVTYPER_NSU 0x10000000
1036 #define PMXEVTYPER_NSH 0x08000000
1037 #define PMXEVTYPER_M 0x04000000
1038 #define PMXEVTYPER_MT 0x02000000
1039 #define PMXEVTYPER_EVTCOUNT 0x0000ffff
1040 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1041 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1042 PMXEVTYPER_M | PMXEVTYPER_MT | \
1043 PMXEVTYPER_EVTCOUNT)
1044
1045 #define PMCCFILTR 0xf8000000
1046 #define PMCCFILTR_M PMXEVTYPER_M
1047 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1048
1049 static inline uint32_t pmu_num_counters(CPUARMState *env)
1050 {
1051 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1052 }
1053
1054 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1055 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1056 {
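/* e.g. with PMCR.N == 4 this is 0x8000000f: bit 31 for PMCCNTR plus
 * bits [3:0] for the four event counters
 */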
1057 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1058 }
1059
1060 typedef struct pm_event {
1061 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1062 /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1063 bool (*supported)(CPUARMState *);
1064 /*
1065 * Retrieve the current count of the underlying event. The programmed
1066 * counters hold a difference from the return value from this function
1067 */
1068 uint64_t (*get_count)(CPUARMState *);
1069 /*
1070 * Return how many nanoseconds it will take (at a minimum) for count events
1071 * to occur. A negative value indicates the counter will never overflow, or
1072 * that the counter has otherwise arranged for the overflow bit to be set
1073 * and the PMU interrupt to be raised on overflow.
1074 */
1075 int64_t (*ns_per_count)(uint64_t);
1076 } pm_event;
1077
1078 static bool event_always_supported(CPUARMState *env)
1079 {
1080 return true;
1081 }
1082
1083 static uint64_t swinc_get_count(CPUARMState *env)
1084 {
1085 /*
1086 * SW_INCR events are written directly to the pmevcntr's by writes to
1087 * PMSWINC, so there is no underlying count maintained by the PMU itself
1088 */
1089 return 0;
1090 }
1091
1092 static int64_t swinc_ns_per(uint64_t ignored)
1093 {
1094 return -1;
1095 }
1096
1097 /*
1098 * Return the underlying cycle count for the PMU cycle counters. In
1099 * usermode there is no virtual clock, so fall back to the host tick count.
1100 */
1101 static uint64_t cycles_get_count(CPUARMState *env)
1102 {
1103 #ifndef CONFIG_USER_ONLY
1104 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1105 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1106 #else
1107 return cpu_get_host_ticks();
1108 #endif
1109 }
1110
1111 #ifndef CONFIG_USER_ONLY
1112 static int64_t cycles_ns_per(uint64_t cycles)
1113 {
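/* ARM_CPU_FREQ is defined above as 1GHz, so this is effectively a 1:1
 * cycles-to-nanoseconds conversion.
 */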
1114 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1115 }
1116
1117 static bool instructions_supported(CPUARMState *env)
1118 {
1119 return use_icount == 1 /* Precise instruction counting */;
1120 }
1121
1122 static uint64_t instructions_get_count(CPUARMState *env)
1123 {
1124 return (uint64_t)cpu_get_icount_raw();
1125 }
1126
1127 static int64_t instructions_ns_per(uint64_t icount)
1128 {
1129 return cpu_icount_to_ns((int64_t)icount);
1130 }
1131 #endif
1132
1133 static bool pmu_8_1_events_supported(CPUARMState *env)
1134 {
1135 /* For events which are supported in any v8.1 PMU */
1136 return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
1137 }
1138
1139 static bool pmu_8_4_events_supported(CPUARMState *env)
1140 {
1141 /* For events which are supported in any v8.4 PMU */
1142 return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
1143 }
1144
1145 static uint64_t zero_event_get_count(CPUARMState *env)
1146 {
1147 /* For events which never fire on QEMU; their count is always zero */
1148 return 0;
1149 }
1150
1151 static int64_t zero_event_ns_per(uint64_t cycles)
1152 {
1153 /* An event which never fires can never overflow */
1154 return -1;
1155 }
1156
1157 static const pm_event pm_events[] = {
1158 { .number = 0x000, /* SW_INCR */
1159 .supported = event_always_supported,
1160 .get_count = swinc_get_count,
1161 .ns_per_count = swinc_ns_per,
1162 },
1163 #ifndef CONFIG_USER_ONLY
1164 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1165 .supported = instructions_supported,
1166 .get_count = instructions_get_count,
1167 .ns_per_count = instructions_ns_per,
1168 },
1169 { .number = 0x011, /* CPU_CYCLES, Cycle */
1170 .supported = event_always_supported,
1171 .get_count = cycles_get_count,
1172 .ns_per_count = cycles_ns_per,
1173 },
1174 #endif
1175 { .number = 0x023, /* STALL_FRONTEND */
1176 .supported = pmu_8_1_events_supported,
1177 .get_count = zero_event_get_count,
1178 .ns_per_count = zero_event_ns_per,
1179 },
1180 { .number = 0x024, /* STALL_BACKEND */
1181 .supported = pmu_8_1_events_supported,
1182 .get_count = zero_event_get_count,
1183 .ns_per_count = zero_event_ns_per,
1184 },
1185 { .number = 0x03c, /* STALL */
1186 .supported = pmu_8_4_events_supported,
1187 .get_count = zero_event_get_count,
1188 .ns_per_count = zero_event_ns_per,
1189 },
1190 };
1191
1192 /*
1193 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1194 * events (i.e. the statistical profiling extension), this implementation
1195 * should first be updated to something sparse instead of the current
1196 * supported_event_map[] array.
1197 */
1198 #define MAX_EVENT_ID 0x3c
1199 #define UNSUPPORTED_EVENT UINT16_MAX
1200 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1201
1202 /*
1203 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1204 * of ARM event numbers to indices in our pm_events array.
1205 *
1206 * Note: Events in the 0x40XX range are not currently supported.
1207 */
1208 void pmu_init(ARMCPU *cpu)
1209 {
1210 unsigned int i;
1211
1212 /*
1213 * Empty supported_event_map and cpu->pmceid[01] before adding supported
1214 * events to them
1215 */
1216 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1217 supported_event_map[i] = UNSUPPORTED_EVENT;
1218 }
1219 cpu->pmceid0 = 0;
1220 cpu->pmceid1 = 0;
1221
1222 for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1223 const pm_event *cnt = &pm_events[i];
1224 assert(cnt->number <= MAX_EVENT_ID);
1225 /* We do not currently support events in the 0x40xx range */
1226 assert(cnt->number <= 0x3f);
1227
1228 if (cnt->supported(&cpu->env)) {
1229 supported_event_map[cnt->number] = i;
1230 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
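/* Event numbers 0x00-0x1f are advertised in PMCEID0, 0x20-0x3f in PMCEID1 */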
1231 if (cnt->number & 0x20) {
1232 cpu->pmceid1 |= event_mask;
1233 } else {
1234 cpu->pmceid0 |= event_mask;
1235 }
1236 }
1237 }
1238 }
1239
1240 /*
1241 * Check at runtime whether a PMU event is supported for the current machine
1242 */
1243 static bool event_supported(uint16_t number)
1244 {
1245 if (number > MAX_EVENT_ID) {
1246 return false;
1247 }
1248 return supported_event_map[number] != UNSUPPORTED_EVENT;
1249 }
1250
1251 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1252 bool isread)
1253 {
1254 /* Performance monitor registers user accessibility is controlled
1255 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1256 * trapping to EL2 or EL3 for other accesses.
1257 */
1258 int el = arm_current_el(env);
1259
1260 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1261 return CP_ACCESS_TRAP;
1262 }
1263 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1264 && !arm_is_secure_below_el3(env)) {
1265 return CP_ACCESS_TRAP_EL2;
1266 }
1267 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1268 return CP_ACCESS_TRAP_EL3;
1269 }
1270
1271 return CP_ACCESS_OK;
1272 }
1273
1274 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1275 const ARMCPRegInfo *ri,
1276 bool isread)
1277 {
1278 /* ER: event counter read trap control */
1279 if (arm_feature(env, ARM_FEATURE_V8)
1280 && arm_current_el(env) == 0
1281 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1282 && isread) {
1283 return CP_ACCESS_OK;
1284 }
1285
1286 return pmreg_access(env, ri, isread);
1287 }
1288
1289 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1290 const ARMCPRegInfo *ri,
1291 bool isread)
1292 {
1293 /* SW: software increment write trap control */
1294 if (arm_feature(env, ARM_FEATURE_V8)
1295 && arm_current_el(env) == 0
1296 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1297 && !isread) {
1298 return CP_ACCESS_OK;
1299 }
1300
1301 return pmreg_access(env, ri, isread);
1302 }
1303
1304 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1305 const ARMCPRegInfo *ri,
1306 bool isread)
1307 {
1308 /* ER: event counter read trap control */
1309 if (arm_feature(env, ARM_FEATURE_V8)
1310 && arm_current_el(env) == 0
1311 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1312 return CP_ACCESS_OK;
1313 }
1314
1315 return pmreg_access(env, ri, isread);
1316 }
1317
1318 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1319 const ARMCPRegInfo *ri,
1320 bool isread)
1321 {
1322 /* CR: cycle counter read trap control */
1323 if (arm_feature(env, ARM_FEATURE_V8)
1324 && arm_current_el(env) == 0
1325 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1326 && isread) {
1327 return CP_ACCESS_OK;
1328 }
1329
1330 return pmreg_access(env, ri, isread);
1331 }
1332
1333 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1334 * the current EL, security state, and register configuration.
1335 */
1336 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1337 {
1338 uint64_t filter;
1339 bool e, p, u, nsk, nsu, nsh, m;
1340 bool enabled, prohibited, filtered;
1341 bool secure = arm_is_secure(env);
1342 int el = arm_current_el(env);
1343 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1344
1345 if (!arm_feature(env, ARM_FEATURE_PMU)) {
1346 return false;
1347 }
1348
1349 if (!arm_feature(env, ARM_FEATURE_EL2) ||
1350 (counter < hpmn || counter == 31)) {
1351 e = env->cp15.c9_pmcr & PMCRE;
1352 } else {
1353 e = env->cp15.mdcr_el2 & MDCR_HPME;
1354 }
1355 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1356
1357 if (!secure) {
1358 if (el == 2 && (counter < hpmn || counter == 31)) {
1359 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1360 } else {
1361 prohibited = false;
1362 }
1363 } else {
1364 prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1365 (env->cp15.mdcr_el3 & MDCR_SPME);
1366 }
1367
1368 if (prohibited && counter == 31) {
1369 prohibited = env->cp15.c9_pmcr & PMCRDP;
1370 }
1371
1372 if (counter == 31) {
1373 filter = env->cp15.pmccfiltr_el0;
1374 } else {
1375 filter = env->cp15.c14_pmevtyper[counter];
1376 }
1377
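/* Decode the PMEVTYPER/PMCCFILTR filter bits: each bit, together with its
 * NS* counterpart, controls whether counting is excluded at a particular
 * exception level and security state.
 */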
1378 p = filter & PMXEVTYPER_P;
1379 u = filter & PMXEVTYPER_U;
1380 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1381 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1382 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1383 m = arm_el_is_aa64(env, 1) &&
1384 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1385
1386 if (el == 0) {
1387 filtered = secure ? u : u != nsu;
1388 } else if (el == 1) {
1389 filtered = secure ? p : p != nsk;
1390 } else if (el == 2) {
1391 filtered = !nsh;
1392 } else { /* EL3 */
1393 filtered = m != p;
1394 }
1395
1396 if (counter != 31) {
1397 /*
1398 * If not checking PMCCNTR, ensure the counter is set up to count an
1399 * event we support
1400 */
1401 uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1402 if (!event_supported(event)) {
1403 return false;
1404 }
1405 }
1406
1407 return enabled && !prohibited && !filtered;
1408 }
1409
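/* The PMU interrupt is asserted while the PMU is enabled (PMCR.E) and any
 * counter has both its overflow status and interrupt-enable bits set.
 */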
1410 static void pmu_update_irq(CPUARMState *env)
1411 {
1412 ARMCPU *cpu = env_archcpu(env);
1413 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1414 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1415 }
1416
1417 /*
1418 * Ensure c15_ccnt is the guest-visible count so that operations such as
1419 * enabling/disabling the counter or filtering, modifying the count itself,
1420 * etc. can be done logically. This is essentially a no-op if the counter is
1421 * not enabled at the time of the call.
1422 */
1423 static void pmccntr_op_start(CPUARMState *env)
1424 {
1425 uint64_t cycles = cycles_get_count(env);
1426
1427 if (pmu_counter_enabled(env, 31)) {
1428 uint64_t eff_cycles = cycles;
1429 if (env->cp15.c9_pmcr & PMCRD) {
1430 /* Increment once every 64 processor clock cycles */
1431 eff_cycles /= 64;
1432 }
1433
1434 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1435
1436 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
1437 1ull << 63 : 1ull << 31;
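/* Overflow occurred if the selected top bit (63 with PMCR.LC set, else 31)
 * was set in the old value and is clear in the new one.
 */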
1438 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1439 env->cp15.c9_pmovsr |= (1 << 31);
1440 pmu_update_irq(env);
1441 }
1442
1443 env->cp15.c15_ccnt = new_pmccntr;
1444 }
1445 env->cp15.c15_ccnt_delta = cycles;
1446 }
1447
1448 /*
1449 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1450 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1451 * pmccntr_op_start.
1452 */
1453 static void pmccntr_op_finish(CPUARMState *env)
1454 {
1455 if (pmu_counter_enabled(env, 31)) {
1456 #ifndef CONFIG_USER_ONLY
1457 /* Calculate when the counter will next overflow */
1458 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1459 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1460 remaining_cycles = (uint32_t)remaining_cycles;
1461 }
1462 int64_t overflow_in = cycles_ns_per(remaining_cycles);
1463
1464 if (overflow_in > 0) {
1465 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1466 overflow_in;
1467 ARMCPU *cpu = env_archcpu(env);
1468 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1469 }
1470 #endif
1471
1472 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1473 if (env->cp15.c9_pmcr & PMCRD) {
1474 /* Increment once every 64 processor clock cycles */
1475 prev_cycles /= 64;
1476 }
1477 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1478 }
1479 }
1480
1481 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1482 {
1483
1484 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1485 uint64_t count = 0;
1486 if (event_supported(event)) {
1487 uint16_t event_idx = supported_event_map[event];
1488 count = pm_events[event_idx].get_count(env);
1489 }
1490
1491 if (pmu_counter_enabled(env, counter)) {
1492 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1493
1494 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1495 env->cp15.c9_pmovsr |= (1 << counter);
1496 pmu_update_irq(env);
1497 }
1498 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1499 }
1500 env->cp15.c14_pmevcntr_delta[counter] = count;
1501 }
1502
1503 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1504 {
1505 if (pmu_counter_enabled(env, counter)) {
1506 #ifndef CONFIG_USER_ONLY
1507 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1508 uint16_t event_idx = supported_event_map[event];
1509 uint64_t delta = UINT32_MAX -
1510 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1511 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1512
1513 if (overflow_in > 0) {
1514 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1515 overflow_in;
1516 ARMCPU *cpu = env_archcpu(env);
1517 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1518 }
1519 #endif
1520
1521 env->cp15.c14_pmevcntr_delta[counter] -=
1522 env->cp15.c14_pmevcntr[counter];
1523 }
1524 }
1525
1526 void pmu_op_start(CPUARMState *env)
1527 {
1528 unsigned int i;
1529 pmccntr_op_start(env);
1530 for (i = 0; i < pmu_num_counters(env); i++) {
1531 pmevcntr_op_start(env, i);
1532 }
1533 }
1534
1535 void pmu_op_finish(CPUARMState *env)
1536 {
1537 unsigned int i;
1538 pmccntr_op_finish(env);
1539 for (i = 0; i < pmu_num_counters(env); i++) {
1540 pmevcntr_op_finish(env, i);
1541 }
1542 }
1543
1544 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1545 {
1546 pmu_op_start(&cpu->env);
1547 }
1548
1549 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1550 {
1551 pmu_op_finish(&cpu->env);
1552 }
1553
1554 void arm_pmu_timer_cb(void *opaque)
1555 {
1556 ARMCPU *cpu = opaque;
1557
1558 /*
1559 * Update all the counter values based on the current underlying counts,
1560 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1561 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1562 * counter may expire.
1563 */
1564 pmu_op_start(&cpu->env);
1565 pmu_op_finish(&cpu->env);
1566 }
1567
1568 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1569 uint64_t value)
1570 {
1571 pmu_op_start(env);
1572
1573 if (value & PMCRC) {
1574 /* The counter has been reset */
1575 env->cp15.c15_ccnt = 0;
1576 }
1577
1578 if (value & PMCRP) {
1579 unsigned int i;
1580 for (i = 0; i < pmu_num_counters(env); i++) {
1581 env->cp15.c14_pmevcntr[i] = 0;
1582 }
1583 }
1584
1585 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
1586 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
1587
1588 pmu_op_finish(env);
1589 }
1590
1591 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1592 uint64_t value)
1593 {
1594 unsigned int i;
1595 for (i = 0; i < pmu_num_counters(env); i++) {
1596 /* Increment a counter's count iff: */
1597 if ((value & (1 << i)) && /* counter's bit is set */
1598 /* counter is enabled and not filtered */
1599 pmu_counter_enabled(env, i) &&
1600 /* counter is SW_INCR */
1601 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1602 pmevcntr_op_start(env, i);
1603
1604 /*
1605 * Detect if this write causes an overflow since we can't predict
1606 * PMSWINC overflows like we can for other events
1607 */
1608 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1609
1610 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1611 env->cp15.c9_pmovsr |= (1 << i);
1612 pmu_update_irq(env);
1613 }
1614
1615 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1616
1617 pmevcntr_op_finish(env, i);
1618 }
1619 }
1620 }
1621
1622 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1623 {
1624 uint64_t ret;
1625 pmccntr_op_start(env);
1626 ret = env->cp15.c15_ccnt;
1627 pmccntr_op_finish(env);
1628 return ret;
1629 }
1630
1631 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1632 uint64_t value)
1633 {
1634 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1635 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; instead,
1636 * we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1637 * accessed.
1638 */
1639 env->cp15.c9_pmselr = value & 0x1f;
1640 }
1641
1642 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1643 uint64_t value)
1644 {
1645 pmccntr_op_start(env);
1646 env->cp15.c15_ccnt = value;
1647 pmccntr_op_finish(env);
1648 }
1649
1650 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1651 uint64_t value)
1652 {
1653 uint64_t cur_val = pmccntr_read(env, NULL);
1654
1655 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1656 }
1657
1658 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1659 uint64_t value)
1660 {
1661 pmccntr_op_start(env);
1662 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1663 pmccntr_op_finish(env);
1664 }
1665
1666 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1667 uint64_t value)
1668 {
1669 pmccntr_op_start(env);
1670 /* M is not accessible from AArch32 */
1671 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1672 (value & PMCCFILTR);
1673 pmccntr_op_finish(env);
1674 }
1675
1676 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1677 {
1678 /* M is not visible in AArch32 */
1679 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1680 }
1681
1682 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1683 uint64_t value)
1684 {
1685 value &= pmu_counter_mask(env);
1686 env->cp15.c9_pmcnten |= value;
1687 }
1688
1689 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1690 uint64_t value)
1691 {
1692 value &= pmu_counter_mask(env);
1693 env->cp15.c9_pmcnten &= ~value;
1694 }
1695
1696 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1697 uint64_t value)
1698 {
1699 value &= pmu_counter_mask(env);
1700 env->cp15.c9_pmovsr &= ~value;
1701 pmu_update_irq(env);
1702 }
1703
1704 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1705 uint64_t value)
1706 {
1707 value &= pmu_counter_mask(env);
1708 env->cp15.c9_pmovsr |= value;
1709 pmu_update_irq(env);
1710 }
1711
1712 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1713 uint64_t value, const uint8_t counter)
1714 {
1715 if (counter == 31) {
1716 pmccfiltr_write(env, ri, value);
1717 } else if (counter < pmu_num_counters(env)) {
1718 pmevcntr_op_start(env, counter);
1719
1720 /*
1721 * If this counter's event type is changing, store the current
1722 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1723 * pmevcntr_op_finish has the correct baseline when it converts back to
1724 * a delta.
1725 */
1726 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1727 PMXEVTYPER_EVTCOUNT;
1728 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1729 if (old_event != new_event) {
1730 uint64_t count = 0;
1731 if (event_supported(new_event)) {
1732 uint16_t event_idx = supported_event_map[new_event];
1733 count = pm_events[event_idx].get_count(env);
1734 }
1735 env->cp15.c14_pmevcntr_delta[counter] = count;
1736 }
1737
1738 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1739 pmevcntr_op_finish(env, counter);
1740 }
1741 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1742 * PMSELR value is equal to or greater than the number of implemented
1743 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1744 */
1745 }
1746
1747 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1748 const uint8_t counter)
1749 {
1750 if (counter == 31) {
1751 return env->cp15.pmccfiltr_el0;
1752 } else if (counter < pmu_num_counters(env)) {
1753 return env->cp15.c14_pmevtyper[counter];
1754 } else {
1755 /*
1756 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1757 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1758 */
1759 return 0;
1760 }
1761 }
1762
1763 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1764 uint64_t value)
1765 {
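/* PMEVTYPER<n>: the counter number n is encoded as CRm[1:0]:opc2[2:0] */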
1766 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1767 pmevtyper_write(env, ri, value, counter);
1768 }
1769
1770 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1771 uint64_t value)
1772 {
1773 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1774 env->cp15.c14_pmevtyper[counter] = value;
1775
1776 /*
1777 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1778 * pmu_op_finish calls when loading saved state for a migration. Because
1779 * we're potentially updating the type of event here, the value written to
1780 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1781 * different counter type. Therefore, we need to set this value to the
1782 * current count for the counter type we're writing so that pmu_op_finish
1783 * has the correct count for its calculation.
1784 */
1785 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1786 if (event_supported(event)) {
1787 uint16_t event_idx = supported_event_map[event];
1788 env->cp15.c14_pmevcntr_delta[counter] =
1789 pm_events[event_idx].get_count(env);
1790 }
1791 }
1792
1793 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1794 {
1795 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1796 return pmevtyper_read(env, ri, counter);
1797 }
1798
1799 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1800 uint64_t value)
1801 {
1802 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1803 }
1804
1805 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1806 {
1807 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1808 }
1809
1810 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1811 uint64_t value, uint8_t counter)
1812 {
1813 if (counter < pmu_num_counters(env)) {
1814 pmevcntr_op_start(env, counter);
1815 env->cp15.c14_pmevcntr[counter] = value;
1816 pmevcntr_op_finish(env, counter);
1817 }
1818 /*
1819 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1820 * are CONSTRAINED UNPREDICTABLE.
1821 */
1822 }
1823
1824 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1825 uint8_t counter)
1826 {
1827 if (counter < pmu_num_counters(env)) {
1828 uint64_t ret;
1829 pmevcntr_op_start(env, counter);
1830 ret = env->cp15.c14_pmevcntr[counter];
1831 pmevcntr_op_finish(env, counter);
1832 return ret;
1833 } else {
1834 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1835 * are CONSTRAINED UNPREDICTABLE. */
1836 return 0;
1837 }
1838 }
1839
1840 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1841 uint64_t value)
1842 {
1843 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1844 pmevcntr_write(env, ri, value, counter);
1845 }
1846
1847 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1848 {
1849 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1850 return pmevcntr_read(env, ri, counter);
1851 }
1852
1853 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1854 uint64_t value)
1855 {
1856 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1857 assert(counter < pmu_num_counters(env));
1858 env->cp15.c14_pmevcntr[counter] = value;
1859 pmevcntr_write(env, ri, value, counter);
1860 }
1861
1862 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1863 {
1864 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1865 assert(counter < pmu_num_counters(env));
1866 return env->cp15.c14_pmevcntr[counter];
1867 }
1868
1869 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1870 uint64_t value)
1871 {
1872 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1873 }
1874
1875 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1876 {
1877 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1878 }
1879
1880 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1881 uint64_t value)
1882 {
1883 if (arm_feature(env, ARM_FEATURE_V8)) {
1884 env->cp15.c9_pmuserenr = value & 0xf;
1885 } else {
1886 env->cp15.c9_pmuserenr = value & 1;
1887 }
1888 }
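
/*
 * Editorial note (architectural background, an assumption not restated in
 * this section): the four low bits of PMUSERENR are EN (bit 0), SW (bit 1),
 * CR (bit 2) and ER (bit 3), so the v8 path above keeps all four with
 * "value & 0xf" while the pre-v8 path keeps only the EN bit with
 * "value & 1".
 */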
1889
1890 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1891 uint64_t value)
1892 {
1893 /* Only the cycle counter (C) bit and implemented counter bits can be set */
1894 value &= pmu_counter_mask(env);
1895 env->cp15.c9_pminten |= value;
1896 pmu_update_irq(env);
1897 }
1898
1899 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1900 uint64_t value)
1901 {
1902 value &= pmu_counter_mask(env);
1903 env->cp15.c9_pminten &= ~value;
1904 pmu_update_irq(env);
1905 }
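
/*
 * Editorial sketch (an assumption about the helper defined earlier in this
 * file, shown here only for illustration): pmu_counter_mask() is the usual
 * "cycle counter plus implemented event counters" mask, roughly
 *
 *     mask = (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
 *
 * so with PMCR.N == 4, writing ~0 to PMINTENSET above latches 0x8000000f
 * and leaves the bits for unimplemented counters clear.
 */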
1906
1907 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1908 uint64_t value)
1909 {
1910 /* Note that even though the AArch64 view of this register has bits
1911 * [10:0] all RES0, we can only mask the bottom 5 bits, to comply with
1912 * the architectural requirements for bits which are RES0 only in some
1913 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1914 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1915 */
1916 raw_write(env, ri, value & ~0x1FULL);
1917 }
1918
1919 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1920 {
1921 /* Begin with base v8.0 state. */
1922 uint32_t valid_mask = 0x3fff;
1923 ARMCPU *cpu = env_archcpu(env);
1924
1925 if (arm_el_is_aa64(env, 3)) {
1926 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
1927 valid_mask &= ~SCR_NET;
1928 } else {
1929 valid_mask &= ~(SCR_RW | SCR_ST);
1930 }
1931
1932 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1933 valid_mask &= ~SCR_HCE;
1934
1935 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1936 * supported if EL2 exists. The bit is UNK/SBZP when
1937 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1938 * when EL2 is unavailable.
1939 * On ARMv8, this bit is always available.
1940 */
1941 if (arm_feature(env, ARM_FEATURE_V7) &&
1942 !arm_feature(env, ARM_FEATURE_V8)) {
1943 valid_mask &= ~SCR_SMD;
1944 }
1945 }
1946 if (cpu_isar_feature(aa64_lor, cpu)) {
1947 valid_mask |= SCR_TLOR;
1948 }
1949 if (cpu_isar_feature(aa64_pauth, cpu)) {
1950 valid_mask |= SCR_API | SCR_APK;
1951 }
1952
1953 /* Clear all-context RES0 bits. */
1954 value &= valid_mask;
1955 raw_write(env, ri, value);
1956 }
1957
1958 static CPAccessResult access_aa64_tid2(CPUARMState *env,
1959 const ARMCPRegInfo *ri,
1960 bool isread)
1961 {
1962 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
1963 return CP_ACCESS_TRAP_EL2;
1964 }
1965
1966 return CP_ACCESS_OK;
1967 }
1968
1969 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1970 {
1971 ARMCPU *cpu = env_archcpu(env);
1972
1973 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1974 * bank
1975 */
1976 uint32_t index = A32_BANKED_REG_GET(env, csselr,
1977 ri->secure & ARM_CP_SECSTATE_S);
1978
1979 return cpu->ccsidr[index];
1980 }
1981
1982 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1983 uint64_t value)
1984 {
1985 raw_write(env, ri, value & 0xf);
1986 }
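
/*
 * Editorial note (standard CSSELR layout, an assumption not restated in
 * this file): the value kept by csselr_write() has InD in bit [0] and
 * Level in bits [3:1], so 0 selects the L1 data/unified cache, 1 the L1
 * instruction cache and 2 the L2 cache, each indexing cpu->ccsidr[] via
 * ccsidr_read() above.
 */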
1987
1988 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1989 {
1990 CPUState *cs = env_cpu(env);
1991 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1992 uint64_t ret = 0;
1993 bool allow_virt = (arm_current_el(env) == 1 &&
1994 (!arm_is_secure_below_el3(env) ||
1995 (env->cp15.scr_el3 & SCR_EEL2)));
1996
1997 if (allow_virt && (hcr_el2 & HCR_IMO)) {
1998 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1999 ret |= CPSR_I;
2000 }
2001 } else {
2002 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2003 ret |= CPSR_I;
2004 }
2005 }
2006
2007 if (allow_virt && (hcr_el2 & HCR_FMO)) {
2008 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2009 ret |= CPSR_F;
2010 }
2011 } else {
2012 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2013 ret |= CPSR_F;
2014 }
2015 }
2016
2017 /* External aborts are not possible in QEMU so A bit is always clear */
2018 return ret;
2019 }
2020
2021 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2022 bool isread)
2023 {
2024 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2025 return CP_ACCESS_TRAP_EL2;
2026 }
2027
2028 return CP_ACCESS_OK;
2029 }
2030
2031 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2032 bool isread)
2033 {
2034 if (arm_feature(env, ARM_FEATURE_V8)) {
2035 return access_aa64_tid1(env, ri, isread);
2036 }
2037
2038 return CP_ACCESS_OK;
2039 }
2040
2041 static const ARMCPRegInfo v7_cp_reginfo[] = {
2042 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2043 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2044 .access = PL1_W, .type = ARM_CP_NOP },
2045 /* Performance monitors are implementation defined in v7,
2046 * but with an ARM recommended set of registers, which we
2047 * follow.
2048 *
2049 * Performance registers fall into three categories:
2050 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2051 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2052 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2053 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2054 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2055 */
2056 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2057 .access = PL0_RW, .type = ARM_CP_ALIAS,
2058 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2059 .writefn = pmcntenset_write,
2060 .accessfn = pmreg_access,
2061 .raw_writefn = raw_write },
2062 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2063 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2064 .access = PL0_RW, .accessfn = pmreg_access,
2065 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2066 .writefn = pmcntenset_write, .raw_writefn = raw_write },
2067 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2068 .access = PL0_RW,
2069 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2070 .accessfn = pmreg_access,
2071 .writefn = pmcntenclr_write,
2072 .type = ARM_CP_ALIAS },
2073 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2074 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2075 .access = PL0_RW, .accessfn = pmreg_access,
2076 .type = ARM_CP_ALIAS,
2077 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2078 .writefn = pmcntenclr_write },
2079 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2080 .access = PL0_RW, .type = ARM_CP_IO,
2081 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2082 .accessfn = pmreg_access,
2083 .writefn = pmovsr_write,
2084 .raw_writefn = raw_write },
2085 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2086 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2087 .access = PL0_RW, .accessfn = pmreg_access,
2088 .type = ARM_CP_ALIAS | ARM_CP_IO,
2089 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2090 .writefn = pmovsr_write,
2091 .raw_writefn = raw_write },
2092 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2093 .access = PL0_W, .accessfn = pmreg_access_swinc,
2094 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2095 .writefn = pmswinc_write },
2096 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2097 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2098 .access = PL0_W, .accessfn = pmreg_access_swinc,
2099 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2100 .writefn = pmswinc_write },
2101 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2102 .access = PL0_RW, .type = ARM_CP_ALIAS,
2103 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2104 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2105 .raw_writefn = raw_write},
2106 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2107 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2108 .access = PL0_RW, .accessfn = pmreg_access_selr,
2109 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2110 .writefn = pmselr_write, .raw_writefn = raw_write, },
2111 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2112 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2113 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2114 .accessfn = pmreg_access_ccntr },
2115 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2116 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2117 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2118 .type = ARM_CP_IO,
2119 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2120 .readfn = pmccntr_read, .writefn = pmccntr_write,
2121 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2122 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2123 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2124 .access = PL0_RW, .accessfn = pmreg_access,
2125 .type = ARM_CP_ALIAS | ARM_CP_IO,
2126 .resetvalue = 0, },
2127 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2128 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2129 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2130 .access = PL0_RW, .accessfn = pmreg_access,
2131 .type = ARM_CP_IO,
2132 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2133 .resetvalue = 0, },
2134 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2135 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2136 .accessfn = pmreg_access,
2137 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2138 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2139 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2140 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2141 .accessfn = pmreg_access,
2142 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2143 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2144 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2145 .accessfn = pmreg_access_xevcntr,
2146 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2147 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2148 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2149 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2150 .accessfn = pmreg_access_xevcntr,
2151 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2152 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2153 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2154 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2155 .resetvalue = 0,
2156 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2157 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2158 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2159 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2160 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2161 .resetvalue = 0,
2162 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2163 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2164 .access = PL1_RW, .accessfn = access_tpm,
2165 .type = ARM_CP_ALIAS | ARM_CP_IO,
2166 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2167 .resetvalue = 0,
2168 .writefn = pmintenset_write, .raw_writefn = raw_write },
2169 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2170 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2171 .access = PL1_RW, .accessfn = access_tpm,
2172 .type = ARM_CP_IO,
2173 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2174 .writefn = pmintenset_write, .raw_writefn = raw_write,
2175 .resetvalue = 0x0 },
2176 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2177 .access = PL1_RW, .accessfn = access_tpm,
2178 .type = ARM_CP_ALIAS | ARM_CP_IO,
2179 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2180 .writefn = pmintenclr_write, },
2181 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2182 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2183 .access = PL1_RW, .accessfn = access_tpm,
2184 .type = ARM_CP_ALIAS | ARM_CP_IO,
2185 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2186 .writefn = pmintenclr_write },
2187 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2188 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2189 .access = PL1_R,
2190 .accessfn = access_aa64_tid2,
2191 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2192 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2193 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2194 .access = PL1_RW,
2195 .accessfn = access_aa64_tid2,
2196 .writefn = csselr_write, .resetvalue = 0,
2197 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2198 offsetof(CPUARMState, cp15.csselr_ns) } },
2199 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2200 * we just RAZ for all cores.
2201 */
2202 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2203 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2204 .access = PL1_R, .type = ARM_CP_CONST,
2205 .accessfn = access_aa64_tid1,
2206 .resetvalue = 0 },
2207 /* Auxiliary fault status registers: these are also IMPDEF, and we
2208 * choose to RAZ/WI for all cores.
2209 */
2210 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2211 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2212 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2213 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2214 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2215 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2216 /* MAIR can just read-as-written because we don't implement caches
2217 * and so don't need to care about memory attributes.
2218 */
2219 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2220 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2221 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2222 .resetvalue = 0 },
2223 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2224 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2225 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2226 .resetvalue = 0 },
2227 /* For non-long-descriptor page tables these are PRRR and NMRR;
2228 * regardless they still act as reads-as-written for QEMU.
2229 */
2230 /* MAIR0/1 are defined separately from their 64-bit counterpart so that
2231 * each can be assigned the correct fieldoffset for its half of the
2232 * register, with endianness handled in the field definitions.
2233 */
2234 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2235 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
2236 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2237 offsetof(CPUARMState, cp15.mair0_ns) },
2238 .resetfn = arm_cp_reset_ignore },
2239 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2240 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
2241 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2242 offsetof(CPUARMState, cp15.mair1_ns) },
2243 .resetfn = arm_cp_reset_ignore },
2244 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2245 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2246 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2247 /* 32 bit ITLB invalidates */
2248 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2249 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2250 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2251 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2252 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2253 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2254 /* 32 bit DTLB invalidates */
2255 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2256 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2257 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2258 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2259 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2260 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2261 /* 32 bit TLB invalidates */
2262 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2263 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2264 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2265 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2266 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2267 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2268 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2269 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
2270 REGINFO_SENTINEL
2271 };
2272
2273 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2274 /* 32 bit TLB invalidates, Inner Shareable */
2275 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2276 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
2277 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2278 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
2279 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2280 .type = ARM_CP_NO_RAW, .access = PL1_W,
2281 .writefn = tlbiasid_is_write },
2282 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2283 .type = ARM_CP_NO_RAW, .access = PL1_W,
2284 .writefn = tlbimvaa_is_write },
2285 REGINFO_SENTINEL
2286 };
2287
2288 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2289 /* PMOVSSET is not implemented in v7 before v7ve */
2290 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2291 .access = PL0_RW, .accessfn = pmreg_access,
2292 .type = ARM_CP_ALIAS | ARM_CP_IO,
2293 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2294 .writefn = pmovsset_write,
2295 .raw_writefn = raw_write },
2296 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2297 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2298 .access = PL0_RW, .accessfn = pmreg_access,
2299 .type = ARM_CP_ALIAS | ARM_CP_IO,
2300 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2301 .writefn = pmovsset_write,
2302 .raw_writefn = raw_write },
2303 REGINFO_SENTINEL
2304 };
2305
2306 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2307 uint64_t value)
2308 {
2309 value &= 1;
2310 env->teecr = value;
2311 }
2312
2313 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2314 bool isread)
2315 {
2316 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2317 return CP_ACCESS_TRAP;
2318 }
2319 return CP_ACCESS_OK;
2320 }
2321
2322 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2323 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2324 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2325 .resetvalue = 0,
2326 .writefn = teecr_write },
2327 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2328 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2329 .accessfn = teehbr_access, .resetvalue = 0 },
2330 REGINFO_SENTINEL
2331 };
2332
2333 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2334 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2335 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2336 .access = PL0_RW,
2337 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2338 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2339 .access = PL0_RW,
2340 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2341 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2342 .resetfn = arm_cp_reset_ignore },
2343 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2344 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2345 .access = PL0_R|PL1_W,
2346 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2347 .resetvalue = 0},
2348 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2349 .access = PL0_R|PL1_W,
2350 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2351 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2352 .resetfn = arm_cp_reset_ignore },
2353 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2354 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2355 .access = PL1_RW,
2356 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2357 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2358 .access = PL1_RW,
2359 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2360 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2361 .resetvalue = 0 },
2362 REGINFO_SENTINEL
2363 };
2364
2365 #ifndef CONFIG_USER_ONLY
2366
2367 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2368 bool isread)
2369 {
2370 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2371 * Writable only at the highest implemented exception level.
2372 */
2373 int el = arm_current_el(env);
2374 uint64_t hcr;
2375 uint32_t cntkctl;
2376
2377 switch (el) {
2378 case 0:
2379 hcr = arm_hcr_el2_eff(env);
2380 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2381 cntkctl = env->cp15.cnthctl_el2;
2382 } else {
2383 cntkctl = env->cp15.c14_cntkctl;
2384 }
2385 if (!extract32(cntkctl, 0, 2)) {
2386 return CP_ACCESS_TRAP;
2387 }
2388 break;
2389 case 1:
2390 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2391 arm_is_secure_below_el3(env)) {
2392 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2393 return CP_ACCESS_TRAP_UNCATEGORIZED;
2394 }
2395 break;
2396 case 2:
2397 case 3:
2398 break;
2399 }
2400
2401 if (!isread && el < arm_highest_el(env)) {
2402 return CP_ACCESS_TRAP_UNCATEGORIZED;
2403 }
2404
2405 return CP_ACCESS_OK;
2406 }
2407
2408 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2409 bool isread)
2410 {
2411 unsigned int cur_el = arm_current_el(env);
2412 bool secure = arm_is_secure(env);
2413 uint64_t hcr = arm_hcr_el2_eff(env);
2414
2415 switch (cur_el) {
2416 case 0:
2417 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2418 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2419 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2420 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2421 }
2422
2423 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2424 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2425 return CP_ACCESS_TRAP;
2426 }
2427
2428 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2429 if (hcr & HCR_E2H) {
2430 if (timeridx == GTIMER_PHYS &&
2431 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2432 return CP_ACCESS_TRAP_EL2;
2433 }
2434 } else {
2435 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2436 if (arm_feature(env, ARM_FEATURE_EL2) &&
2437 timeridx == GTIMER_PHYS && !secure &&
2438 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2439 return CP_ACCESS_TRAP_EL2;
2440 }
2441 }
2442 break;
2443
2444 case 1:
2445 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2446 if (arm_feature(env, ARM_FEATURE_EL2) &&
2447 timeridx == GTIMER_PHYS && !secure &&
2448 (hcr & HCR_E2H
2449 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2450 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2451 return CP_ACCESS_TRAP_EL2;
2452 }
2453 break;
2454 }
2455 return CP_ACCESS_OK;
2456 }
2457
2458 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2459 bool isread)
2460 {
2461 unsigned int cur_el = arm_current_el(env);
2462 bool secure = arm_is_secure(env);
2463 uint64_t hcr = arm_hcr_el2_eff(env);
2464
2465 switch (cur_el) {
2466 case 0:
2467 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2468 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2469 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2470 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2471 }
2472
2473 /*
2474 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2475 * EL0 if EL0[PV]TEN is zero.
2476 */
2477 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2478 return CP_ACCESS_TRAP;
2479 }
2480 /* fall through */
2481
2482 case 1:
2483 if (arm_feature(env, ARM_FEATURE_EL2) &&
2484 timeridx == GTIMER_PHYS && !secure) {
2485 if (hcr & HCR_E2H) {
2486 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2487 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2488 return CP_ACCESS_TRAP_EL2;
2489 }
2490 } else {
2491 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2492 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2493 return CP_ACCESS_TRAP_EL2;
2494 }
2495 }
2496 }
2497 break;
2498 }
2499 return CP_ACCESS_OK;
2500 }
2501
2502 static CPAccessResult gt_pct_access(CPUARMState *env,
2503 const ARMCPRegInfo *ri,
2504 bool isread)
2505 {
2506 return gt_counter_access(env, GTIMER_PHYS, isread);
2507 }
2508
2509 static CPAccessResult gt_vct_access(CPUARMState *env,
2510 const ARMCPRegInfo *ri,
2511 bool isread)
2512 {
2513 return gt_counter_access(env, GTIMER_VIRT, isread);
2514 }
2515
2516 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2517 bool isread)
2518 {
2519 return gt_timer_access(env, GTIMER_PHYS, isread);
2520 }
2521
2522 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2523 bool isread)
2524 {
2525 return gt_timer_access(env, GTIMER_VIRT, isread);
2526 }
2527
2528 static CPAccessResult gt_stimer_access(CPUARMState *env,
2529 const ARMCPRegInfo *ri,
2530 bool isread)
2531 {
2532 /* The AArch64 register view of the secure physical timer is
2533 * always accessible from EL3, and configurably accessible from
2534 * Secure EL1.
2535 */
2536 switch (arm_current_el(env)) {
2537 case 1:
2538 if (!arm_is_secure(env)) {
2539 return CP_ACCESS_TRAP;
2540 }
2541 if (!(env->cp15.scr_el3 & SCR_ST)) {
2542 return CP_ACCESS_TRAP_EL3;
2543 }
2544 return CP_ACCESS_OK;
2545 case 0:
2546 case 2:
2547 return CP_ACCESS_TRAP;
2548 case 3:
2549 return CP_ACCESS_OK;
2550 default:
2551 g_assert_not_reached();
2552 }
2553 }
2554
2555 static uint64_t gt_get_countervalue(CPUARMState *env)
2556 {
2557 ARMCPU *cpu = env_archcpu(env);
2558
2559 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2560 }
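
/*
 * Editorial example (assuming the default GTIMER_SCALE of 16ns per tick,
 * i.e. a 62.5MHz counter): gt_get_countervalue() just divides the virtual
 * clock by the tick period, so one second of QEMU_CLOCK_VIRTUAL time
 * (1000000000ns) reads back as 62500000 counter ticks.
 */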
2561
2562 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2563 {
2564 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2565
2566 if (gt->ctl & 1) {
2567 /* Timer enabled: calculate and set current ISTATUS, irq, and
2568 * reset timer to when ISTATUS next has to change
2569 */
2570 uint64_t offset = timeridx == GTIMER_VIRT ?
2571 cpu->env.cp15.cntvoff_el2 : 0;
2572 uint64_t count = gt_get_countervalue(&cpu->env);
2573 /* Note that this must be unsigned 64 bit arithmetic: */
2574 int istatus = count - offset >= gt->cval;
2575 uint64_t nexttick;
2576 int irqstate;
2577
2578 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2579
2580 irqstate = (istatus && !(gt->ctl & 2));
2581 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2582
2583 if (istatus) {
2584 /* Next transition is when count rolls back over to zero */
2585 nexttick = UINT64_MAX;
2586 } else {
2587 /* Next transition is when we hit cval */
2588 nexttick = gt->cval + offset;
2589 }
2590 /* Note that the desired next expiry time might be beyond the
2591 * signed-64-bit range of a QEMUTimer -- in this case we just
2592 * set the timer for as far in the future as possible. When the
2593 * timer expires we will reset the timer for any remaining period.
2594 */
2595 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2596 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2597 } else {
2598 timer_mod(cpu->gt_timer[timeridx], nexttick);
2599 }
2600 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2601 } else {
2602 /* Timer disabled: ISTATUS and timer output always clear */
2603 gt->ctl &= ~4;
2604 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2605 timer_del(cpu->gt_timer[timeridx]);
2606 trace_arm_gt_recalc_disabled(timeridx);
2607 }
2608 }
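
/*
 * Editorial note on the clamp above (same GTIMER_SCALE assumption as the
 * example after gt_get_countervalue()): INT64_MAX / gt_cntfrq_period_ns()
 * is the largest tick count whose nanosecond equivalent still fits in a
 * signed 64-bit QEMUTimer deadline. With a 16ns tick that is roughly
 * 5.8e17 ticks, so deadlines around 292 years out get pinned to
 * INT64_MAX ns.
 */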
2609
2610 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2611 int timeridx)
2612 {
2613 ARMCPU *cpu = env_archcpu(env);
2614
2615 timer_del(cpu->gt_timer[timeridx]);
2616 }
2617
2618 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2619 {
2620 return gt_get_countervalue(env);
2621 }
2622
2623 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2624 {
2625 uint64_t hcr;
2626
2627 switch (arm_current_el(env)) {
2628 case 2:
2629 hcr = arm_hcr_el2_eff(env);
2630 if (hcr & HCR_E2H) {
2631 return 0;
2632 }
2633 break;
2634 case 0:
2635 hcr = arm_hcr_el2_eff(env);
2636 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2637 return 0;
2638 }
2639 break;
2640 }
2641
2642 return env->cp15.cntvoff_el2;
2643 }
2644
2645 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2646 {
2647 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2648 }
2649
2650 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2651 int timeridx,
2652 uint64_t value)
2653 {
2654 trace_arm_gt_cval_write(timeridx, value);
2655 env->cp15.c14_timer[timeridx].cval = value;
2656 gt_recalc_timer(env_archcpu(env), timeridx);
2657 }
2658
2659 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2660 int timeridx)
2661 {
2662 uint64_t offset = 0;
2663
2664 switch (timeridx) {
2665 case GTIMER_VIRT:
2666 case GTIMER_HYPVIRT:
2667 offset = gt_virt_cnt_offset(env);
2668 break;
2669 }
2670
2671 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2672 (gt_get_countervalue(env) - offset));
2673 }
2674
2675 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2676 int timeridx,
2677 uint64_t value)
2678 {
2679 uint64_t offset = 0;
2680
2681 switch (timeridx) {
2682 case GTIMER_VIRT:
2683 case GTIMER_HYPVIRT:
2684 offset = gt_virt_cnt_offset(env);
2685 break;
2686 }
2687
2688 trace_arm_gt_tval_write(timeridx, value);
2689 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2690 sextract64(value, 0, 32);
2691 gt_recalc_timer(env_archcpu(env), timeridx);
2692 }
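
/*
 * Editorial worked example (not part of the original source): TVAL is a
 * signed 32-bit downcounter view of CVAL. With the offset-adjusted counter
 * at 1000, writing TVAL = 100 above sets cval = 1000 + 100 = 1100, and a
 * subsequent gt_tval_read() returns (uint32_t)(1100 - 1000) = 100, which
 * then counts down towards zero (and beyond) as the counter advances.
 */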
2693
2694 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2695 int timeridx,
2696 uint64_t value)
2697 {
2698 ARMCPU *cpu = env_archcpu(env);
2699 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2700
2701 trace_arm_gt_ctl_write(timeridx, value);
2702 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2703 if ((oldval ^ value) & 1) {
2704 /* Enable toggled */
2705 gt_recalc_timer(cpu, timeridx);
2706 } else if ((oldval ^ value) & 2) {
2707 /* IMASK toggled: don't need to recalculate,
2708 * just set the interrupt line based on ISTATUS
2709 */
2710 int irqstate = (oldval & 4) && !(value & 2);
2711
2712 trace_arm_gt_imask_toggle(timeridx, irqstate);
2713 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2714 }
2715 }
2716
2717 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2718 {
2719 gt_timer_reset(env, ri, GTIMER_PHYS);
2720 }
2721
2722 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2723 uint64_t value)
2724 {
2725 gt_cval_write(env, ri, GTIMER_PHYS, value);
2726 }
2727
2728 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2729 {
2730 return gt_tval_read(env, ri, GTIMER_PHYS);
2731 }
2732
2733 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2734 uint64_t value)
2735 {
2736 gt_tval_write(env, ri, GTIMER_PHYS, value);
2737 }
2738
2739 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2740 uint64_t value)
2741 {
2742 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2743 }
2744
2745 static int gt_phys_redir_timeridx(CPUARMState *env)
2746 {
2747 switch (arm_mmu_idx(env)) {
2748 case ARMMMUIdx_E20_0:
2749 case ARMMMUIdx_E20_2:
2750 case ARMMMUIdx_E20_2_PAN:
2751 return GTIMER_HYP;
2752 default:
2753 return GTIMER_PHYS;
2754 }
2755 }
2756
2757 static int gt_virt_redir_timeridx(CPUARMState *env)
2758 {
2759 switch (arm_mmu_idx(env)) {
2760 case ARMMMUIdx_E20_0:
2761 case ARMMMUIdx_E20_2:
2762 case ARMMMUIdx_E20_2_PAN:
2763 return GTIMER_HYPVIRT;
2764 default:
2765 return GTIMER_VIRT;
2766 }
2767 }
2768
2769 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2770 const ARMCPRegInfo *ri)
2771 {
2772 int timeridx = gt_phys_redir_timeridx(env);
2773 return env->cp15.c14_timer[timeridx].cval;
2774 }
2775
2776 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2777 uint64_t value)
2778 {
2779 int timeridx = gt_phys_redir_timeridx(env);
2780 gt_cval_write(env, ri, timeridx, value);
2781 }
2782
2783 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2784 const ARMCPRegInfo *ri)
2785 {
2786 int timeridx = gt_phys_redir_timeridx(env);
2787 return gt_tval_read(env, ri, timeridx);
2788 }
2789
2790 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2791 uint64_t value)
2792 {
2793 int timeridx = gt_phys_redir_timeridx(env);
2794 gt_tval_write(env, ri, timeridx, value);
2795 }
2796
2797 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2798 const ARMCPRegInfo *ri)
2799 {
2800 int timeridx = gt_phys_redir_timeridx(env);
2801 return env->cp15.c14_timer[timeridx].ctl;
2802 }
2803
2804 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2805 uint64_t value)
2806 {
2807 int timeridx = gt_phys_redir_timeridx(env);
2808 gt_ctl_write(env, ri, timeridx, value);
2809 }
2810
2811 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2812 {
2813 gt_timer_reset(env, ri, GTIMER_VIRT);
2814 }
2815
2816 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2817 uint64_t value)
2818 {
2819 gt_cval_write(env, ri, GTIMER_VIRT, value);
2820 }
2821
2822 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2823 {
2824 return gt_tval_read(env, ri, GTIMER_VIRT);
2825 }
2826
2827 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2828 uint64_t value)
2829 {
2830 gt_tval_write(env, ri, GTIMER_VIRT, value);
2831 }
2832
2833 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2834 uint64_t value)
2835 {
2836 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2837 }
2838
2839 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2840 uint64_t value)
2841 {
2842 ARMCPU *cpu = env_archcpu(env);
2843
2844 trace_arm_gt_cntvoff_write(value);
2845 raw_write(env, ri, value);
2846 gt_recalc_timer(cpu, GTIMER_VIRT);
2847 }
2848
2849 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2850 const ARMCPRegInfo *ri)
2851 {
2852 int timeridx = gt_virt_redir_timeridx(env);
2853 return env->cp15.c14_timer[timeridx].cval;
2854 }
2855
2856 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2857 uint64_t value)
2858 {
2859 int timeridx = gt_virt_redir_timeridx(env);
2860 gt_cval_write(env, ri, timeridx, value);
2861 }
2862
2863 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2864 const ARMCPRegInfo *ri)
2865 {
2866 int timeridx = gt_virt_redir_timeridx(env);
2867 return gt_tval_read(env, ri, timeridx);
2868 }
2869
2870 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2871 uint64_t value)
2872 {
2873 int timeridx = gt_virt_redir_timeridx(env);
2874 gt_tval_write(env, ri, timeridx, value);
2875 }
2876
2877 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2878 const ARMCPRegInfo *ri)
2879 {
2880 int timeridx = gt_virt_redir_timeridx(env);
2881 return env->cp15.c14_timer[timeridx].ctl;
2882 }
2883
2884 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2885 uint64_t value)
2886 {
2887 int timeridx = gt_virt_redir_timeridx(env);
2888 gt_ctl_write(env, ri, timeridx, value);
2889 }
2890
2891 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2892 {
2893 gt_timer_reset(env, ri, GTIMER_HYP);
2894 }
2895
2896 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2897 uint64_t value)
2898 {
2899 gt_cval_write(env, ri, GTIMER_HYP, value);
2900 }
2901
2902 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2903 {
2904 return gt_tval_read(env, ri, GTIMER_HYP);
2905 }
2906
2907 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2908 uint64_t value)
2909 {
2910 gt_tval_write(env, ri, GTIMER_HYP, value);
2911 }
2912
2913 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2914 uint64_t value)
2915 {
2916 gt_ctl_write(env, ri, GTIMER_HYP, value);
2917 }
2918
2919 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2920 {
2921 gt_timer_reset(env, ri, GTIMER_SEC);
2922 }
2923
2924 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2925 uint64_t value)
2926 {
2927 gt_cval_write(env, ri, GTIMER_SEC, value);
2928 }
2929
2930 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2931 {
2932 return gt_tval_read(env, ri, GTIMER_SEC);
2933 }
2934
2935 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2936 uint64_t value)
2937 {
2938 gt_tval_write(env, ri, GTIMER_SEC, value);
2939 }
2940
2941 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2942 uint64_t value)
2943 {
2944 gt_ctl_write(env, ri, GTIMER_SEC, value);
2945 }
2946
2947 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2948 {
2949 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
2950 }
2951
2952 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2953 uint64_t value)
2954 {
2955 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
2956 }
2957
2958 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2959 {
2960 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
2961 }
2962
2963 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2964 uint64_t value)
2965 {
2966 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
2967 }
2968
2969 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2970 uint64_t value)
2971 {
2972 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
2973 }
2974
2975 void arm_gt_ptimer_cb(void *opaque)
2976 {
2977 ARMCPU *cpu = opaque;
2978
2979 gt_recalc_timer(cpu, GTIMER_PHYS);
2980 }
2981
2982 void arm_gt_vtimer_cb(void *opaque)
2983 {
2984 ARMCPU *cpu = opaque;
2985
2986 gt_recalc_timer(cpu, GTIMER_VIRT);
2987 }
2988
2989 void arm_gt_htimer_cb(void *opaque)
2990 {
2991 ARMCPU *cpu = opaque;
2992
2993 gt_recalc_timer(cpu, GTIMER_HYP);
2994 }
2995
2996 void arm_gt_stimer_cb(void *opaque)
2997 {
2998 ARMCPU *cpu = opaque;
2999
3000 gt_recalc_timer(cpu, GTIMER_SEC);
3001 }
3002
3003 void arm_gt_hvtimer_cb(void *opaque)
3004 {
3005 ARMCPU *cpu = opaque;
3006
3007 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3008 }
3009
3010 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3011 {
3012 ARMCPU *cpu = env_archcpu(env);
3013
3014 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3015 }
3016
3017 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3018 /* Note that CNTFRQ is purely reads-as-written for the benefit
3019 * of software; writing it doesn't actually change the timer frequency.
3020 * Our reset value matches the fixed frequency we implement the timer at.
3021 */
3022 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3023 .type = ARM_CP_ALIAS,
3024 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3025 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3026 },
3027 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3028 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3029 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3030 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3031 .resetfn = arm_gt_cntfrq_reset,
3032 },
3033 /* overall control: mostly access permissions */
3034 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3035 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3036 .access = PL1_RW,
3037 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3038 .resetvalue = 0,
3039 },
3040 /* per-timer control */
3041 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3042 .secure = ARM_CP_SECSTATE_NS,
3043 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3044 .accessfn = gt_ptimer_access,
3045 .fieldoffset = offsetoflow32(CPUARMState,
3046 cp15.c14_timer[GTIMER_PHYS].ctl),
3047 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3048 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3049 },
3050 { .name = "CNTP_CTL_S",
3051 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3052 .secure = ARM_CP_SECSTATE_S,
3053 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3054 .accessfn = gt_ptimer_access,
3055 .fieldoffset = offsetoflow32(CPUARMState,
3056 cp15.c14_timer[GTIMER_SEC].ctl),
3057 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3058 },
3059 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3060 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3061 .type = ARM_CP_IO, .access = PL0_RW,
3062 .accessfn = gt_ptimer_access,
3063 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3064 .resetvalue = 0,
3065 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3066 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3067 },
3068 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3069 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3070 .accessfn = gt_vtimer_access,
3071 .fieldoffset = offsetoflow32(CPUARMState,
3072 cp15.c14_timer[GTIMER_VIRT].ctl),
3073 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3074 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3075 },
3076 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3077 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3078 .type = ARM_CP_IO, .access = PL0_RW,
3079 .accessfn = gt_vtimer_access,
3080 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3081 .resetvalue = 0,
3082 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3083 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3084 },
3085 /* TimerValue views: a 32 bit downcounting view of the underlying state */
3086 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3087 .secure = ARM_CP_SECSTATE_NS,
3088 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3089 .accessfn = gt_ptimer_access,
3090 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3091 },
3092 { .name = "CNTP_TVAL_S",
3093 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3094 .secure = ARM_CP_SECSTATE_S,
3095 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3096 .accessfn = gt_ptimer_access,
3097 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3098 },
3099 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3100 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3101 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3102 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3103 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3104 },
3105 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3106 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3107 .accessfn = gt_vtimer_access,
3108 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3109 },
3110 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3111 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3112 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3113 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3114 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3115 },
3116 /* The counter itself */
3117 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3118 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3119 .accessfn = gt_pct_access,
3120 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3121 },
3122 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3123 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3124 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3125 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3126 },
3127 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3128 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3129 .accessfn = gt_vct_access,
3130 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3131 },
3132 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3133 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3134 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3135 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3136 },
3137 /* Comparison value, indicating when the timer goes off */
3138 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3139 .secure = ARM_CP_SECSTATE_NS,
3140 .access = PL0_RW,
3141 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3142 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3143 .accessfn = gt_ptimer_access,
3144 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3145 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3146 },
3147 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3148 .secure = ARM_CP_SECSTATE_S,
3149 .access = PL0_RW,
3150 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3151 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3152 .accessfn = gt_ptimer_access,
3153 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3154 },
3155 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3156 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3157 .access = PL0_RW,
3158 .type = ARM_CP_IO,
3159 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3160 .resetvalue = 0, .accessfn = gt_ptimer_access,
3161 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3162 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3163 },
3164 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3165 .access = PL0_RW,
3166 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3167 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3168 .accessfn = gt_vtimer_access,
3169 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3170 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3171 },
3172 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3173 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3174 .access = PL0_RW,
3175 .type = ARM_CP_IO,
3176 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3177 .resetvalue = 0, .accessfn = gt_vtimer_access,
3178 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3179 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3180 },
3181 /* Secure timer -- this is actually restricted to only EL3
3182 * and configurably Secure-EL1 via the accessfn.
3183 */
3184 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3185 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3186 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3187 .accessfn = gt_stimer_access,
3188 .readfn = gt_sec_tval_read,
3189 .writefn = gt_sec_tval_write,
3190 .resetfn = gt_sec_timer_reset,
3191 },
3192 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3193 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3194 .type = ARM_CP_IO, .access = PL1_RW,
3195 .accessfn = gt_stimer_access,
3196 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3197 .resetvalue = 0,
3198 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3199 },
3200 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3201 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3202 .type = ARM_CP_IO, .access = PL1_RW,
3203 .accessfn = gt_stimer_access,
3204 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3205 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3206 },
3207 REGINFO_SENTINEL
3208 };
3209
3210 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3211 bool isread)
3212 {
3213 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3214 return CP_ACCESS_TRAP;
3215 }
3216 return CP_ACCESS_OK;
3217 }
3218
3219 #else
3220
3221 /* In user-mode most of the generic timer registers are inaccessible;
3222 * however, modern kernels (4.12+) allow access to cntvct_el0.
3223 */
3224
3225 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3226 {
3227 ARMCPU *cpu = env_archcpu(env);
3228
3229 /* Currently we have no support for QEMUTimer in linux-user, so we
3230 * can't call gt_get_countervalue(env); instead we directly
3231 * call the lower-level functions.
3232 */
3233 return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3234 }
3235
3236 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3237 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3238 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3239 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3240 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3241 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3242 },
3243 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3244 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3245 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3246 .readfn = gt_virt_cnt_read,
3247 },
3248 REGINFO_SENTINEL
3249 };
3250
3251 #endif
3252
3253 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3254 {
3255 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3256 raw_write(env, ri, value);
3257 } else if (arm_feature(env, ARM_FEATURE_V7)) {
3258 raw_write(env, ri, value & 0xfffff6ff);
3259 } else {
3260 raw_write(env, ri, value & 0xfffff1ff);
3261 }
3262 }
3263
3264 #ifndef CONFIG_USER_ONLY
3265 /* get_phys_addr() isn't present for user-mode-only targets */
3266
3267 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3268 bool isread)
3269 {
3270 if (ri->opc2 & 4) {
3271 /* The ATS12NSO* operations must trap to EL3 if executed in
3272 * Secure EL1 (which can only happen if EL3 is AArch64).
3273 * They are simply UNDEF if executed from NS EL1.
3274 * They function normally from EL2 or EL3.
3275 */
3276 if (arm_current_el(env) == 1) {
3277 if (arm_is_secure_below_el3(env)) {
3278 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
3279 }
3280 return CP_ACCESS_TRAP_UNCATEGORIZED;
3281 }
3282 }
3283 return CP_ACCESS_OK;
3284 }
3285
3286 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3287 MMUAccessType access_type, ARMMMUIdx mmu_idx)
3288 {
3289 hwaddr phys_addr;
3290 target_ulong page_size;
3291 int prot;
3292 bool ret;
3293 uint64_t par64;
3294 bool format64 = false;
3295 MemTxAttrs attrs = {};
3296 ARMMMUFaultInfo fi = {};
3297 ARMCacheAttrs cacheattrs = {};
3298
3299 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
3300 &prot, &page_size, &fi, &cacheattrs);
3301
3302 if (ret) {
3303 /*
3304 * Some kinds of translation fault must cause exceptions rather
3305 * than being reported in the PAR.
3306 */
3307 int current_el = arm_current_el(env);
3308 int target_el;
3309 uint32_t syn, fsr, fsc;
3310 bool take_exc = false;
3311
3312 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
3313 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3314 /*
3315 * Synchronous stage 2 fault on an access made as part of the
3316 * translation table walk for AT S1E0* or AT S1E1* insn
3317 * executed from NS EL1. If this is a synchronous external abort
3318 * and SCR_EL3.EA == 1, then we take a synchronous external abort
3319 * to EL3. Otherwise the fault is taken as an exception to EL2,
3320 * and HPFAR_EL2 holds the faulting IPA.
3321 */
3322 if (fi.type == ARMFault_SyncExternalOnWalk &&
3323 (env->cp15.scr_el3 & SCR_EA)) {
3324 target_el = 3;
3325 } else {
3326 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3327 target_el = 2;
3328 }
3329 take_exc = true;
3330 } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3331 /*
3332 * Synchronous external aborts during a translation table walk
3333 * are taken as Data Abort exceptions.
3334 */
3335 if (fi.stage2) {
3336 if (current_el == 3) {
3337 target_el = 3;
3338 } else {
3339 target_el = 2;
3340 }
3341 } else {
3342 target_el = exception_target_el(env);
3343 }
3344 take_exc = true;
3345 }
3346
3347 if (take_exc) {
3348 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3349 if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3350 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3351 fsr = arm_fi_to_lfsc(&fi);
3352 fsc = extract32(fsr, 0, 6);
3353 } else {
3354 fsr = arm_fi_to_sfsc(&fi);
3355 fsc = 0x3f;
3356 }
3357 /*
3358 * Report exception with ESR indicating a fault due to a
3359 * translation table walk for a cache maintenance instruction.
3360 */
3361 syn = syn_data_abort_no_iss(current_el == target_el,
3362 fi.ea, 1, fi.s1ptw, 1, fsc);
3363 env->exception.vaddress = value;
3364 env->exception.fsr = fsr;
3365 raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3366 }
3367 }
3368
3369 if (is_a64(env)) {
3370 format64 = true;
3371 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3372 /*
3373 * ATS1Cxx:
3374 * * TTBCR.EAE determines whether the result is returned using the
3375 * 32-bit or the 64-bit PAR format
3376 * * Instructions executed in Hyp mode always use the 64-bit format
3377 *
3378 * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
3379 * * The Non-secure TTBCR.EAE bit is set to 1
3380 * * The implementation includes EL2, and the value of HCR.VM is 1
3381 *
3382 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3383 *
3384 * ATS1Hx always uses the 64-bit format.
3385 */
3386 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3387
3388 if (arm_feature(env, ARM_FEATURE_EL2)) {
3389 if (mmu_idx == ARMMMUIdx_E10_0 ||
3390 mmu_idx == ARMMMUIdx_E10_1 ||
3391 mmu_idx == ARMMMUIdx_E10_1_PAN) {
3392 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3393 } else {
3394 format64 |= arm_current_el(env) == 2;
3395 }
3396 }
3397 }
3398
3399 if (format64) {
3400 /* Create a 64-bit PAR */
3401 par64 = (1 << 11); /* LPAE bit always set */
3402 if (!ret) {
3403 par64 |= phys_addr & ~0xfffULL;
3404 if (!attrs.secure) {
3405 par64 |= (1 << 9); /* NS */
3406 }
3407 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3408 par64 |= cacheattrs.shareability << 7; /* SH */
3409 } else {
3410 uint32_t fsr = arm_fi_to_lfsc(&fi);
3411
3412 par64 |= 1; /* F */
3413 par64 |= (fsr & 0x3f) << 1; /* FS */
3414 if (fi.stage2) {
3415 par64 |= (1 << 9); /* S */
3416 }
3417 if (fi.s1ptw) {
3418 par64 |= (1 << 8); /* PTW */
3419 }
3420 }
3421 } else {
3422 /* fsr is a DFSR/IFSR value for the short descriptor
3423 * translation table format (with WnR always clear).
3424 * Convert it to a 32-bit PAR.
3425 */
3426 if (!ret) {
3427 /* We do not set any attribute bits in the PAR */
3428 if (page_size == (1 << 24)
3429 && arm_feature(env, ARM_FEATURE_V7)) {
3430 par64 = (phys_addr & 0xff000000) | (1 << 1);
3431 } else {
3432 par64 = phys_addr & 0xfffff000;
3433 }
3434 if (!attrs.secure) {
3435 par64 |= (1 << 9); /* NS */
3436 }
3437 } else {
3438 uint32_t fsr = arm_fi_to_sfsc(&fi);
3439
3440 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3441 ((fsr & 0xf) << 1) | 1;
3442 }
3443 }
3444 return par64;
3445 }
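
/*
 * Illustrative sketch, not part of the original source: the fields packed
 * into the 64-bit PAR above can be unpacked again as shown below. The
 * struct and function names are invented for the example; the bit
 * positions follow the code in do_ats_write().
 */
typedef struct ExamplePAR64 {
    bool fault;        /* F, bit 0 */
    uint64_t pa;       /* output address, bits [51:12] */
    unsigned attrs;    /* memory attributes, bits [63:56] */
    unsigned sh;       /* shareability, bits [8:7] */
    bool ns;           /* non-secure, bit 9 */
    unsigned fst;      /* fault status code, bits [6:1] (fault case only) */
} ExamplePAR64;

static inline ExamplePAR64 example_decode_par64(uint64_t par64)
{
    ExamplePAR64 p = { .fault = par64 & 1 };

    if (!p.fault) {
        p.pa = extract64(par64, 12, 40) << 12;
        p.ns = extract64(par64, 9, 1);
        p.sh = extract64(par64, 7, 2);
        p.attrs = extract64(par64, 56, 8);
    } else {
        p.fst = extract64(par64, 1, 6);
    }
    return p;
}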
3446
3447 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3448 {
3449 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3450 uint64_t par64;
3451 ARMMMUIdx mmu_idx;
3452 int el = arm_current_el(env);
3453 bool secure = arm_is_secure_below_el3(env);
3454
3455 switch (ri->opc2 & 6) {
3456 case 0:
3457 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3458 switch (el) {
3459 case 3:
3460 mmu_idx = ARMMMUIdx_SE3;
3461 break;
3462 case 2:
3463 g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */
3464 /* fall through */
3465 case 1:
3466 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
3467 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
3468 : ARMMMUIdx_Stage1_E1_PAN);
3469 } else {
3470 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
3471 }
3472 break;
3473 default:
3474 g_assert_not_reached();
3475 }
3476 break;
3477 case 2:
3478 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3479 switch (el) {
3480 case 3:
3481 mmu_idx = ARMMMUIdx_SE10_0;
3482 break;
3483 case 2:
3484 mmu_idx = ARMMMUIdx_Stage1_E0;
3485 break;
3486 case 1:
3487 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
3488 break;
3489 default:
3490 g_assert_not_reached();
3491 }
3492 break;
3493 case 4:
3494 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3495 mmu_idx = ARMMMUIdx_E10_1;
3496 break;
3497 case 6:
3498 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3499 mmu_idx = ARMMMUIdx_E10_0;
3500 break;
3501 default:
3502 g_assert_not_reached();
3503 }
3504
3505 par64 = do_ats_write(env, value, access_type, mmu_idx);
3506
3507 A32_BANKED_CURRENT_REG_SET(env, par, par64);
3508 }
3509
3510 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3511 uint64_t value)
3512 {
3513 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3514 uint64_t par64;
3515
3516 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
3517
3518 A32_BANKED_CURRENT_REG_SET(env, par, par64);
3519 }
3520
3521 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3522 bool isread)
3523 {
3524 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3525 return CP_ACCESS_TRAP;
3526 }
3527 return CP_ACCESS_OK;
3528 }
3529
3530 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3531 uint64_t value)
3532 {
3533 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3534 ARMMMUIdx mmu_idx;
3535 int secure = arm_is_secure_below_el3(env);
3536
3537 switch (ri->opc2 & 6) {
3538 case 0:
3539 switch (ri->opc1) {
3540 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3541 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
3542 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
3543 : ARMMMUIdx_Stage1_E1_PAN);
3544 } else {
3545 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
3546 }
3547 break;
3548 case 4: /* AT S1E2R, AT S1E2W */
3549 mmu_idx = ARMMMUIdx_E2;
3550 break;
3551 case 6: /* AT S1E3R, AT S1E3W */
3552 mmu_idx = ARMMMUIdx_SE3;
3553 break;
3554 default:
3555 g_assert_not_reached();
3556 }
3557 break;
3558 case 2: /* AT S1E0R, AT S1E0W */
3559 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
3560 break;
3561 case 4: /* AT S12E1R, AT S12E1W */
3562 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
3563 break;
3564 case 6: /* AT S12E0R, AT S12E0W */
3565 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
3566 break;
3567 default:
3568 g_assert_not_reached();
3569 }
3570
3571 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3572 }
3573 #endif
3574
3575 static const ARMCPRegInfo vapa_cp_reginfo[] = {
3576 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3577 .access = PL1_RW, .resetvalue = 0,
3578 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3579 offsetoflow32(CPUARMState, cp15.par_ns) },
3580 .writefn = par_write },
3581 #ifndef CONFIG_USER_ONLY
3582 /* This underdecoding is safe because the reginfo is NO_RAW. */
3583 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3584 .access = PL1_W, .accessfn = ats_access,
3585 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
3586 #endif
3587 REGINFO_SENTINEL
3588 };
3589
3590 /* Return basic MPU access permission bits. */
3591 static uint32_t simple_mpu_ap_bits(uint32_t val)
3592 {
3593 uint32_t ret;
3594 uint32_t mask;
3595 int i;
3596 ret = 0;
3597 mask = 3;
3598 for (i = 0; i < 16; i += 2) {
3599 ret |= (val >> i) & mask;
3600 mask <<= 2;
3601 }
3602 return ret;
3603 }
3604
3605 /* Pad basic MPU access permission bits to extended format. */
3606 static uint32_t extended_mpu_ap_bits(uint32_t val)
3607 {
3608 uint32_t ret;
3609 uint32_t mask;
3610 int i;
3611 ret = 0;
3612 mask = 3;
3613 for (i = 0; i < 16; i += 2) {
3614 ret |= (val & mask) << i;
3615 mask <<= 2;
3616 }
3617 return ret;
3618 }
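
/*
 * Worked example (illustrative only, not part of the original source):
 * with AP values 1, 2 and 3 for regions 0..2, the "simple" packed form is
 * 0x39 (two bits per region) and the extended form is 0x321 (four bits per
 * region, upper two bits of each field zero). The two helpers above invert
 * each other for values of this shape.
 */
static inline void example_mpu_ap_roundtrip(void)
{
    uint32_t simple = 0x39;     /* 0b11_10_01: AP2=3, AP1=2, AP0=1 */

    g_assert(extended_mpu_ap_bits(simple) == 0x321);
    g_assert(simple_mpu_ap_bits(0x321) == simple);
}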
3619
3620 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3621 uint64_t value)
3622 {
3623 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3624 }
3625
3626 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3627 {
3628 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3629 }
3630
3631 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3632 uint64_t value)
3633 {
3634 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3635 }
3636
3637 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3638 {
3639 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3640 }
3641
3642 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3643 {
3644 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3645
3646 if (!u32p) {
3647 return 0;
3648 }
3649
3650 u32p += env->pmsav7.rnr[M_REG_NS];
3651 return *u32p;
3652 }
3653
3654 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3655 uint64_t value)
3656 {
3657 ARMCPU *cpu = env_archcpu(env);
3658 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3659
3660 if (!u32p) {
3661 return;
3662 }
3663
3664 u32p += env->pmsav7.rnr[M_REG_NS];
3665 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3666 *u32p = value;
3667 }
3668
3669 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3670 uint64_t value)
3671 {
3672 ARMCPU *cpu = env_archcpu(env);
3673 uint32_t nrgs = cpu->pmsav7_dregion;
3674
3675 if (value >= nrgs) {
3676 qemu_log_mask(LOG_GUEST_ERROR,
3677 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3678 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
3679 return;
3680 }
3681
3682 raw_write(env, ri, value);
3683 }
3684
3685 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3686 /* Reset for all these registers is handled in arm_cpu_reset(),
3687 * because the PMSAv7 is also used by M-profile CPUs, which do
3688 * not register cpregs but still need the state to be reset.
3689 */
3690 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3691 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3692 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3693 .readfn = pmsav7_read, .writefn = pmsav7_write,
3694 .resetfn = arm_cp_reset_ignore },
3695 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3696 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3697 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3698 .readfn = pmsav7_read, .writefn = pmsav7_write,
3699 .resetfn = arm_cp_reset_ignore },
3700 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3701 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3702 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3703 .readfn = pmsav7_read, .writefn = pmsav7_write,
3704 .resetfn = arm_cp_reset_ignore },
3705 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3706 .access = PL1_RW,
3707 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3708 .writefn = pmsav7_rgnr_write,
3709 .resetfn = arm_cp_reset_ignore },
3710 REGINFO_SENTINEL
3711 };
3712
3713 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3714 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3715 .access = PL1_RW, .type = ARM_CP_ALIAS,
3716 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3717 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3718 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3719 .access = PL1_RW, .type = ARM_CP_ALIAS,
3720 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3721 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3722 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3723 .access = PL1_RW,
3724 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3725 .resetvalue = 0, },
3726 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3727 .access = PL1_RW,
3728 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3729 .resetvalue = 0, },
3730 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3731 .access = PL1_RW,
3732 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3733 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3734 .access = PL1_RW,
3735 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3736 /* Protection region base and size registers */
3737 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3738 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3739 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3740 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3741 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3742 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3743 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3744 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3745 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3746 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3747 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3748 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3749 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3750 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3751 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3752 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3753 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3754 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3755 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3756 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3757 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3758 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3759 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3760 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3761 REGINFO_SENTINEL
3762 };
3763
3764 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3765 uint64_t value)
3766 {
3767 TCR *tcr = raw_ptr(env, ri);
3768 int maskshift = extract32(value, 0, 3);
3769
3770 if (!arm_feature(env, ARM_FEATURE_V8)) {
3771 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3772 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3773 * using Long-descriptor translation table format */
3774 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3775 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3776 /* In an implementation that includes the Security Extensions
3777 * TTBCR has additional fields PD0 [4] and PD1 [5] for
3778 * Short-descriptor translation table format.
3779 */
3780 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3781 } else {
3782 value &= TTBCR_N;
3783 }
3784 }
3785
3786 /* Update the masks corresponding to the TCR bank being written
3787 * Note that we always calculate mask and base_mask, but
3788 * they are only used for short-descriptor tables (ie if EAE is 0);
3789 * for long-descriptor tables the TCR fields are used differently
3790 * and the mask and base_mask values are meaningless.
3791 */
3792 tcr->raw_tcr = value;
3793 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3794 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
3795 }
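
/*
 * Worked example (illustrative only, not part of the original source): in
 * the short-descriptor case TTBCR.N (the low three bits written above)
 * splits the VA space between TTBR0 and TTBR1. For N == 2 the code above
 * produces
 *     mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *     base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 * i.e. VAs with either of the two top bits set are translated via TTBR1,
 * and the TTBR0 table base alignment drops from 16K to 4K.
 */
static inline void example_ttbcr_n_masks(void)
{
    int n = 2; /* TTBCR.N */

    g_assert(~(0xffffffffu >> n) == 0xc0000000u);
    g_assert(~((uint32_t)0x3fffu >> n) == 0xfffff000u);
}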
3796
3797 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3798 uint64_t value)
3799 {
3800 ARMCPU *cpu = env_archcpu(env);
3801 TCR *tcr = raw_ptr(env, ri);
3802
3803 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3804 /* With LPAE the TTBCR could result in a change of ASID
3805 * via the TTBCR.A1 bit, so do a TLB flush.
3806 */
3807 tlb_flush(CPU(cpu));
3808 }
3809 /* Preserve the high half of TCR_EL1, set via TTBCR2. */
3810 value = deposit64(tcr->raw_tcr, 0, 32, value);
3811 vmsa_ttbcr_raw_write(env, ri, value);
3812 }
3813
3814 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3815 {
3816 TCR *tcr = raw_ptr(env, ri);
3817
3818 /* Reset both the TCR as well as the masks corresponding to the bank of
3819 * the TCR being reset.
3820 */
3821 tcr->raw_tcr = 0;
3822 tcr->mask = 0;
3823 tcr->base_mask = 0xffffc000u;
3824 }
3825
3826 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
3827 uint64_t value)
3828 {
3829 ARMCPU *cpu = env_archcpu(env);
3830 TCR *tcr = raw_ptr(env, ri);
3831
3832 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3833 tlb_flush(CPU(cpu));
3834 tcr->raw_tcr = value;
3835 }
3836
3837 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3838 uint64_t value)
3839 {
3840 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
3841 if (cpreg_field_is_64bit(ri) &&
3842 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3843 ARMCPU *cpu = env_archcpu(env);
3844 tlb_flush(CPU(cpu));
3845 }
3846 raw_write(env, ri, value);
3847 }
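
/*
 * Illustrative sketch, not part of the original source: in the 64-bit
 * (LPAE/AArch64) TTBR layout the ASID occupies bits [63:48], which is all
 * the check above compares. The helper name below is invented for the
 * example.
 */
static inline bool example_ttbr_asid_changed(uint64_t old_ttbr,
                                             uint64_t new_ttbr)
{
    /* Non-zero iff the ASID fields of the two values differ */
    return extract64(old_ttbr ^ new_ttbr, 48, 16) != 0;
}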
3848
3849 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3850 uint64_t value)
3851 {
3852 /*
3853 * If we are running with E2&0 regime, then an ASID is active.
3854 * Flush if that might be changing. Note we're not checking
3855 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
3856 * holds the active ASID, only checking the field that might.
3857 */
3858 if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
3859 (arm_hcr_el2_eff(env) & HCR_E2H)) {
3860 tlb_flush_by_mmuidx(env_cpu(env),
3861 ARMMMUIdxBit_E20_2 |
3862 ARMMMUIdxBit_E20_2_PAN |
3863 ARMMMUIdxBit_E20_0);
3864 }
3865 raw_write(env, ri, value);
3866 }
3867
3868 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3869 uint64_t value)
3870 {
3871 ARMCPU *cpu = env_archcpu(env);
3872 CPUState *cs = CPU(cpu);
3873
3874 /*
3875 * A change in VMID to the stage2 page table (Stage2) invalidates
3876 * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
3877 */
3878 if (raw_read(env, ri) != value) {
3879 tlb_flush_by_mmuidx(cs,
3880 ARMMMUIdxBit_E10_1 |
3881 ARMMMUIdxBit_E10_1_PAN |
3882 ARMMMUIdxBit_E10_0 |
3883 ARMMMUIdxBit_Stage2);
3884 raw_write(env, ri, value);
3885 }
3886 }
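
/*
 * Illustrative sketch, not part of the original source: VTTBR_EL2 carries
 * the VMID in bits [63:48] (only [55:48] are valid without ARMv8.1-VMID16).
 * The write handler above flushes on any change to the register rather than
 * extracting and comparing just that field, but the field itself can be
 * read out as below; the helper name is invented for the example.
 */
static inline uint64_t example_vttbr_vmid(uint64_t vttbr)
{
    return extract64(vttbr, 48, 16);
}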
3887
3888 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
3889 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3890 .access = PL1_RW, .type = ARM_CP_ALIAS,
3891 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
3892 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
3893 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3894 .access = PL1_RW, .resetvalue = 0,
3895 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3896 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
3897 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3898 .access = PL1_RW, .resetvalue = 0,
3899 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3900 offsetof(CPUARMState, cp15.dfar_ns) } },
3901 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3902 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3903 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3904 .resetvalue = 0, },
3905 REGINFO_SENTINEL
3906 };
3907
3908 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
3909 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3910 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3911 .access = PL1_RW,
3912 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
3913 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
3914 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3915 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3916 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3917 offsetof(CPUARMState, cp15.ttbr0_ns) } },
3918 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
3919 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3920 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3921 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3922 offsetof(CPUARMState, cp15.ttbr1_ns) } },
3923 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3924 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3925 .access = PL1_RW, .writefn = vmsa_tcr_el12_write,
3926 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3927 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
3928 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3929 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
3930 .raw_writefn = vmsa_ttbcr_raw_write,
3931 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
3932 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
3933 REGINFO_SENTINEL
3934 };
3935
3936 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3937 * qemu tlbs nor adjusting cached masks.
3938 */
3939 static const ARMCPRegInfo ttbcr2_reginfo = {
3940 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3941 .access = PL1_RW, .type = ARM_CP_ALIAS,
3942 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
3943 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
3944 };
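
/*
 * Illustrative sketch, not part of the original source: because TTBCR2 is
 * declared with offsetofhigh32(), a 32-bit TTBCR2 write lands in bits
 * [63:32] of the 64-bit tcr_el field, just as an explicit deposit64 of the
 * high half would; vmsa_ttbcr_write() above does the mirror-image merge for
 * the low half. The helper name is invented for the example.
 */
static inline uint64_t example_merge_ttbcr2(uint64_t tcr, uint32_t ttbcr2)
{
    return deposit64(tcr, 32, 32, ttbcr2);
}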
3945
3946 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3947 uint64_t value)
3948 {
3949 env->cp15.c15_ticonfig = value & 0xe7;
3950 /* The OS_TYPE bit in this register changes the reported CPUID! */
3951 env->cp15.c0_cpuid = (value & (1 << 5)) ?
3952 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3953 }
3954
3955 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3956 uint64_t value)
3957 {
3958 env->cp15.c15_threadid = value & 0xffff;
3959 }
3960
3961 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3962 uint64_t value)
3963 {
3964 /* Wait-for-interrupt (deprecated) */
3965 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
3966 }
3967
3968 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3969 uint64_t value)
3970 {
3971 /* On OMAP there are registers indicating the max/min index of dcache lines
3972 * containing a dirty line; cache flush operations have to reset these.
3973 */
3974 env->cp15.c15_i_max = 0x000;
3975 env->cp15.c15_i_min = 0xff0;
3976 }
3977
3978 static const ARMCPRegInfo omap_cp_reginfo[] = {
3979 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3980 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
3981 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
3982 .resetvalue = 0, },
3983 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3984 .access = PL1_RW, .type = ARM_CP_NOP },
3985 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3986 .access = PL1_RW,
3987 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3988 .writefn = omap_ticonfig_write },
3989 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3990 .access = PL1_RW,
3991 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3992 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3993 .access = PL1_RW, .resetvalue = 0xff0,
3994 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3995 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3996 .access = PL1_RW,
3997 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3998 .writefn = omap_threadid_write },
3999 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4000 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4001 .type = ARM_CP_NO_RAW,
4002 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4003 /* TODO: Peripheral port remap register:
4004 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4005 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4006 * when MMU is off.
4007 */
4008 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4009 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4010 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4011 .writefn = omap_cachemaint_write },
4012 { .name = "C9", .cp = 15, .crn = 9,
4013 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4014 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4015 REGINFO_SENTINEL
4016 };
4017
4018 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4019 uint64_t value)
4020 {
4021 env->cp15.c15_cpar = value & 0x3fff;
4022 }
4023
4024 static const ARMCPRegInfo xscale_cp_reginfo[] = {
4025 { .name = "XSCALE_CPAR",
4026 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4027 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4028 .writefn = xscale_cpar_write, },
4029 { .name = "XSCALE_AUXCR",
4030 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4031 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4032 .resetvalue = 0, },
4033 /* XScale specific cache-lockdown: since we have no cache we NOP these
4034 * and hope the guest does not really rely on cache behaviour.
4035 */
4036 { .name = "XSCALE_LOCK_ICACHE_LINE",
4037 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4038 .access = PL1_W, .type = ARM_CP_NOP },
4039 { .name = "XSCALE_UNLOCK_ICACHE",
4040 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4041 .access = PL1_W, .type = ARM_CP_NOP },
4042 { .name = "XSCALE_DCACHE_LOCK",
4043 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4044 .access = PL1_RW, .type = ARM_CP_NOP },
4045 { .name = "XSCALE_UNLOCK_DCACHE",
4046 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4047 .access = PL1_W, .type = ARM_CP_NOP },
4048 REGINFO_SENTINEL
4049 };
4050
4051 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4052 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
4053 * implementation of this implementation-defined space.
4054 * Ideally this should eventually disappear in favour of actually
4055 * implementing the correct behaviour for all cores.
4056 */
4057 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4058 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4059 .access = PL1_RW,
4060 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4061 .resetvalue = 0 },
4062 REGINFO_SENTINEL
4063 };
4064
4065 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4066 /* Cache status: RAZ because we have no cache so it's always clean */
4067 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4068 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4069 .resetvalue = 0 },
4070 REGINFO_SENTINEL
4071 };
4072
4073 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4074 /* We never have a block transfer operation in progress */
4075 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4076 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4077 .resetvalue = 0 },
4078 /* The cache ops themselves: these all NOP for QEMU */
4079 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4080 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4081 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4082 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4083 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4084 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4085 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4086 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4087 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4088 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4089 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4090 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4091 REGINFO_SENTINEL
4092 };
4093
4094 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4095 /* The cache test-and-clean instructions always return (1 << 30)
4096 * to indicate that there are no dirty cache lines.
4097 */
4098 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4099 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4100 .resetvalue = (1 << 30) },
4101 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4102 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4103 .resetvalue = (1 << 30) },
4104 REGINFO_SENTINEL
4105 };
4106
4107 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4108 /* Ignore ReadBuffer accesses */
4109 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4110 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4111 .access = PL1_RW, .resetvalue = 0,
4112 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4113 REGINFO_SENTINEL
4114 };
4115
4116 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4117 {
4118 ARMCPU *cpu = env_archcpu(env);
4119 unsigned int cur_el = arm_current_el(env);
4120 bool secure = arm_is_secure(env);
4121
4122 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
4123 return env->cp15.vpidr_el2;
4124 }
4125 return raw_read(env, ri);
4126 }
4127
4128 static uint64_t mpidr_read_val(CPUARMState *env)
4129 {
4130 ARMCPU *cpu = env_archcpu(env);
4131 uint64_t mpidr = cpu->mp_affinity;
4132
4133 if (arm_feature(env, ARM_FEATURE_V7MP)) {
4134 mpidr |= (1U << 31);
4135 /* Cores which are uniprocessor (non-coherent)
4136 * but still implement the MP extensions set
4137 * bit 30. (For instance, Cortex-R5).
4138 */
4139 if (cpu->mp_is_up) {
4140 mpidr |= (1u << 30);
4141 }
4142 }
4143 return mpidr;
4144 }
4145
4146 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4147 {
4148 unsigned int cur_el = arm_current_el(env);
4149 bool secure = arm_is_secure(env);
4150
4151 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
4152 return env->cp15.vmpidr_el2;
4153 }
4154 return mpidr_read_val(env);
4155 }
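
/*
 * Illustrative sketch, not part of the original source: mp_affinity packs
 * the MPIDR Aff3..Aff0 fields. For a hypothetical coherent, MP-capable
 * CPU 2 in cluster 1 (Aff1 = 1, Aff0 = 2), mpidr_read_val() above would
 * return the value constructed here (bit 31 is RES1 when the MP extensions
 * are implemented, and bit 30 stays clear because the core is not
 * uniprocessor).
 */
static inline uint64_t example_mpidr_value(void)
{
    uint64_t aff = (1 << 8) | 2;    /* Aff1 = 1, Aff0 = 2 */

    return aff | (1U << 31);
}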
4156
4157 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4158 /* NOP AMAIR0/1 */
4159 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4160 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4161 .access = PL1_RW, .type = ARM_CP_CONST,
4162 .resetvalue = 0 },
4163 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4164 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4165 .access = PL1_RW, .type = ARM_CP_CONST,
4166 .resetvalue = 0 },
4167 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4168 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4169 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4170 offsetof(CPUARMState, cp15.par_ns)} },
4171 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4172 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4173 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4174 offsetof(CPUARMState, cp15.ttbr0_ns) },
4175 .writefn = vmsa_ttbr_write, },
4176 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4177 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4178 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4179 offsetof(CPUARMState, cp15.ttbr1_ns) },
4180 .writefn = vmsa_ttbr_write, },
4181 REGINFO_SENTINEL
4182 };
4183
4184 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4185 {
4186 return vfp_get_fpcr(env);
4187 }
4188
4189 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4190 uint64_t value)
4191 {
4192 vfp_set_fpcr(env, value);
4193 }
4194
4195 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4196 {
4197 return vfp_get_fpsr(env);
4198 }
4199
4200 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4201 uint64_t value)
4202 {
4203 vfp_set_fpsr(env, value);
4204 }
4205
4206 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4207 bool isread)
4208 {
4209 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4210 return CP_ACCESS_TRAP;
4211 }
4212 return CP_ACCESS_OK;
4213 }
4214
4215 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4216 uint64_t value)
4217 {
4218 env->daif = value & PSTATE_DAIF;
4219 }
4220
4221 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4222 {
4223 return env->pstate & PSTATE_PAN;
4224 }
4225
4226 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4227 uint64_t value)
4228 {
4229 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4230 }
4231
4232 static const ARMCPRegInfo pan_reginfo = {
4233 .name = "PAN", .state = ARM_CP_STATE_AA64,
4234 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4235 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4236 .readfn = aa64_pan_read, .writefn = aa64_pan_write
4237 };
4238
4239 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4240 {
4241 return env->pstate & PSTATE_UAO;
4242 }
4243
4244 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4245 uint64_t value)
4246 {
4247 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4248 }
4249
4250 static const ARMCPRegInfo uao_reginfo = {
4251 .name = "UAO", .state = ARM_CP_STATE_AA64,
4252 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4253 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4254 .readfn = aa64_uao_read, .writefn = aa64_uao_write
4255 };
4256
4257 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
4258 const ARMCPRegInfo *ri,
4259 bool isread)
4260 {
4261 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
4262 * SCTLR_EL1.UCI is set.
4263 */
4264 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) {
4265 return CP_ACCESS_TRAP;
4266 }
4267 return CP_ACCESS_OK;
4268 }
4269
4270 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4271 * Page D4-1736 (DDI0487A.b)
4272 */
4273
4274 static int vae1_tlbmask(CPUARMState *env)
4275 {
4276 /* Since we exclude secure first, we may read HCR_EL2 directly. */
4277 if (arm_is_secure_below_el3(env)) {
4278 return ARMMMUIdxBit_SE10_1 |
4279 ARMMMUIdxBit_SE10_1_PAN |
4280 ARMMMUIdxBit_SE10_0;
4281 } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
4282 == (HCR_E2H | HCR_TGE)) {
4283 return ARMMMUIdxBit_E20_2 |
4284 ARMMMUIdxBit_E20_2_PAN |
4285 ARMMMUIdxBit_E20_0;
4286 } else {
4287 return ARMMMUIdxBit_E10_1 |
4288 ARMMMUIdxBit_E10_1_PAN |
4289 ARMMMUIdxBit_E10_0;
4290 }
4291 }
4292
4293 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4294 uint64_t value)
4295 {
4296 CPUState *cs = env_cpu(env);
4297 int mask = vae1_tlbmask(env);
4298
4299 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4300 }
4301
4302 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4303 uint64_t value)
4304 {
4305 CPUState *cs = env_cpu(env);
4306 int mask = vae1_tlbmask(env);
4307
4308 if (tlb_force_broadcast(env)) {
4309 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4310 } else {
4311 tlb_flush_by_mmuidx(cs, mask);
4312 }
4313 }
4314
4315 static int alle1_tlbmask(CPUARMState *env)
4316 {
4317 /*
4318 * Note that the 'ALL' scope must invalidate both stage 1 and
4319 * stage 2 translations, whereas most other scopes only invalidate
4320 * stage 1 translations.
4321 */
4322 if (arm_is_secure_below_el3(env)) {
4323 return ARMMMUIdxBit_SE10_1 |
4324 ARMMMUIdxBit_SE10_1_PAN |
4325 ARMMMUIdxBit_SE10_0;
4326 } else if (arm_feature(env, ARM_FEATURE_EL2)) {
4327 return ARMMMUIdxBit_E10_1 |
4328 ARMMMUIdxBit_E10_1_PAN |
4329 ARMMMUIdxBit_E10_0 |
4330 ARMMMUIdxBit_Stage2;
4331 } else {
4332 return ARMMMUIdxBit_E10_1 |
4333 ARMMMUIdxBit_E10_1_PAN |
4334 ARMMMUIdxBit_E10_0;
4335 }
4336 }
4337
4338 static int e2_tlbmask(CPUARMState *env)
4339 {
4340 /* TODO: ARMv8.4-SecEL2 */
4341 return ARMMMUIdxBit_E20_0 |
4342 ARMMMUIdxBit_E20_2 |
4343 ARMMMUIdxBit_E20_2_PAN |
4344 ARMMMUIdxBit_E2;
4345 }
4346
4347 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4348 uint64_t value)
4349 {
4350 CPUState *cs = env_cpu(env);
4351 int mask = alle1_tlbmask(env);
4352
4353 tlb_flush_by_mmuidx(cs, mask);
4354 }
4355
4356 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4357 uint64_t value)
4358 {
4359 CPUState *cs = env_cpu(env);
4360 int mask = e2_tlbmask(env);
4361
4362 tlb_flush_by_mmuidx(cs, mask);
4363 }
4364
4365 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4366 uint64_t value)
4367 {
4368 ARMCPU *cpu = env_archcpu(env);
4369 CPUState *cs = CPU(cpu);
4370
4371 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
4372 }
4373
4374 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4375 uint64_t value)
4376 {
4377 CPUState *cs = env_cpu(env);
4378 int mask = alle1_tlbmask(env);
4379
4380 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4381 }
4382
4383 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4384 uint64_t value)
4385 {
4386 CPUState *cs = env_cpu(env);
4387 int mask = e2_tlbmask(env);
4388
4389 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4390 }
4391
4392 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4393 uint64_t value)
4394 {
4395 CPUState *cs = env_cpu(env);
4396
4397 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
4398 }
4399
4400 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4401 uint64_t value)
4402 {
4403 /* Invalidate by VA, EL2
4404 * Currently handles both VAE2 and VALE2, since we don't support
4405 * flush-last-level-only.
4406 */
4407 CPUState *cs = env_cpu(env);
4408 int mask = e2_tlbmask(env);
4409 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4410
4411 tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4412 }
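
/*
 * Worked example (illustrative only, not part of the original source): the
 * TLBI "by VA" payload carries VA[55:12] in its bits [43:0], and the
 * sextract64() above sign-extends from bit 55, so a canonical kernel
 * address survives the round trip:
 */
static inline void example_tlbi_va_roundtrip(void)
{
    uint64_t va = 0xffff800012345000ull;
    uint64_t payload = extract64(va, 12, 44);   /* what the guest writes */

    g_assert((uint64_t)sextract64(payload << 12, 0, 56) == va);
}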
4413
4414 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4415 uint64_t value)
4416 {
4417 /* Invalidate by VA, EL3
4418 * Currently handles both VAE3 and VALE3, since we don't support
4419 * flush-last-level-only.
4420 */
4421 ARMCPU *cpu = env_archcpu(env);
4422 CPUState *cs = CPU(cpu);
4423 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4424
4425 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
4426 }
4427
4428 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4429 uint64_t value)
4430 {
4431 CPUState *cs = env_cpu(env);
4432 int mask = vae1_tlbmask(env);
4433 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4434
4435 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
4436 }
4437
4438 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4439 uint64_t value)
4440 {
4441 /* Invalidate by VA, EL1&0 (AArch64 version).
4442 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4443 * since we don't support flush-for-specific-ASID-only or
4444 * flush-last-level-only.
4445 */
4446 CPUState *cs = env_cpu(env);
4447 int mask = vae1_tlbmask(env);
4448 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4449
4450 if (tlb_force_broadcast(env)) {
4451 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
4452 } else {
4453 tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4454 }
4455 }
4456
4457 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4458 uint64_t value)
4459 {
4460 CPUState *cs = env_cpu(env);
4461 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4462
4463 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4464 ARMMMUIdxBit_E2);
4465 }
4466
4467 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4468 uint64_t value)
4469 {
4470 CPUState *cs = env_cpu(env);
4471 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4472
4473 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4474 ARMMMUIdxBit_SE3);
4475 }
4476
4477 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4478 uint64_t value)
4479 {
4480 /* Invalidate by IPA. This has to invalidate any structures that
4481 * contain only stage 2 translation information, but does not need
4482 * to apply to structures that contain combined stage 1 and stage 2
4483 * translation information.
4484 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
4485 */
4486 ARMCPU *cpu = env_archcpu(env);
4487 CPUState *cs = CPU(cpu);
4488 uint64_t pageaddr;
4489
4490 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4491 return;
4492 }
4493
4494 pageaddr = sextract64(value << 12, 0, 48);
4495
4496 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
4497 }
4498
4499 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4500 uint64_t value)
4501 {
4502 CPUState *cs = env_cpu(env);
4503 uint64_t pageaddr;
4504
4505 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4506 return;
4507 }
4508
4509 pageaddr = sextract64(value << 12, 0, 48);
4510
4511 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4512 ARMMMUIdxBit_Stage2);
4513 }
4514
4515 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4516 bool isread)
4517 {
4518 int cur_el = arm_current_el(env);
4519
4520 if (cur_el < 2) {
4521 uint64_t hcr = arm_hcr_el2_eff(env);
4522
4523 if (cur_el == 0) {
4524 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4525 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4526 return CP_ACCESS_TRAP_EL2;
4527 }
4528 } else {
4529 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4530 return CP_ACCESS_TRAP;
4531 }
4532 if (hcr & HCR_TDZ) {
4533 return CP_ACCESS_TRAP_EL2;
4534 }
4535 }
4536 } else if (hcr & HCR_TDZ) {
4537 return CP_ACCESS_TRAP_EL2;
4538 }
4539 }
4540 return CP_ACCESS_OK;
4541 }
4542
4543 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4544 {
4545 ARMCPU *cpu = env_archcpu(env);
4546 int dzp_bit = 1 << 4;
4547
4548 /* DZP indicates whether DC ZVA access is allowed */
4549 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4550 dzp_bit = 0;
4551 }
4552 return cpu->dcz_blocksize | dzp_bit;
4553 }
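
/*
 * Worked example (illustrative only, not part of the original source):
 * DCZID_EL0[3:0] is log2 of the DC ZVA block size in words, and bit 4
 * (DZP) is set above while ZVA is prohibited. A block-size field of 4
 * therefore means (1 << 4) * 4 = 64 bytes cleared per DC ZVA.
 */
static inline unsigned example_dczid_block_bytes(uint64_t dczid)
{
    return (1u << extract64(dczid, 0, 4)) * 4;
}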
4554
4555 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4556 bool isread)
4557 {
4558 if (!(env->pstate & PSTATE_SP)) {
4559 /* Access to SP_EL0 is undefined if it's being used as
4560 * the stack pointer.
4561 */
4562 return CP_ACCESS_TRAP_UNCATEGORIZED;
4563 }
4564 return CP_ACCESS_OK;
4565 }
4566
4567 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4568 {
4569 return env->pstate & PSTATE_SP;
4570 }
4571
4572 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4573 {
4574 update_spsel(env, val);
4575 }
4576
4577 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4578 uint64_t value)
4579 {
4580 ARMCPU *cpu = env_archcpu(env);
4581
4582 if (raw_read(env, ri) == value) {
4583 /* Skip the TLB flush if nothing actually changed; Linux likes
4584 * to do a lot of pointless SCTLR writes.
4585 */
4586 return;
4587 }
4588
4589 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4590 /* M bit is RAZ/WI for PMSA with no MPU implemented */
4591 value &= ~SCTLR_M;
4592 }
4593
4594 raw_write(env, ri, value);
4595 /* ??? Lots of these bits are not implemented. */
4596 /* This may enable/disable the MMU, so do a TLB flush. */
4597 tlb_flush(CPU(cpu));
4598
4599 if (ri->type & ARM_CP_SUPPRESS_TB_END) {
4600 /*
4601 * Normally we would always end the TB on an SCTLR write; see the
4602 * comment in ARMCPRegInfo sctlr initialization below for why Xscale
4603 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4604 * of hflags from the translator, so do it here.
4605 */
4606 arm_rebuild_hflags(env);
4607 }
4608 }
4609
4610 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4611 bool isread)
4612 {
4613 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4614 return CP_ACCESS_TRAP_FP_EL2;
4615 }
4616 if (env->cp15.cptr_el[3] & CPTR_TFP) {
4617 return CP_ACCESS_TRAP_FP_EL3;
4618 }
4619 return CP_ACCESS_OK;
4620 }
4621
4622 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4623 uint64_t value)
4624 {
4625 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4626 }
4627
4628 static const ARMCPRegInfo v8_cp_reginfo[] = {
4629 /* Minimal set of EL0-visible registers. This will need to be expanded
4630 * significantly for system emulation of AArch64 CPUs.
4631 */
4632 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4633 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4634 .access = PL0_RW, .type = ARM_CP_NZCV },
4635 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4636 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4637 .type = ARM_CP_NO_RAW,
4638 .access = PL0_RW, .accessfn = aa64_daif_access,
4639 .fieldoffset = offsetof(CPUARMState, daif),
4640 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4641 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4642 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4643 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4644 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4645 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4646 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4647 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4648 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4649 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4650 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4651 .access = PL0_R, .type = ARM_CP_NO_RAW,
4652 .readfn = aa64_dczid_read },
4653 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4654 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4655 .access = PL0_W, .type = ARM_CP_DC_ZVA,
4656 #ifndef CONFIG_USER_ONLY
4657 /* Avoid overhead of an access check that always passes in user-mode */
4658 .accessfn = aa64_zva_access,
4659 #endif
4660 },
4661 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4662 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4663 .access = PL1_R, .type = ARM_CP_CURRENTEL },
4664 /* Cache ops: all NOPs since we don't emulate caches */
4665 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4666 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4667 .access = PL1_W, .type = ARM_CP_NOP },
4668 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4669 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4670 .access = PL1_W, .type = ARM_CP_NOP },
4671 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4672 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4673 .access = PL0_W, .type = ARM_CP_NOP,
4674 .accessfn = aa64_cacheop_access },
4675 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4676 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4677 .access = PL1_W, .type = ARM_CP_NOP },
4678 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4679 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4680 .access = PL1_W, .type = ARM_CP_NOP },
4681 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4682 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4683 .access = PL0_W, .type = ARM_CP_NOP,
4684 .accessfn = aa64_cacheop_access },
4685 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4686 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4687 .access = PL1_W, .type = ARM_CP_NOP },
4688 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4689 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4690 .access = PL0_W, .type = ARM_CP_NOP,
4691 .accessfn = aa64_cacheop_access },
4692 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4693 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4694 .access = PL0_W, .type = ARM_CP_NOP,
4695 .accessfn = aa64_cacheop_access },
4696 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4697 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4698 .access = PL1_W, .type = ARM_CP_NOP },
4699 /* TLBI operations */
4700 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4701 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4702 .access = PL1_W, .type = ARM_CP_NO_RAW,
4703 .writefn = tlbi_aa64_vmalle1is_write },
4704 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4705 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4706 .access = PL1_W, .type = ARM_CP_NO_RAW,
4707 .writefn = tlbi_aa64_vae1is_write },
4708 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4709 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4710 .access = PL1_W, .type = ARM_CP_NO_RAW,
4711 .writefn = tlbi_aa64_vmalle1is_write },
4712 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4713 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4714 .access = PL1_W, .type = ARM_CP_NO_RAW,
4715 .writefn = tlbi_aa64_vae1is_write },
4716 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4717 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4718 .access = PL1_W, .type = ARM_CP_NO_RAW,
4719 .writefn = tlbi_aa64_vae1is_write },
4720 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4721 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4722 .access = PL1_W, .type = ARM_CP_NO_RAW,
4723 .writefn = tlbi_aa64_vae1is_write },
4724 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4725 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4726 .access = PL1_W, .type = ARM_CP_NO_RAW,
4727 .writefn = tlbi_aa64_vmalle1_write },
4728 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4729 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4730 .access = PL1_W, .type = ARM_CP_NO_RAW,
4731 .writefn = tlbi_aa64_vae1_write },
4732 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4733 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4734 .access = PL1_W, .type = ARM_CP_NO_RAW,
4735 .writefn = tlbi_aa64_vmalle1_write },
4736 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4737 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4738 .access = PL1_W, .type = ARM_CP_NO_RAW,
4739 .writefn = tlbi_aa64_vae1_write },
4740 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4741 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4742 .access = PL1_W, .type = ARM_CP_NO_RAW,
4743 .writefn = tlbi_aa64_vae1_write },
4744 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4745 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4746 .access = PL1_W, .type = ARM_CP_NO_RAW,
4747 .writefn = tlbi_aa64_vae1_write },
4748 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4749 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4750 .access = PL2_W, .type = ARM_CP_NO_RAW,
4751 .writefn = tlbi_aa64_ipas2e1is_write },
4752 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4753 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4754 .access = PL2_W, .type = ARM_CP_NO_RAW,
4755 .writefn = tlbi_aa64_ipas2e1is_write },
4756 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4757 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4758 .access = PL2_W, .type = ARM_CP_NO_RAW,
4759 .writefn = tlbi_aa64_alle1is_write },
4760 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4761 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4762 .access = PL2_W, .type = ARM_CP_NO_RAW,
4763 .writefn = tlbi_aa64_alle1is_write },
4764 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4765 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4766 .access = PL2_W, .type = ARM_CP_NO_RAW,
4767 .writefn = tlbi_aa64_ipas2e1_write },
4768 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4769 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4770 .access = PL2_W, .type = ARM_CP_NO_RAW,
4771 .writefn = tlbi_aa64_ipas2e1_write },
4772 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4773 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4774 .access = PL2_W, .type = ARM_CP_NO_RAW,
4775 .writefn = tlbi_aa64_alle1_write },
4776 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4777 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4778 .access = PL2_W, .type = ARM_CP_NO_RAW,
4779 .writefn = tlbi_aa64_alle1is_write },
4780 #ifndef CONFIG_USER_ONLY
4781 /* 64 bit address translation operations */
4782 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4783 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4784 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4785 .writefn = ats_write64 },
4786 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4787 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4788 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4789 .writefn = ats_write64 },
4790 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4791 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4792 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4793 .writefn = ats_write64 },
4794 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4795 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4796 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4797 .writefn = ats_write64 },
4798 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4799 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4800 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4801 .writefn = ats_write64 },
4802 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4803 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4804 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4805 .writefn = ats_write64 },
4806 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4807 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4808 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4809 .writefn = ats_write64 },
4810 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4811 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4812 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4813 .writefn = ats_write64 },
4814 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4815 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4816 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4817 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4818 .writefn = ats_write64 },
4819 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4820 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4821 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4822 .writefn = ats_write64 },
4823 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4824 .type = ARM_CP_ALIAS,
4825 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4826 .access = PL1_RW, .resetvalue = 0,
4827 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4828 .writefn = par_write },
4829 #endif
4830 /* TLB invalidate last level of translation table walk */
4831 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4832 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
4833 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4834 .type = ARM_CP_NO_RAW, .access = PL1_W,
4835 .writefn = tlbimvaa_is_write },
4836 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4837 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
4838 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4839 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
4840 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4841 .type = ARM_CP_NO_RAW, .access = PL2_W,
4842 .writefn = tlbimva_hyp_write },
4843 { .name = "TLBIMVALHIS",
4844 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4845 .type = ARM_CP_NO_RAW, .access = PL2_W,
4846 .writefn = tlbimva_hyp_is_write },
4847 { .name = "TLBIIPAS2",
4848 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4849 .type = ARM_CP_NO_RAW, .access = PL2_W,
4850 .writefn = tlbiipas2_write },
4851 { .name = "TLBIIPAS2IS",
4852 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4853 .type = ARM_CP_NO_RAW, .access = PL2_W,
4854 .writefn = tlbiipas2_is_write },
4855 { .name = "TLBIIPAS2L",
4856 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4857 .type = ARM_CP_NO_RAW, .access = PL2_W,
4858 .writefn = tlbiipas2_write },
4859 { .name = "TLBIIPAS2LIS",
4860 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4861 .type = ARM_CP_NO_RAW, .access = PL2_W,
4862 .writefn = tlbiipas2_is_write },
4863 /* 32 bit cache operations */
4864 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4865 .type = ARM_CP_NOP, .access = PL1_W },
4866 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
4867 .type = ARM_CP_NOP, .access = PL1_W },
4868 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4869 .type = ARM_CP_NOP, .access = PL1_W },
4870 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
4871 .type = ARM_CP_NOP, .access = PL1_W },
4872 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
4873 .type = ARM_CP_NOP, .access = PL1_W },
4874 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
4875 .type = ARM_CP_NOP, .access = PL1_W },
4876 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4877 .type = ARM_CP_NOP, .access = PL1_W },
4878 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4879 .type = ARM_CP_NOP, .access = PL1_W },
4880 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
4881 .type = ARM_CP_NOP, .access = PL1_W },
4882 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4883 .type = ARM_CP_NOP, .access = PL1_W },
4884 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
4885 .type = ARM_CP_NOP, .access = PL1_W },
4886 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
4887 .type = ARM_CP_NOP, .access = PL1_W },
4888 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4889 .type = ARM_CP_NOP, .access = PL1_W },
4890 /* MMU Domain access control / MPU write buffer control */
4891 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
4892 .access = PL1_RW, .resetvalue = 0,
4893 .writefn = dacr_write, .raw_writefn = raw_write,
4894 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
4895 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
4896 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
4897 .type = ARM_CP_ALIAS,
4898 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
4899 .access = PL1_RW,
4900 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
4901 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
4902 .type = ARM_CP_ALIAS,
4903 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
4904 .access = PL1_RW,
4905 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
4906 /* We rely on the access checks not allowing the guest to write to the
4907 * state field when SPSel indicates that it's being used as the stack
4908 * pointer.
4909 */
4910 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
4911 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
4912 .access = PL1_RW, .accessfn = sp_el0_access,
4913 .type = ARM_CP_ALIAS,
4914 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
4915 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
4916 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
4917 .access = PL2_RW, .type = ARM_CP_ALIAS,
4918 .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
4919 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
4920 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
4921 .type = ARM_CP_NO_RAW,
4922 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
4923 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
4924 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
4925 .type = ARM_CP_ALIAS,
4926 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
4927 .access = PL2_RW, .accessfn = fpexc32_access },
4928 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
4929 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
4930 .access = PL2_RW, .resetvalue = 0,
4931 .writefn = dacr_write, .raw_writefn = raw_write,
4932 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
4933 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
4934 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
4935 .access = PL2_RW, .resetvalue = 0,
4936 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
4937 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
4938 .type = ARM_CP_ALIAS,
4939 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
4940 .access = PL2_RW,
4941 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
4942 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
4943 .type = ARM_CP_ALIAS,
4944 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
4945 .access = PL2_RW,
4946 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
4947 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
4948 .type = ARM_CP_ALIAS,
4949 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
4950 .access = PL2_RW,
4951 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
4952 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
4953 .type = ARM_CP_ALIAS,
4954 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
4955 .access = PL2_RW,
4956 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
4957 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
4958 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
4959 .resetvalue = 0,
4960 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
4961 { .name = "SDCR", .type = ARM_CP_ALIAS,
4962 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
4963 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4964 .writefn = sdcr_write,
4965 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
4966 REGINFO_SENTINEL
4967 };
4968
4969 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
4970 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
4971 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4972 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4973 .access = PL2_RW,
4974 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
4975 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
4977 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4978 .access = PL2_RW,
4979 .type = ARM_CP_CONST, .resetvalue = 0 },
4980 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4981 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4982 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4983 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4984 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4985 .access = PL2_RW,
4986 .type = ARM_CP_CONST, .resetvalue = 0 },
4987 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4988 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4989 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4990 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4991 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4992 .access = PL2_RW, .type = ARM_CP_CONST,
4993 .resetvalue = 0 },
4994 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4995 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4996 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4997 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4998 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4999 .access = PL2_RW, .type = ARM_CP_CONST,
5000 .resetvalue = 0 },
5001 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5002 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5003 .access = PL2_RW, .type = ARM_CP_CONST,
5004 .resetvalue = 0 },
5005 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5006 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5007 .access = PL2_RW, .type = ARM_CP_CONST,
5008 .resetvalue = 0 },
5009 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5010 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5011 .access = PL2_RW, .type = ARM_CP_CONST,
5012 .resetvalue = 0 },
5013 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5014 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5015 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5016 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
5017 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5018 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5019 .type = ARM_CP_CONST, .resetvalue = 0 },
5020 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5021 .cp = 15, .opc1 = 6, .crm = 2,
5022 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5023 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
5024 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5025 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5026 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5027 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5028 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5029 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5030 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5031 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5032 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5033 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5034 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5035 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5036 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5037 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5038 .resetvalue = 0 },
5039 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5040 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5041 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5042 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5043 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5044 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5045 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5046 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5047 .resetvalue = 0 },
5048 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5049 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5050 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5051 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5052 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5053 .resetvalue = 0 },
5054 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5055 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5056 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5057 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5058 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5059 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5060 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5061 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5062 .access = PL2_RW, .accessfn = access_tda,
5063 .type = ARM_CP_CONST, .resetvalue = 0 },
5064 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
5065 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5066 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5067 .type = ARM_CP_CONST, .resetvalue = 0 },
5068 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5069 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5070 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5071 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5072 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5073 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5074 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5075 .type = ARM_CP_CONST,
5076 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5077 .access = PL2_RW, .resetvalue = 0 },
5078 REGINFO_SENTINEL
5079 };
5080
5081 /* Ditto, but for registers which exist in ARMv8 but not v7 */
5082 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
5083 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5084 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5085 .access = PL2_RW,
5086 .type = ARM_CP_CONST, .resetvalue = 0 },
5087 REGINFO_SENTINEL
5088 };
5089
5090 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5091 {
5092 ARMCPU *cpu = env_archcpu(env);
5093 /* Begin with bits defined in base ARMv8.0. */
5094 uint64_t valid_mask = MAKE_64BIT_MASK(0, 34);
5095
5096 if (arm_feature(env, ARM_FEATURE_EL3)) {
5097 valid_mask &= ~HCR_HCD;
5098 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5099 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5100 * However, if we're using the SMC PSCI conduit then QEMU is
5101 * effectively acting like EL3 firmware and so the guest at
5102 * EL2 should retain the ability to prevent EL1 from being
5103 * able to make SMC calls into the ersatz firmware; in
5104 * that case HCR.TSC should be read/write.
5105 */
5106 valid_mask &= ~HCR_TSC;
5107 }
5108 if (cpu_isar_feature(aa64_vh, cpu)) {
5109 valid_mask |= HCR_E2H;
5110 }
5111 if (cpu_isar_feature(aa64_lor, cpu)) {
5112 valid_mask |= HCR_TLOR;
5113 }
5114 if (cpu_isar_feature(aa64_pauth, cpu)) {
5115 valid_mask |= HCR_API | HCR_APK;
5116 }
5117
5118 /* Clear RES0 bits. */
5119 value &= valid_mask;
5120
5121 /* These bits change the MMU setup:
5122 * HCR_VM enables stage 2 translation
5123 * HCR_PTW forbids certain page-table setups
5124 * HCR_DC disables stage 1 and enables stage 2 translation
5125 */
5126 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
5127 tlb_flush(CPU(cpu));
5128 }
5129 env->cp15.hcr_el2 = value;
5130
5131 /*
5132 * Updates to VI and VF require us to update the status of
5133 * virtual interrupts, which are the logical OR of these bits
5134 * and the state of the input lines from the GIC. (This requires
5135 * that we have the iothread lock, which is done by marking the
5136 * reginfo structs as ARM_CP_IO.)
5137 * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5138 * possible for it to be taken immediately, because VIRQ and
5139 * VFIQ are masked unless running at EL0 or EL1, and HCR
5140 * can only be written at EL2.
5141 */
5142 g_assert(qemu_mutex_iothread_locked());
5143 arm_cpu_update_virq(cpu);
5144 arm_cpu_update_vfiq(cpu);
5145 }
5146
5147 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5148 uint64_t value)
5149 {
5150 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
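/* deposit64() keeps the low 32 bits of hcr_el2 and replaces bits [63:32]
 * with the value written via HCR2, so the two halves can be updated
 * independently from AArch32.
 */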
5151 value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5152 hcr_write(env, NULL, value);
5153 }
5154
5155 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5156 uint64_t value)
5157 {
5158 /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5159 value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5160 hcr_write(env, NULL, value);
5161 }
5162
5163 /*
5164 * Return the effective value of HCR_EL2.
5165 * Bits that are not included here:
5166 * RW (read from SCR_EL3.RW as needed)
5167 */
5168 uint64_t arm_hcr_el2_eff(CPUARMState *env)
5169 {
5170 uint64_t ret = env->cp15.hcr_el2;
5171
5172 if (arm_is_secure_below_el3(env)) {
5173 /*
5174 * "This register has no effect if EL2 is not enabled in the
5175 * current Security state". This is ARMv8.4-SecEL2 speak for
5176 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5177 *
5178 * Prior to that, the language was "In an implementation that
5179 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5180 * as if this field is 0 for all purposes other than a direct
5181 * read or write access of HCR_EL2". With lots of enumeration
5182 * on a per-field basis. In current QEMU, this condition
5183 * is arm_is_secure_below_el3.
5184 *
5185 * Since the v8.4 language applies to the entire register, and
5186 * appears to be backward compatible, use that.
5187 */
5188 ret = 0;
5189 } else if (ret & HCR_TGE) {
5190 /* These bits are up-to-date as of ARMv8.4. */
5191 if (ret & HCR_E2H) {
5192 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5193 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5194 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5195 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
5196 } else {
5197 ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5198 }
5199 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5200 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5201 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5202 HCR_TLOR);
5203 }
5204
5205 return ret;
5206 }
5207
5208 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5209 uint64_t value)
5210 {
5211 /*
5212 * For A-profile AArch32 EL3, if NSACR.CP10
5213 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5214 */
5215 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5216 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5217 value &= ~(0x3 << 10);
5218 value |= env->cp15.cptr_el[2] & (0x3 << 10);
5219 }
5220 env->cp15.cptr_el[2] = value;
5221 }
5222
5223 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5224 {
5225 /*
5226 * For A-profile AArch32 EL3, if NSACR.CP10
5227 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5228 */
5229 uint64_t value = env->cp15.cptr_el[2];
5230
5231 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5232 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5233 value |= 0x3 << 10;
5234 }
5235 return value;
5236 }
5237
5238 static const ARMCPRegInfo el2_cp_reginfo[] = {
5239 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5240 .type = ARM_CP_IO,
5241 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5242 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5243 .writefn = hcr_write },
5244 { .name = "HCR", .state = ARM_CP_STATE_AA32,
5245 .type = ARM_CP_ALIAS | ARM_CP_IO,
5246 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5247 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5248 .writefn = hcr_writelow },
5249 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5250 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5251 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5252 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5253 .type = ARM_CP_ALIAS,
5254 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5255 .access = PL2_RW,
5256 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5257 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5258 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5259 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5260 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5261 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5262 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5263 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5264 .type = ARM_CP_ALIAS,
5265 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5266 .access = PL2_RW,
5267 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5268 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5269 .type = ARM_CP_ALIAS,
5270 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5271 .access = PL2_RW,
5272 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5273 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5274 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5275 .access = PL2_RW, .writefn = vbar_write,
5276 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5277 .resetvalue = 0 },
5278 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5279 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5280 .access = PL3_RW, .type = ARM_CP_ALIAS,
5281 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5282 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5283 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5284 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5285 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5286 .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5287 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5288 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5289 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
5290 .resetvalue = 0 },
5291 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5292 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5293 .access = PL2_RW, .type = ARM_CP_ALIAS,
5294 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
5295 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5296 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5297 .access = PL2_RW, .type = ARM_CP_CONST,
5298 .resetvalue = 0 },
5299 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
5300 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5301 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5302 .access = PL2_RW, .type = ARM_CP_CONST,
5303 .resetvalue = 0 },
5304 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5305 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5306 .access = PL2_RW, .type = ARM_CP_CONST,
5307 .resetvalue = 0 },
5308 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5309 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5310 .access = PL2_RW, .type = ARM_CP_CONST,
5311 .resetvalue = 0 },
5312 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5313 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5314 .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
5315 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
5316 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
5317 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5318 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5319 .type = ARM_CP_ALIAS,
5320 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5321 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5322 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5323 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5324 .access = PL2_RW,
5325 /* no .writefn needed as this can't cause an ASID change;
5326 * no .raw_writefn or .resetfn needed as we never use mask/base_mask
5327 */
5328 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5329 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5330 .cp = 15, .opc1 = 6, .crm = 2,
5331 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5332 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5333 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
5334 .writefn = vttbr_write },
5335 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5336 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5337 .access = PL2_RW, .writefn = vttbr_write,
5338 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
5339 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5340 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5341 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
5342 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
5343 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5344 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5345 .access = PL2_RW, .resetvalue = 0,
5346 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
5347 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5348 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5349 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
5350 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5351 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5352 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5353 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5354 { .name = "TLBIALLNSNH",
5355 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5356 .type = ARM_CP_NO_RAW, .access = PL2_W,
5357 .writefn = tlbiall_nsnh_write },
5358 { .name = "TLBIALLNSNHIS",
5359 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5360 .type = ARM_CP_NO_RAW, .access = PL2_W,
5361 .writefn = tlbiall_nsnh_is_write },
5362 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5363 .type = ARM_CP_NO_RAW, .access = PL2_W,
5364 .writefn = tlbiall_hyp_write },
5365 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5366 .type = ARM_CP_NO_RAW, .access = PL2_W,
5367 .writefn = tlbiall_hyp_is_write },
5368 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5369 .type = ARM_CP_NO_RAW, .access = PL2_W,
5370 .writefn = tlbimva_hyp_write },
5371 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5372 .type = ARM_CP_NO_RAW, .access = PL2_W,
5373 .writefn = tlbimva_hyp_is_write },
5374 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
5375 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5376 .type = ARM_CP_NO_RAW, .access = PL2_W,
5377 .writefn = tlbi_aa64_alle2_write },
5378 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
5379 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5380 .type = ARM_CP_NO_RAW, .access = PL2_W,
5381 .writefn = tlbi_aa64_vae2_write },
5382 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
5383 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5384 .access = PL2_W, .type = ARM_CP_NO_RAW,
5385 .writefn = tlbi_aa64_vae2_write },
5386 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
5387 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5388 .access = PL2_W, .type = ARM_CP_NO_RAW,
5389 .writefn = tlbi_aa64_alle2is_write },
5390 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
5391 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5392 .type = ARM_CP_NO_RAW, .access = PL2_W,
5393 .writefn = tlbi_aa64_vae2is_write },
5394 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
5395 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5396 .access = PL2_W, .type = ARM_CP_NO_RAW,
5397 .writefn = tlbi_aa64_vae2is_write },
5398 #ifndef CONFIG_USER_ONLY
5399 /* Unlike the other EL2-related AT operations, these must
5400 * UNDEF from EL3 if EL2 is not implemented, which is why we
5401 * define them here rather than with the rest of the AT ops.
5402 */
5403 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5404 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5405 .access = PL2_W, .accessfn = at_s1e2_access,
5406 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5407 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5408 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5409 .access = PL2_W, .accessfn = at_s1e2_access,
5410 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5411 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5412 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5413 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5414 * to behave as if SCR.NS was 1.
5415 */
5416 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5417 .access = PL2_W,
5418 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5419 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5420 .access = PL2_W,
5421 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5422 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5423 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5424 /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
5425 * reset values as IMPDEF. We choose to reset to 3 to comply with
5426 * both ARMv7 and ARMv8.
5427 */
5428 .access = PL2_RW, .resetvalue = 3,
5429 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
5430 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5431 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5432 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
5433 .writefn = gt_cntvoff_write,
5434 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5435 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5436 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
5437 .writefn = gt_cntvoff_write,
5438 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5439 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5440 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5441 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5442 .type = ARM_CP_IO, .access = PL2_RW,
5443 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5444 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5445 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5446 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
5447 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5448 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5449 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5450 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5451 .resetfn = gt_hyp_timer_reset,
5452 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
5453 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5454 .type = ARM_CP_IO,
5455 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5456 .access = PL2_RW,
5457 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
5458 .resetvalue = 0,
5459 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
5460 #endif
5461 /* The only field of MDCR_EL2 that has a defined architectural reset value
5462 * is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N; we
5463 * don't model that dependency, so using zero as a reset
5464 * value for MDCR_EL2 is okay.
5465 */
5466 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5467 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5468 .access = PL2_RW, .resetvalue = 0,
5469 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
5470 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5471 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5472 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5473 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5474 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5475 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5476 .access = PL2_RW,
5477 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5478 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5479 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5480 .access = PL2_RW,
5481 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
5482 REGINFO_SENTINEL
5483 };
5484
5485 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
5486 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5487 .type = ARM_CP_ALIAS | ARM_CP_IO,
5488 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5489 .access = PL2_RW,
5490 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
5491 .writefn = hcr_writehigh },
5492 REGINFO_SENTINEL
5493 };
5494
5495 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
5496 bool isread)
5497 {
5498 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5499 * At Secure EL1 it traps to EL3.
5500 */
5501 if (arm_current_el(env) == 3) {
5502 return CP_ACCESS_OK;
5503 }
5504 if (arm_is_secure_below_el3(env)) {
5505 return CP_ACCESS_TRAP_EL3;
5506 }
5507 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
5508 if (isread) {
5509 return CP_ACCESS_OK;
5510 }
5511 return CP_ACCESS_TRAP_UNCATEGORIZED;
5512 }
5513
5514 static const ARMCPRegInfo el3_cp_reginfo[] = {
5515 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5516 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5517 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5518 .resetvalue = 0, .writefn = scr_write },
5519 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5520 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5521 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5522 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5523 .writefn = scr_write },
5524 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5525 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5526 .access = PL3_RW, .resetvalue = 0,
5527 .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5528 { .name = "SDER",
5529 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5530 .access = PL3_RW, .resetvalue = 0,
5531 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5532 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5533 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5534 .writefn = vbar_write, .resetvalue = 0,
5535 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5536 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5537 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5538 .access = PL3_RW, .resetvalue = 0,
5539 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5540 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5541 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5542 .access = PL3_RW,
5543 /* no .writefn needed as this can't cause an ASID change;
5544 * we must provide a .raw_writefn and .resetfn because we handle
5545 * reset and migration for the AArch32 TTBCR(S), which might be
5546 * using mask and base_mask.
5547 */
5548 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
5549 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5550 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5551 .type = ARM_CP_ALIAS,
5552 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5553 .access = PL3_RW,
5554 .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5555 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5556 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5557 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5558 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5559 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5560 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5561 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5562 .type = ARM_CP_ALIAS,
5563 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5564 .access = PL3_RW,
5565 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5566 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5567 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5568 .access = PL3_RW, .writefn = vbar_write,
5569 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5570 .resetvalue = 0 },
5571 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5572 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5573 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5574 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5575 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5576 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5577 .access = PL3_RW, .resetvalue = 0,
5578 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5579 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5580 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5581 .access = PL3_RW, .type = ARM_CP_CONST,
5582 .resetvalue = 0 },
5583 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5584 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5585 .access = PL3_RW, .type = ARM_CP_CONST,
5586 .resetvalue = 0 },
5587 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5588 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5589 .access = PL3_RW, .type = ARM_CP_CONST,
5590 .resetvalue = 0 },
5591 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5592 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5593 .access = PL3_W, .type = ARM_CP_NO_RAW,
5594 .writefn = tlbi_aa64_alle3is_write },
5595 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5596 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5597 .access = PL3_W, .type = ARM_CP_NO_RAW,
5598 .writefn = tlbi_aa64_vae3is_write },
5599 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5600 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5601 .access = PL3_W, .type = ARM_CP_NO_RAW,
5602 .writefn = tlbi_aa64_vae3is_write },
5603 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5604 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5605 .access = PL3_W, .type = ARM_CP_NO_RAW,
5606 .writefn = tlbi_aa64_alle3_write },
5607 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5608 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5609 .access = PL3_W, .type = ARM_CP_NO_RAW,
5610 .writefn = tlbi_aa64_vae3_write },
5611 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5612 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5613 .access = PL3_W, .type = ARM_CP_NO_RAW,
5614 .writefn = tlbi_aa64_vae3_write },
5615 REGINFO_SENTINEL
5616 };
5617
5618 #ifndef CONFIG_USER_ONLY
5619 /* Test if system register redirection is to occur in the current state. */
5620 static bool redirect_for_e2h(CPUARMState *env)
5621 {
5622 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
5623 }
5624
5625 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
5626 {
5627 CPReadFn *readfn;
5628
5629 if (redirect_for_e2h(env)) {
5630 /* Switch to the saved EL2 version of the register. */
5631 ri = ri->opaque;
5632 readfn = ri->readfn;
5633 } else {
5634 readfn = ri->orig_readfn;
5635 }
5636 if (readfn == NULL) {
5637 readfn = raw_read;
5638 }
5639 return readfn(env, ri);
5640 }
5641
5642 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
5643 uint64_t value)
5644 {
5645 CPWriteFn *writefn;
5646
5647 if (redirect_for_e2h(env)) {
5648 /* Switch to the saved EL2 version of the register. */
5649 ri = ri->opaque;
5650 writefn = ri->writefn;
5651 } else {
5652 writefn = ri->orig_writefn;
5653 }
5654 if (writefn == NULL) {
5655 writefn = raw_write;
5656 }
5657 writefn(env, ri, value);
5658 }
5659
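/* With FEAT_VHE (HCR_EL2.E2H set), accesses to many EL1 system registers
 * made from EL2 are redirected to their EL2 counterparts, and new *_EL12
 * encodings reach the underlying EL1 registers.  We implement this by
 * pointing each EL1 reginfo's opaque field at the EL2 reginfo and routing
 * reads and writes through el2_e2h_read/el2_e2h_write above.
 */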
5660 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5661 {
5662 struct E2HAlias {
5663 uint32_t src_key, dst_key, new_key;
5664 const char *src_name, *dst_name, *new_name;
5665 bool (*feature)(const ARMISARegisters *id);
5666 };
5667
5668 #define K(op0, op1, crn, crm, op2) \
5669 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5670
5671 static const struct E2HAlias aliases[] = {
5672 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
5673 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5674 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
5675 "CPACR", "CPTR_EL2", "CPACR_EL12" },
5676 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
5677 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5678 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
5679 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5680 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
5681 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5682 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
5683 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5684 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
5685 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5686 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
5687 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5688 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
5689 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5690 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
5691 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5692 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
5693 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5694 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5695 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5696 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5697 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5698 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5699 "VBAR", "VBAR_EL2", "VBAR_EL12" },
5700 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5701 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5702 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5703 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5704
5705 /*
5706 * Note that redirection of ZCR is mentioned in the description
5707 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5708 * not in the summary table.
5709 */
5710 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
5711 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
5712
5713 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5714 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5715 };
5716 #undef K
5717
5718 size_t i;
5719
5720 for (i = 0; i < ARRAY_SIZE(aliases); i++) {
5721 const struct E2HAlias *a = &aliases[i];
5722 ARMCPRegInfo *src_reg, *dst_reg;
5723
5724 if (a->feature && !a->feature(&cpu->isar)) {
5725 continue;
5726 }
5727
5728 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
5729 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
5730 g_assert(src_reg != NULL);
5731 g_assert(dst_reg != NULL);
5732
5733 /* Cross-compare names to detect typos in the keys. */
5734 g_assert(strcmp(src_reg->name, a->src_name) == 0);
5735 g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
5736
5737 /* None of the core system registers use opaque; we will. */
5738 g_assert(src_reg->opaque == NULL);
5739
5740 /* Create alias before redirection so we dup the right data. */
5741 if (a->new_key) {
5742 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
5743 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
5744 bool ok;
5745
5746 new_reg->name = a->new_name;
5747 new_reg->type |= ARM_CP_ALIAS;
5748 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
5749 new_reg->access &= PL2_RW | PL3_RW;
5750
5751 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
5752 g_assert(ok);
5753 }
5754
5755 src_reg->opaque = dst_reg;
5756 src_reg->orig_readfn = src_reg->readfn ?: raw_read;
5757 src_reg->orig_writefn = src_reg->writefn ?: raw_write;
5758 if (!src_reg->raw_readfn) {
5759 src_reg->raw_readfn = raw_read;
5760 }
5761 if (!src_reg->raw_writefn) {
5762 src_reg->raw_writefn = raw_write;
5763 }
5764 src_reg->readfn = el2_e2h_read;
5765 src_reg->writefn = el2_e2h_write;
5766 }
5767 }
5768 #endif
5769
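/* CTR_EL0 reads from EL0 can trap to EL1 or EL2 via SCTLR_ELx.UCT, and
 * reads from EL0 or EL1 can trap to EL2 when HCR_EL2.TID2 is set.
 */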
5770 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5771 bool isread)
5772 {
5773 int cur_el = arm_current_el(env);
5774
5775 if (cur_el < 2) {
5776 uint64_t hcr = arm_hcr_el2_eff(env);
5777
5778 if (cur_el == 0) {
5779 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5780 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
5781 return CP_ACCESS_TRAP_EL2;
5782 }
5783 } else {
5784 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5785 return CP_ACCESS_TRAP;
5786 }
5787 if (hcr & HCR_TID2) {
5788 return CP_ACCESS_TRAP_EL2;
5789 }
5790 }
5791 } else if (hcr & HCR_TID2) {
5792 return CP_ACCESS_TRAP_EL2;
5793 }
5794 }
5795
5800 return CP_ACCESS_OK;
5801 }
5802
5803 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
5804 uint64_t value)
5805 {
5806 /* Writes to OSLAR_EL1 may update the OS lock status, which can be
5807 * read via a bit in OSLSR_EL1.
5808 */
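/* The AArch32 form of OSLAR requires the architected key value 0xC5ACCE55
 * to set the lock; the AArch64 form uses only bit 0.
 */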
5809 int oslock;
5810
5811 if (ri->state == ARM_CP_STATE_AA32) {
5812 oslock = (value == 0xC5ACCE55);
5813 } else {
5814 oslock = value & 1;
5815 }
5816
5817 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
5818 }
5819
5820 static const ARMCPRegInfo debug_cp_reginfo[] = {
5821 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
5822 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
5823 * unlike DBGDRAR it is never accessible from EL0.
5824 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
5825 * accessor.
5826 */
5827 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
5828 .access = PL0_R, .accessfn = access_tdra,
5829 .type = ARM_CP_CONST, .resetvalue = 0 },
5830 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
5831 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5832 .access = PL1_R, .accessfn = access_tdra,
5833 .type = ARM_CP_CONST, .resetvalue = 0 },
5834 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
5835 .access = PL0_R, .accessfn = access_tdra,
5836 .type = ARM_CP_CONST, .resetvalue = 0 },
5837 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
5838 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
5839 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5840 .access = PL1_RW, .accessfn = access_tda,
5841 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
5842 .resetvalue = 0 },
5843 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
5844 * We don't implement the configurable EL0 access.
5845 */
5846 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
5847 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5848 .type = ARM_CP_ALIAS,
5849 .access = PL1_R, .accessfn = access_tda,
5850 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
5851 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
5852 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
5853 .access = PL1_W, .type = ARM_CP_NO_RAW,
5854 .accessfn = access_tdosa,
5855 .writefn = oslar_write },
5856 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
5857 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
5858 .access = PL1_R, .resetvalue = 10,
5859 .accessfn = access_tdosa,
5860 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
5861 /* Dummy OSDLR_EL1: 32-bit Linux will read this */
5862 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
5863 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
5864 .access = PL1_RW, .accessfn = access_tdosa,
5865 .type = ARM_CP_NOP },
5866 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
5867 * implement vector catch debug events yet.
5868 */
5869 { .name = "DBGVCR",
5870 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5871 .access = PL1_RW, .accessfn = access_tda,
5872 .type = ARM_CP_NOP },
5873 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
5874 * to save and restore a 32-bit guest's DBGVCR)
5875 */
5876 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
5877 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
5878 .access = PL2_RW, .accessfn = access_tda,
5879 .type = ARM_CP_NOP },
5880 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
5881 * Channel but Linux may try to access this register. The 32-bit
5882 * alias is DBGDCCINT.
5883 */
5884 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
5885 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5886 .access = PL1_RW, .accessfn = access_tda,
5887 .type = ARM_CP_NOP },
5888 REGINFO_SENTINEL
5889 };
5890
5891 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
5892 /* 64 bit access versions of the (dummy) debug registers */
5893 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
5894 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5895 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
5896 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5897 REGINFO_SENTINEL
5898 };
5899
5900 /* Return the exception level to which exceptions should be taken
5901 * via SVEAccessTrap. If an exception should be routed through
5902 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
5903 * take care of raising that exception.
5904 * C.f. the ARM pseudocode function CheckSVEEnabled.
5905 */
5906 int sve_exception_el(CPUARMState *env, int el)
5907 {
5908 #ifndef CONFIG_USER_ONLY
5909 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
5910
5911 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
5912 bool disabled = false;
5913
5914 /* The CPACR.ZEN controls traps to EL1:
5915 * 0, 2 : trap EL0 and EL1 accesses
5916 * 1 : trap only EL0 accesses
5917 * 3 : trap no accesses
5918 */
5919 if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
5920 disabled = true;
5921 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
5922 disabled = el == 0;
5923 }
5924 if (disabled) {
5925 /* route_to_el2 */
5926 return hcr_el2 & HCR_TGE ? 2 : 1;
5927 }
5928
5929 /* Check CPACR.FPEN. */
5930 if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
5931 disabled = true;
5932 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
5933 disabled = el == 0;
5934 }
5935 if (disabled) {
5936 return 0;
5937 }
5938 }
5939
5940 /* CPTR_EL2. Since TZ and TFP are positive,
5941 * they will be zero when EL2 is not present.
5942 */
5943 if (el <= 2 && !arm_is_secure_below_el3(env)) {
5944 if (env->cp15.cptr_el[2] & CPTR_TZ) {
5945 return 2;
5946 }
5947 if (env->cp15.cptr_el[2] & CPTR_TFP) {
5948 return 0;
5949 }
5950 }
5951
5952 /* CPTR_EL3. Since EZ is negative we must check for EL3. */
5953 if (arm_feature(env, ARM_FEATURE_EL3)
5954 && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
5955 return 3;
5956 }
5957 #endif
5958 return 0;
5959 }
5960
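/* Map a requested vector length (in the 'vq - 1' form used by ZCR_ELx.LEN)
 * down to the nearest length supported by this CPU, as recorded in
 * cpu->sve_vq_map; the result never exceeds the request.
 */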
5961 static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
5962 {
5963 uint32_t end_len;
5964
5965 end_len = start_len &= 0xf;
5966 if (!test_bit(start_len, cpu->sve_vq_map)) {
5967 end_len = find_last_bit(cpu->sve_vq_map, start_len);
5968 assert(end_len < start_len);
5969 }
5970 return end_len;
5971 }
5972
5973 /*
5974 * Given that SVE is enabled, return the vector length for EL.
5975 */
5976 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
5977 {
5978 ARMCPU *cpu = env_archcpu(env);
5979 uint32_t zcr_len = cpu->sve_max_vq - 1;
5980
5981 if (el <= 1) {
5982 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
5983 }
5984 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
5985 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
5986 }
5987 if (arm_feature(env, ARM_FEATURE_EL3)) {
5988 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
5989 }
5990
5991 return sve_zcr_get_valid_len(cpu, zcr_len);
5992 }
5993
5994 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5995 uint64_t value)
5996 {
5997 int cur_el = arm_current_el(env);
5998 int old_len = sve_zcr_len_for_el(env, cur_el);
5999 int new_len;
6000
6001 /* Bits other than [3:0] are RAZ/WI. */
6002 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6003 raw_write(env, ri, value & 0xf);
6004
6005 /*
6006 * Because we arrived here, we know both FP and SVE are enabled;
6007 * otherwise we would have trapped access to the ZCR_ELn register.
6008 */
6009 new_len = sve_zcr_len_for_el(env, cur_el);
6010 if (new_len < old_len) {
6011 aarch64_sve_narrow_vq(env, new_len + 1);
6012 }
6013 }
6014
6015 static const ARMCPRegInfo zcr_el1_reginfo = {
6016 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6017 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6018 .access = PL1_RW, .type = ARM_CP_SVE,
6019 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6020 .writefn = zcr_write, .raw_writefn = raw_write
6021 };
6022
6023 static const ARMCPRegInfo zcr_el2_reginfo = {
6024 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6025 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6026 .access = PL2_RW, .type = ARM_CP_SVE,
6027 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6028 .writefn = zcr_write, .raw_writefn = raw_write
6029 };
6030
6031 static const ARMCPRegInfo zcr_no_el2_reginfo = {
6032 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6033 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6034 .access = PL2_RW, .type = ARM_CP_SVE,
6035 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
6036 };
6037
6038 static const ARMCPRegInfo zcr_el3_reginfo = {
6039 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6040 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6041 .access = PL3_RW, .type = ARM_CP_SVE,
6042 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6043 .writefn = zcr_write, .raw_writefn = raw_write
6044 };
6045
6046 void hw_watchpoint_update(ARMCPU *cpu, int n)
6047 {
6048 CPUARMState *env = &cpu->env;
6049 vaddr len = 0;
6050 vaddr wvr = env->cp15.dbgwvr[n];
6051 uint64_t wcr = env->cp15.dbgwcr[n];
6052 int mask;
6053 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
6054
6055 if (env->cpu_watchpoint[n]) {
6056 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
6057 env->cpu_watchpoint[n] = NULL;
6058 }
6059
6060 if (!extract64(wcr, 0, 1)) {
6061 /* E bit clear : watchpoint disabled */
6062 return;
6063 }
6064
6065 switch (extract64(wcr, 3, 2)) {
6066 case 0:
6067 /* LSC 00 is reserved and must behave as if the wp is disabled */
6068 return;
6069 case 1:
6070 flags |= BP_MEM_READ;
6071 break;
6072 case 2:
6073 flags |= BP_MEM_WRITE;
6074 break;
6075 case 3:
6076 flags |= BP_MEM_ACCESS;
6077 break;
6078 }
6079
6080 /* Attempts to use both MASK and BAS fields simultaneously are
6081 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
6082 * thus generating a watchpoint for every byte in the masked region.
6083 */
6084 mask = extract64(wcr, 24, 4);
6085 if (mask == 1 || mask == 2) {
6086 /* Reserved values of MASK; we must act as if the mask value was
6087 * some non-reserved value, or as if the watchpoint were disabled.
6088 * We choose the latter.
6089 */
6090 return;
6091 } else if (mask) {
6092 /* Watchpoint covers an aligned area up to 2GB in size */
6093 len = 1ULL << mask;
6094 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
6095 * whether the watchpoint fires when the unmasked bits match; we opt
6096 * to generate the exceptions.
6097 */
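/* For example, MASK = 12 gives len = 4KB and the watchpoint then covers
 * the 4KB-aligned region containing WVR.
 */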
6098 wvr &= ~(len - 1);
6099 } else {
6100 /* Watchpoint covers bytes defined by the byte address select bits */
6101 int bas = extract64(wcr, 5, 8);
6102 int basstart;
6103
6104 if (bas == 0) {
6105 /* This must act as if the watchpoint is disabled */
6106 return;
6107 }
6108
6109 if (extract64(wvr, 2, 1)) {
6110 /* Deprecated case of an address that is only 4-aligned. BAS[7:4] are
6111 * ignored, and BAS[3:0] define which bytes to watch.
6112 */
6113 bas &= 0xf;
6114 }
6115 /* The BAS bits are supposed to be programmed to indicate a contiguous
6116 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
6117 * we fire for each byte in the word/doubleword addressed by the WVR.
6118 * We choose to ignore any non-zero bits after the first range of 1s.
6119 */
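/* For example, BAS = 0b00111100 gives basstart = 2 and len = 4, i.e. a
 * 4-byte watchpoint starting at WVR + 2.
 */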
6120 basstart = ctz32(bas);
6121 len = cto32(bas >> basstart);
6122 wvr += basstart;
6123 }
6124
6125 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
6126 &env->cpu_watchpoint[n]);
6127 }
6128
6129 void hw_watchpoint_update_all(ARMCPU *cpu)
6130 {
6131 int i;
6132 CPUARMState *env = &cpu->env;
6133
6134 /* Completely clear out existing QEMU watchpoints and our array, to
6135 * avoid possible stale entries following migration load.
6136 */
6137 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
6138 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
6139
6140 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
6141 hw_watchpoint_update(cpu, i);
6142 }
6143 }
6144
6145 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6146 uint64_t value)
6147 {
6148 ARMCPU *cpu = env_archcpu(env);
6149 int i = ri->crm;
6150
6151 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
6152 * register reads and behaves as if values written are sign extended.
6153 * Bits [1:0] are RES0.
6154 */
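/* For example, writing 0x0001000000000003 stores 0xffff000000000000:
 * bit [48] is replicated into bits [63:49] and bits [1:0] are cleared.
 */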
6155 value = sextract64(value, 0, 49) & ~3ULL;
6156
6157 raw_write(env, ri, value);
6158 hw_watchpoint_update(cpu, i);
6159 }
6160
6161 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6162 uint64_t value)
6163 {
6164 ARMCPU *cpu = env_archcpu(env);
6165 int i = ri->crm;
6166
6167 raw_write(env, ri, value);
6168 hw_watchpoint_update(cpu, i);
6169 }
6170
6171 void hw_breakpoint_update(ARMCPU *cpu, int n)
6172 {
6173 CPUARMState *env = &cpu->env;
6174 uint64_t bvr = env->cp15.dbgbvr[n];
6175 uint64_t bcr = env->cp15.dbgbcr[n];
6176 vaddr addr;
6177 int bt;
6178 int flags = BP_CPU;
6179
6180 if (env->cpu_breakpoint[n]) {
6181 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
6182 env->cpu_breakpoint[n] = NULL;
6183 }
6184
6185 if (!extract64(bcr, 0, 1)) {
6186 /* E bit clear : breakpoint disabled */
6187 return;
6188 }
6189
6190 bt = extract64(bcr, 20, 4);
6191
6192 switch (bt) {
6193 case 4: /* unlinked address mismatch (reserved if AArch64) */
6194 case 5: /* linked address mismatch (reserved if AArch64) */
6195 qemu_log_mask(LOG_UNIMP,
6196 "arm: address mismatch breakpoint types not implemented\n");
6197 return;
6198 case 0: /* unlinked address match */
6199 case 1: /* linked address match */
6200 {
6201 /* Bits [63:49] are hardwired to the value of bit [48]; that is,
6202 * we behave as if the register was sign extended. Bits [1:0] are
6203 * RES0. The BAS field is used to allow setting breakpoints on 16
6204 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
6205 * a bp will fire if the addresses covered by the bp and the addresses
6206 * covered by the insn overlap but the insn doesn't start at the
6207 * start of the bp address range. We choose to require the insn and
6208 * the bp to have the same address. The constraints on writing to
6209 * BAS enforced in dbgbcr_write mean we have only four cases:
6210 * 0b0000 => no breakpoint
6211 * 0b0011 => breakpoint on addr
6212 * 0b1100 => breakpoint on addr + 2
6213 * 0b1111 => breakpoint on addr
6214 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
6215 */
6216 int bas = extract64(bcr, 5, 4);
6217 addr = sextract64(bvr, 0, 49) & ~3ULL;
6218 if (bas == 0) {
6219 return;
6220 }
6221 if (bas == 0xc) {
6222 addr += 2;
6223 }
6224 break;
6225 }
6226 case 2: /* unlinked context ID match */
6227 case 8: /* unlinked VMID match (reserved if no EL2) */
6228 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
6229 qemu_log_mask(LOG_UNIMP,
6230 "arm: unlinked context breakpoint types not implemented\n");
6231 return;
6232 case 9: /* linked VMID match (reserved if no EL2) */
6233 case 11: /* linked context ID and VMID match (reserved if no EL2) */
6234 case 3: /* linked context ID match */
6235 default:
6236 /* We must generate no events for Linked context matches (unless
6237 * they are linked to by some other bp/wp, which is handled in
6238 * updates for the linking bp/wp). We choose to also generate no events
6239 * for reserved values.
6240 */
6241 return;
6242 }
6243
6244 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
6245 }
6246
6247 void hw_breakpoint_update_all(ARMCPU *cpu)
6248 {
6249 int i;
6250 CPUARMState *env = &cpu->env;
6251
6252 /* Completely clear out existing QEMU breakpoints and our array, to
6253 * avoid possible stale entries following migration load.
6254 */
6255 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
6256 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
6257
6258 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
6259 hw_breakpoint_update(cpu, i);
6260 }
6261 }
6262
6263 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6264 uint64_t value)
6265 {
6266 ARMCPU *cpu = env_archcpu(env);
6267 int i = ri->crm;
6268
6269 raw_write(env, ri, value);
6270 hw_breakpoint_update(cpu, i);
6271 }
6272
6273 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6274 uint64_t value)
6275 {
6276 ARMCPU *cpu = env_archcpu(env);
6277 int i = ri->crm;
6278
6279 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
6280 * copy of BAS[0].
6281 */
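/* For example, a write of BAS = 0b0101 is stored as BAS = 0b1111, and a
 * write of BAS = 0b0001 is stored as BAS = 0b0011.
 */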
6282 value = deposit64(value, 6, 1, extract64(value, 5, 1));
6283 value = deposit64(value, 8, 1, extract64(value, 7, 1));
6284
6285 raw_write(env, ri, value);
6286 hw_breakpoint_update(cpu, i);
6287 }
6288
6289 static void define_debug_regs(ARMCPU *cpu)
6290 {
6291 /* Define v7 and v8 architectural debug registers.
6292 * These are just dummy implementations for now.
6293 */
6294 int i;
6295 int wrps, brps, ctx_cmps;
6296 ARMCPRegInfo dbgdidr = {
6297 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
6298 .access = PL0_R, .accessfn = access_tda,
6299 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
6300 };
6301
6302 /* Note that all these register fields hold "number of Xs minus 1". */
6303 brps = arm_num_brps(cpu);
6304 wrps = arm_num_wrps(cpu);
6305 ctx_cmps = arm_num_ctx_cmps(cpu);
6306
6307 assert(ctx_cmps <= brps);
6308
6309 define_one_arm_cp_reg(cpu, &dbgdidr);
6310 define_arm_cp_regs(cpu, debug_cp_reginfo);
6311
6312 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
6313 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
6314 }
6315
6316 for (i = 0; i < brps; i++) {
6317 ARMCPRegInfo dbgregs[] = {
6318 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
6319 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
6320 .access = PL1_RW, .accessfn = access_tda,
6321 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
6322 .writefn = dbgbvr_write, .raw_writefn = raw_write
6323 },
6324 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
6325 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
6326 .access = PL1_RW, .accessfn = access_tda,
6327 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
6328 .writefn = dbgbcr_write, .raw_writefn = raw_write
6329 },
6330 REGINFO_SENTINEL
6331 };
6332 define_arm_cp_regs(cpu, dbgregs);
6333 }
6334
6335 for (i = 0; i < wrps; i++) {
6336 ARMCPRegInfo dbgregs[] = {
6337 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
6338 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
6339 .access = PL1_RW, .accessfn = access_tda,
6340 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
6341 .writefn = dbgwvr_write, .raw_writefn = raw_write
6342 },
6343 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
6344 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
6345 .access = PL1_RW, .accessfn = access_tda,
6346 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
6347 .writefn = dbgwcr_write, .raw_writefn = raw_write
6348 },
6349 REGINFO_SENTINEL
6350 };
6351 define_arm_cp_regs(cpu, dbgregs);
6352 }
6353 }
6354
6355 static void define_pmu_regs(ARMCPU *cpu)
6356 {
6357 /*
6358 * v7 performance monitor control register: same implementor
6359 * field as main ID register, and we implement four counters in
6360 * addition to the cycle count register.
6361 */
6362 unsigned int i, pmcrn = 4;
6363 ARMCPRegInfo pmcr = {
6364 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6365 .access = PL0_RW,
6366 .type = ARM_CP_IO | ARM_CP_ALIAS,
6367 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6368 .accessfn = pmreg_access, .writefn = pmcr_write,
6369 .raw_writefn = raw_write,
6370 };
6371 ARMCPRegInfo pmcr64 = {
6372 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6373 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6374 .access = PL0_RW, .accessfn = pmreg_access,
6375 .type = ARM_CP_IO,
6376 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6377 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
6378 PMCRLC,
6379 .writefn = pmcr_write, .raw_writefn = raw_write,
6380 };
6381 define_one_arm_cp_reg(cpu, &pmcr);
6382 define_one_arm_cp_reg(cpu, &pmcr64);
6383 for (i = 0; i < pmcrn; i++) {
6384 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6385 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6386 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6387 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
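/*
 * PMEVCNTR<n>_EL0 and PMEVTYPER<n>_EL0 are architecturally encoded with
 * CRm = 0b10:n<4:3> (i.e. 8 | n[4:3]) and CRm = 0b11:n<4:3> (12 | n[4:3])
 * respectively, with opc2 = n<2:0>; e.g. counter 5 is CRm = 8, opc2 = 5.
 * The expressions below implement exactly that.
 */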
6388 ARMCPRegInfo pmev_regs[] = {
6389 { .name = pmevcntr_name, .cp = 15, .crn = 14,
6390 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6391 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6392 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6393 .accessfn = pmreg_access },
6394 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6395 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6396 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6397 .type = ARM_CP_IO,
6398 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6399 .raw_readfn = pmevcntr_rawread,
6400 .raw_writefn = pmevcntr_rawwrite },
6401 { .name = pmevtyper_name, .cp = 15, .crn = 14,
6402 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6403 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6404 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6405 .accessfn = pmreg_access },
6406 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6407 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6408 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6409 .type = ARM_CP_IO,
6410 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6411 .raw_writefn = pmevtyper_rawwrite },
6412 REGINFO_SENTINEL
6413 };
6414 define_arm_cp_regs(cpu, pmev_regs);
6415 g_free(pmevcntr_name);
6416 g_free(pmevcntr_el0_name);
6417 g_free(pmevtyper_name);
6418 g_free(pmevtyper_el0_name);
6419 }
6420 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
6421 ARMCPRegInfo v81_pmu_regs[] = {
6422 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6423 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6424 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6425 .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6426 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6427 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6428 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6429 .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6430 REGINFO_SENTINEL
6431 };
6432 define_arm_cp_regs(cpu, v81_pmu_regs);
6433 }
6434 if (cpu_isar_feature(any_pmu_8_4, cpu)) {
6435 static const ARMCPRegInfo v84_pmmir = {
6436 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6437 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6438 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6439 .resetvalue = 0
6440 };
6441 define_one_arm_cp_reg(cpu, &v84_pmmir);
6442 }
6443 }
6444
6445 /* We don't know until after realize whether there's a GICv3
6446 * attached, and that is what registers the gicv3 sysregs.
6447 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and ID_AA64PFR0_EL1
6448 * at runtime.
6449 */
6450 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6451 {
6452 ARMCPU *cpu = env_archcpu(env);
6453 uint64_t pfr1 = cpu->id_pfr1;
6454
6455 if (env->gicv3state) {
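/* Advertise the GICv3 CPU interface: ID_PFR1.GIC (bits [31:28]) = 1 */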
6456 pfr1 |= 1 << 28;
6457 }
6458 return pfr1;
6459 }
6460
6461 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6462 {
6463 ARMCPU *cpu = env_archcpu(env);
6464 uint64_t pfr0 = cpu->isar.id_aa64pfr0;
6465
6466 if (env->gicv3state) {
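/* Advertise the GICv3 CPU interface: ID_AA64PFR0_EL1.GIC (bits [27:24]) = 1 */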
6467 pfr0 |= 1 << 24;
6468 }
6469 return pfr0;
6470 }
6471
6472 /* Shared logic between LORID and the rest of the LOR* registers.
6473 * Secure state has already been dealt with.
6474 */
6475 static CPAccessResult access_lor_ns(CPUARMState *env)
6476 {
6477 int el = arm_current_el(env);
6478
6479 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6480 return CP_ACCESS_TRAP_EL2;
6481 }
6482 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6483 return CP_ACCESS_TRAP_EL3;
6484 }
6485 return CP_ACCESS_OK;
6486 }
6487
6488 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
6489 bool isread)
6490 {
6491 if (arm_is_secure_below_el3(env)) {
6492 /* Access ok in secure mode. */
6493 return CP_ACCESS_OK;
6494 }
6495 return access_lor_ns(env);
6496 }
6497
6498 static CPAccessResult access_lor_other(CPUARMState *env,
6499 const ARMCPRegInfo *ri, bool isread)
6500 {
6501 if (arm_is_secure_below_el3(env)) {
6502 /* Access denied in secure mode. */
6503 return CP_ACCESS_TRAP;
6504 }
6505 return access_lor_ns(env);
6506 }
6507
6508 /*
6509 * A trivial implementation of ARMv8.1-LOR leaves all of these
6510 * registers fixed at 0, which indicates that there are zero
6511 * supported Limited Ordering regions.
6512 */
6513 static const ARMCPRegInfo lor_reginfo[] = {
6514 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6515 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6516 .access = PL1_RW, .accessfn = access_lor_other,
6517 .type = ARM_CP_CONST, .resetvalue = 0 },
6518 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6519 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6520 .access = PL1_RW, .accessfn = access_lor_other,
6521 .type = ARM_CP_CONST, .resetvalue = 0 },
6522 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6523 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6524 .access = PL1_RW, .accessfn = access_lor_other,
6525 .type = ARM_CP_CONST, .resetvalue = 0 },
6526 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6527 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6528 .access = PL1_RW, .accessfn = access_lor_other,
6529 .type = ARM_CP_CONST, .resetvalue = 0 },
6530 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6531 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6532 .access = PL1_R, .accessfn = access_lorid,
6533 .type = ARM_CP_CONST, .resetvalue = 0 },
6534 REGINFO_SENTINEL
6535 };
6536
6537 #ifdef TARGET_AARCH64
6538 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6539 bool isread)
6540 {
6541 int el = arm_current_el(env);
6542
6543 if (el < 2 &&
6544 arm_feature(env, ARM_FEATURE_EL2) &&
6545 !(arm_hcr_el2_eff(env) & HCR_APK)) {
6546 return CP_ACCESS_TRAP_EL2;
6547 }
6548 if (el < 3 &&
6549 arm_feature(env, ARM_FEATURE_EL3) &&
6550 !(env->cp15.scr_el3 & SCR_APK)) {
6551 return CP_ACCESS_TRAP_EL3;
6552 }
6553 return CP_ACCESS_OK;
6554 }
6555
6556 static const ARMCPRegInfo pauth_reginfo[] = {
6557 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6558 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6559 .access = PL1_RW, .accessfn = access_pauth,
6560 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
6561 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6562 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6563 .access = PL1_RW, .accessfn = access_pauth,
6564 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
6565 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6566 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6567 .access = PL1_RW, .accessfn = access_pauth,
6568 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
6569 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6570 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6571 .access = PL1_RW, .accessfn = access_pauth,
6572 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
6573 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6574 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6575 .access = PL1_RW, .accessfn = access_pauth,
6576 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
6577 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6578 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6579 .access = PL1_RW, .accessfn = access_pauth,
6580 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
6581 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6582 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6583 .access = PL1_RW, .accessfn = access_pauth,
6584 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
6585 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6586 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6587 .access = PL1_RW, .accessfn = access_pauth,
6588 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
6589 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6590 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6591 .access = PL1_RW, .accessfn = access_pauth,
6592 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
6593 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6594 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6595 .access = PL1_RW, .accessfn = access_pauth,
6596 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
6597 REGINFO_SENTINEL
6598 };
6599
6600 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
6601 {
6602 Error *err = NULL;
6603 uint64_t ret;
6604
6605 /* Success sets NZCV = 0000. */
6606 env->NF = env->CF = env->VF = 0, env->ZF = 1;
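/*
 * Note QEMU's lazy flag representation: Z is considered set when
 * env->ZF == 0, so writing 1 here leaves Z clear; N and V are taken from
 * bit 31 of env->NF and env->VF, and C directly from env->CF.
 */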
6607
6608 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
6609 /*
6610 * ??? Failed for unknown reasons in the crypto subsystem.
6611 * The best we can do is log the reason and return the
6612 * timed-out indication to the guest. There is no reason
6613 * we know to expect this failure to be transitory, so the
6614 * guest may well hang retrying the operation.
6615 */
6616 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
6617 ri->name, error_get_pretty(err));
6618 error_free(err);
6619
6620 env->ZF = 0; /* NZCV = 0100 */
6621 return 0;
6622 }
6623 return ret;
6624 }
6625
6626 /* We do not support re-seeding, so the two registers operate the same. */
6627 static const ARMCPRegInfo rndr_reginfo[] = {
6628 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
6629 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6630 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
6631 .access = PL0_R, .readfn = rndr_readfn },
6632 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
6633 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6634 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
6635 .access = PL0_R, .readfn = rndr_readfn },
6636 REGINFO_SENTINEL
6637 };
6638
6639 #ifndef CONFIG_USER_ONLY
6640 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
6641 uint64_t value)
6642 {
6643 ARMCPU *cpu = env_archcpu(env);
6644 /* CTR_EL0 System register -> DminLine, bits [19:16] */
6645 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
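/*
 * DminLine (extracted above) is log2 of the number of 4-byte words in the
 * smallest data cache line, so e.g. DminLine == 4 gives a 64-byte
 * writeback granule.
 */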
6646 uint64_t vaddr_in = (uint64_t) value;
6647 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
6648 void *haddr;
6649 int mem_idx = cpu_mmu_index(env, false);
6650
6651 /* This won't be crossing page boundaries */
6652 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
6653 if (haddr) {
6654
6655 ram_addr_t offset;
6656 MemoryRegion *mr;
6657
6658 /* RCU lock is already being held */
6659 mr = memory_region_from_host(haddr, &offset);
6660
6661 if (mr) {
6662 memory_region_do_writeback(mr, offset, dline_size);
6663 }
6664 }
6665 }
6666
6667 static const ARMCPRegInfo dcpop_reg[] = {
6668 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
6669 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
6670 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6671 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
6672 REGINFO_SENTINEL
6673 };
6674
6675 static const ARMCPRegInfo dcpodp_reg[] = {
6676 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
6677 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
6678 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6679 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
6680 REGINFO_SENTINEL
6681 };
6682 #endif /*CONFIG_USER_ONLY*/
6683
6684 #endif
6685
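/*
 * Access checks for the "restriction by context" prediction invalidation
 * instructions (CFP RCTX, DVP RCTX, CPP RCTX): EL0 use is gated on
 * SCTLR_ELx.EnRCTX, and EL1 use traps to EL2 when nested virtualization
 * (HCR_EL2.NV) is in effect.
 */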
6686 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
6687 bool isread)
6688 {
6689 int el = arm_current_el(env);
6690
6691 if (el == 0) {
6692 uint64_t sctlr = arm_sctlr(env, el);
6693 if (!(sctlr & SCTLR_EnRCTX)) {
6694 return CP_ACCESS_TRAP;
6695 }
6696 } else if (el == 1) {
6697 uint64_t hcr = arm_hcr_el2_eff(env);
6698 if (hcr & HCR_NV) {
6699 return CP_ACCESS_TRAP_EL2;
6700 }
6701 }
6702 return CP_ACCESS_OK;
6703 }
6704
6705 static const ARMCPRegInfo predinv_reginfo[] = {
6706 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
6707 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
6708 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6709 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
6710 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
6711 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6712 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
6713 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
6714 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6715 /*
6716 * Note the AArch32 opcodes have a different OPC1.
6717 */
6718 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
6719 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
6720 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6721 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
6722 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
6723 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6724 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
6725 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
6726 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6727 REGINFO_SENTINEL
6728 };
6729
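/*
 * HCR_EL2.TID3 traps EL1 reads of the ID group 3 registers (the ID_*,
 * ID_AA64* and MVFR* blocks) to EL2; these accessfns implement that trap
 * for the ID registers defined below.
 */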
6730 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
6731 bool isread)
6732 {
6733 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
6734 return CP_ACCESS_TRAP_EL2;
6735 }
6736
6737 return CP_ACCESS_OK;
6738 }
6739
6740 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
6741 bool isread)
6742 {
6743 if (arm_feature(env, ARM_FEATURE_V8)) {
6744 return access_aa64_tid3(env, ri, isread);
6745 }
6746
6747 return CP_ACCESS_OK;
6748 }
6749
6750 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
6751 bool isread)
6752 {
6753 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
6754 return CP_ACCESS_TRAP_EL2;
6755 }
6756
6757 return CP_ACCESS_OK;
6758 }
6759
6760 static const ARMCPRegInfo jazelle_regs[] = {
6761 { .name = "JIDR",
6762 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
6763 .access = PL1_R, .accessfn = access_jazelle,
6764 .type = ARM_CP_CONST, .resetvalue = 0 },
6765 { .name = "JOSCR",
6766 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
6767 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6768 { .name = "JMCR",
6769 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
6770 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6771 REGINFO_SENTINEL
6772 };
6773
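/*
 * Registers added by ARMv8.1-VHE: CONTEXTIDR_EL2, TTBR1_EL2, the EL2
 * virtual timer (CNTHV_*), and the *_EL02 aliases which let a host
 * running at EL2 with HCR_EL2.E2H set reach the EL0 physical and virtual
 * timer registers.
 */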
6774 static const ARMCPRegInfo vhe_reginfo[] = {
6775 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
6776 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
6777 .access = PL2_RW,
6778 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
6779 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
6780 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
6781 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
6782 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
6783 #ifndef CONFIG_USER_ONLY
6784 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
6785 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
6786 .fieldoffset =
6787 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
6788 .type = ARM_CP_IO, .access = PL2_RW,
6789 .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
6790 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
6791 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
6792 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
6793 .resetfn = gt_hv_timer_reset,
6794 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
6795 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
6796 .type = ARM_CP_IO,
6797 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
6798 .access = PL2_RW,
6799 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
6800 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
6801 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
6802 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
6803 .type = ARM_CP_IO | ARM_CP_ALIAS,
6804 .access = PL2_RW, .accessfn = e2h_access,
6805 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
6806 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
6807 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
6808 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
6809 .type = ARM_CP_IO | ARM_CP_ALIAS,
6810 .access = PL2_RW, .accessfn = e2h_access,
6811 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
6812 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
6813 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
6814 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
6815 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
6816 .access = PL2_RW, .accessfn = e2h_access,
6817 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
6818 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
6819 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
6820 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
6821 .access = PL2_RW, .accessfn = e2h_access,
6822 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
6823 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
6824 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
6825 .type = ARM_CP_IO | ARM_CP_ALIAS,
6826 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
6827 .access = PL2_RW, .accessfn = e2h_access,
6828 .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
6829 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
6830 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
6831 .type = ARM_CP_IO | ARM_CP_ALIAS,
6832 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
6833 .access = PL2_RW, .accessfn = e2h_access,
6834 .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
6835 #endif
6836 REGINFO_SENTINEL
6837 };
6838
6839 #ifndef CONFIG_USER_ONLY
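/*
 * ARMv8.2-ATS1E1 adds the PAN-respecting address translation instructions;
 * CRm = 9 (rather than 8) encodes the AT S1E1RP / AT S1E1WP forms.
 */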
6840 static const ARMCPRegInfo ats1e1_reginfo[] = {
6841 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
6842 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
6843 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
6844 .writefn = ats_write64 },
6845 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
6846 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
6847 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
6848 .writefn = ats_write64 },
6849 REGINFO_SENTINEL
6850 };
6851
6852 static const ARMCPRegInfo ats1cp_reginfo[] = {
6853 { .name = "ATS1CPRP",
6854 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
6855 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
6856 .writefn = ats_write },
6857 { .name = "ATS1CPWP",
6858 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
6859 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
6860 .writefn = ats_write },
6861 REGINFO_SENTINEL
6862 };
6863 #endif
6864
6865 /*
6866 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
6867 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
6868 * is non-zero, which never happens for ARMv7, is optional in ARMv8
6869 * and is mandatory from ARMv8.2 onwards.
6870 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
6871 * implementation is RAZ/WI we can ignore this detail, as we
6872 * do for ACTLR.
6873 */
6874 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
6875 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
6876 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
6877 .access = PL1_RW, .type = ARM_CP_CONST,
6878 .resetvalue = 0 },
6879 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
6880 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
6881 .access = PL2_RW, .type = ARM_CP_CONST,
6882 .resetvalue = 0 },
6883 REGINFO_SENTINEL
6884 };
6885
6886 void register_cp_regs_for_features(ARMCPU *cpu)
6887 {
6888 /* Register all the coprocessor registers based on feature bits */
6889 CPUARMState *env = &cpu->env;
6890 if (arm_feature(env, ARM_FEATURE_M)) {
6891 /* M profile has no coprocessor registers */
6892 return;
6893 }
6894
6895 define_arm_cp_regs(cpu, cp_reginfo);
6896 if (!arm_feature(env, ARM_FEATURE_V8)) {
6897 /* Must go early as it is full of wildcards that may be
6898 * overridden by later definitions.
6899 */
6900 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
6901 }
6902
6903 if (arm_feature(env, ARM_FEATURE_V6)) {
6904 /* The ID registers all have impdef reset values */
6905 ARMCPRegInfo v6_idregs[] = {
6906 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
6907 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6908 .access = PL1_R, .type = ARM_CP_CONST,
6909 .accessfn = access_aa32_tid3,
6910 .resetvalue = cpu->id_pfr0 },
6911 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
6912 * the value of the GIC field until after we define these regs.
6913 */
6914 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
6915 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
6916 .access = PL1_R, .type = ARM_CP_NO_RAW,
6917 .accessfn = access_aa32_tid3,
6918 .readfn = id_pfr1_read,
6919 .writefn = arm_cp_write_ignore },
6920 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
6921 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
6922 .access = PL1_R, .type = ARM_CP_CONST,
6923 .accessfn = access_aa32_tid3,
6924 .resetvalue = cpu->isar.id_dfr0 },
6925 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
6926 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
6927 .access = PL1_R, .type = ARM_CP_CONST,
6928 .accessfn = access_aa32_tid3,
6929 .resetvalue = cpu->id_afr0 },
6930 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
6931 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
6932 .access = PL1_R, .type = ARM_CP_CONST,
6933 .accessfn = access_aa32_tid3,
6934 .resetvalue = cpu->isar.id_mmfr0 },
6935 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
6936 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
6937 .access = PL1_R, .type = ARM_CP_CONST,
6938 .accessfn = access_aa32_tid3,
6939 .resetvalue = cpu->isar.id_mmfr1 },
6940 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
6941 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
6942 .access = PL1_R, .type = ARM_CP_CONST,
6943 .accessfn = access_aa32_tid3,
6944 .resetvalue = cpu->isar.id_mmfr2 },
6945 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
6946 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
6947 .access = PL1_R, .type = ARM_CP_CONST,
6948 .accessfn = access_aa32_tid3,
6949 .resetvalue = cpu->isar.id_mmfr3 },
6950 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
6951 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6952 .access = PL1_R, .type = ARM_CP_CONST,
6953 .accessfn = access_aa32_tid3,
6954 .resetvalue = cpu->isar.id_isar0 },
6955 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
6956 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
6957 .access = PL1_R, .type = ARM_CP_CONST,
6958 .accessfn = access_aa32_tid3,
6959 .resetvalue = cpu->isar.id_isar1 },
6960 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
6961 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6962 .access = PL1_R, .type = ARM_CP_CONST,
6963 .accessfn = access_aa32_tid3,
6964 .resetvalue = cpu->isar.id_isar2 },
6965 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
6966 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
6967 .access = PL1_R, .type = ARM_CP_CONST,
6968 .accessfn = access_aa32_tid3,
6969 .resetvalue = cpu->isar.id_isar3 },
6970 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
6971 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
6972 .access = PL1_R, .type = ARM_CP_CONST,
6973 .accessfn = access_aa32_tid3,
6974 .resetvalue = cpu->isar.id_isar4 },
6975 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
6976 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
6977 .access = PL1_R, .type = ARM_CP_CONST,
6978 .accessfn = access_aa32_tid3,
6979 .resetvalue = cpu->isar.id_isar5 },
6980 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
6981 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
6982 .access = PL1_R, .type = ARM_CP_CONST,
6983 .accessfn = access_aa32_tid3,
6984 .resetvalue = cpu->isar.id_mmfr4 },
6985 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
6986 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
6987 .access = PL1_R, .type = ARM_CP_CONST,
6988 .accessfn = access_aa32_tid3,
6989 .resetvalue = cpu->isar.id_isar6 },
6990 REGINFO_SENTINEL
6991 };
6992 define_arm_cp_regs(cpu, v6_idregs);
6993 define_arm_cp_regs(cpu, v6_cp_reginfo);
6994 } else {
6995 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
6996 }
6997 if (arm_feature(env, ARM_FEATURE_V6K)) {
6998 define_arm_cp_regs(cpu, v6k_cp_reginfo);
6999 }
7000 if (arm_feature(env, ARM_FEATURE_V7MP) &&
7001 !arm_feature(env, ARM_FEATURE_PMSA)) {
7002 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
7003 }
7004 if (arm_feature(env, ARM_FEATURE_V7VE)) {
7005 define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
7006 }
7007 if (arm_feature(env, ARM_FEATURE_V7)) {
7008 ARMCPRegInfo clidr = {
7009 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7010 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
7011 .access = PL1_R, .type = ARM_CP_CONST,
7012 .accessfn = access_aa64_tid2,
7013 .resetvalue = cpu->clidr
7014 };
7015 define_one_arm_cp_reg(cpu, &clidr);
7016 define_arm_cp_regs(cpu, v7_cp_reginfo);
7017 define_debug_regs(cpu);
7018 define_pmu_regs(cpu);
7019 } else {
7020 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
7021 }
7022 if (arm_feature(env, ARM_FEATURE_V8)) {
7023 /* AArch64 ID registers, which all have impdef reset values.
7024 * Note that within the ID register ranges the unused slots
7025 * must all RAZ, not UNDEF; future architecture versions may
7026 * define new registers here.
7027 */
7028 ARMCPRegInfo v8_idregs[] = {
7029 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
7030 * know the right value for the GIC field until after we
7031 * define these regs.
7032 */
7033 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
7034 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
7035 .access = PL1_R, .type = ARM_CP_NO_RAW,
7036 .accessfn = access_aa64_tid3,
7037 .readfn = id_aa64pfr0_read,
7038 .writefn = arm_cp_write_ignore },
7039 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7041 .access = PL1_R, .type = ARM_CP_CONST,
7042 .accessfn = access_aa64_tid3,
7043 .resetvalue = cpu->isar.id_aa64pfr1},
7044 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7045 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7046 .access = PL1_R, .type = ARM_CP_CONST,
7047 .accessfn = access_aa64_tid3,
7048 .resetvalue = 0 },
7049 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7050 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7051 .access = PL1_R, .type = ARM_CP_CONST,
7052 .accessfn = access_aa64_tid3,
7053 .resetvalue = 0 },
7054 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
7055 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7056 .access = PL1_R, .type = ARM_CP_CONST,
7057 .accessfn = access_aa64_tid3,
7058 /* At present, only SVEver == 0 is defined anyway. */
7059 .resetvalue = 0 },
7060 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7061 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7062 .access = PL1_R, .type = ARM_CP_CONST,
7063 .accessfn = access_aa64_tid3,
7064 .resetvalue = 0 },
7065 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7066 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7067 .access = PL1_R, .type = ARM_CP_CONST,
7068 .accessfn = access_aa64_tid3,
7069 .resetvalue = 0 },
7070 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7071 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7072 .access = PL1_R, .type = ARM_CP_CONST,
7073 .accessfn = access_aa64_tid3,
7074 .resetvalue = 0 },
7075 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7076 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7077 .access = PL1_R, .type = ARM_CP_CONST,
7078 .accessfn = access_aa64_tid3,
7079 .resetvalue = cpu->isar.id_aa64dfr0 },
7080 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7081 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7082 .access = PL1_R, .type = ARM_CP_CONST,
7083 .accessfn = access_aa64_tid3,
7084 .resetvalue = cpu->isar.id_aa64dfr1 },
7085 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7086 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7087 .access = PL1_R, .type = ARM_CP_CONST,
7088 .accessfn = access_aa64_tid3,
7089 .resetvalue = 0 },
7090 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7091 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7092 .access = PL1_R, .type = ARM_CP_CONST,
7093 .accessfn = access_aa64_tid3,
7094 .resetvalue = 0 },
7095 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7096 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7097 .access = PL1_R, .type = ARM_CP_CONST,
7098 .accessfn = access_aa64_tid3,
7099 .resetvalue = cpu->id_aa64afr0 },
7100 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7101 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7102 .access = PL1_R, .type = ARM_CP_CONST,
7103 .accessfn = access_aa64_tid3,
7104 .resetvalue = cpu->id_aa64afr1 },
7105 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7106 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7107 .access = PL1_R, .type = ARM_CP_CONST,
7108 .accessfn = access_aa64_tid3,
7109 .resetvalue = 0 },
7110 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7111 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7112 .access = PL1_R, .type = ARM_CP_CONST,
7113 .accessfn = access_aa64_tid3,
7114 .resetvalue = 0 },
7115 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7116 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7117 .access = PL1_R, .type = ARM_CP_CONST,
7118 .accessfn = access_aa64_tid3,
7119 .resetvalue = cpu->isar.id_aa64isar0 },
7120 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7121 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7122 .access = PL1_R, .type = ARM_CP_CONST,
7123 .accessfn = access_aa64_tid3,
7124 .resetvalue = cpu->isar.id_aa64isar1 },
7125 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7126 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7127 .access = PL1_R, .type = ARM_CP_CONST,
7128 .accessfn = access_aa64_tid3,
7129 .resetvalue = 0 },
7130 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7131 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7132 .access = PL1_R, .type = ARM_CP_CONST,
7133 .accessfn = access_aa64_tid3,
7134 .resetvalue = 0 },
7135 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7136 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7137 .access = PL1_R, .type = ARM_CP_CONST,
7138 .accessfn = access_aa64_tid3,
7139 .resetvalue = 0 },
7140 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7141 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7142 .access = PL1_R, .type = ARM_CP_CONST,
7143 .accessfn = access_aa64_tid3,
7144 .resetvalue = 0 },
7145 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7146 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7147 .access = PL1_R, .type = ARM_CP_CONST,
7148 .accessfn = access_aa64_tid3,
7149 .resetvalue = 0 },
7150 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7151 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7152 .access = PL1_R, .type = ARM_CP_CONST,
7153 .accessfn = access_aa64_tid3,
7154 .resetvalue = 0 },
7155 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7156 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7157 .access = PL1_R, .type = ARM_CP_CONST,
7158 .accessfn = access_aa64_tid3,
7159 .resetvalue = cpu->isar.id_aa64mmfr0 },
7160 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7161 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7162 .access = PL1_R, .type = ARM_CP_CONST,
7163 .accessfn = access_aa64_tid3,
7164 .resetvalue = cpu->isar.id_aa64mmfr1 },
7165 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
7166 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7167 .access = PL1_R, .type = ARM_CP_CONST,
7168 .accessfn = access_aa64_tid3,
7169 .resetvalue = cpu->isar.id_aa64mmfr2 },
7170 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7171 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7172 .access = PL1_R, .type = ARM_CP_CONST,
7173 .accessfn = access_aa64_tid3,
7174 .resetvalue = 0 },
7175 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7176 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7177 .access = PL1_R, .type = ARM_CP_CONST,
7178 .accessfn = access_aa64_tid3,
7179 .resetvalue = 0 },
7180 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7181 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7182 .access = PL1_R, .type = ARM_CP_CONST,
7183 .accessfn = access_aa64_tid3,
7184 .resetvalue = 0 },
7185 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7186 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7187 .access = PL1_R, .type = ARM_CP_CONST,
7188 .accessfn = access_aa64_tid3,
7189 .resetvalue = 0 },
7190 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7191 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7192 .access = PL1_R, .type = ARM_CP_CONST,
7193 .accessfn = access_aa64_tid3,
7194 .resetvalue = 0 },
7195 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7196 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7197 .access = PL1_R, .type = ARM_CP_CONST,
7198 .accessfn = access_aa64_tid3,
7199 .resetvalue = cpu->isar.mvfr0 },
7200 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7201 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7202 .access = PL1_R, .type = ARM_CP_CONST,
7203 .accessfn = access_aa64_tid3,
7204 .resetvalue = cpu->isar.mvfr1 },
7205 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7206 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7207 .access = PL1_R, .type = ARM_CP_CONST,
7208 .accessfn = access_aa64_tid3,
7209 .resetvalue = cpu->isar.mvfr2 },
7210 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7211 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7212 .access = PL1_R, .type = ARM_CP_CONST,
7213 .accessfn = access_aa64_tid3,
7214 .resetvalue = 0 },
7215 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7216 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7217 .access = PL1_R, .type = ARM_CP_CONST,
7218 .accessfn = access_aa64_tid3,
7219 .resetvalue = 0 },
7220 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7221 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7222 .access = PL1_R, .type = ARM_CP_CONST,
7223 .accessfn = access_aa64_tid3,
7224 .resetvalue = 0 },
7225 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7226 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7227 .access = PL1_R, .type = ARM_CP_CONST,
7228 .accessfn = access_aa64_tid3,
7229 .resetvalue = 0 },
7230 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7231 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7232 .access = PL1_R, .type = ARM_CP_CONST,
7233 .accessfn = access_aa64_tid3,
7234 .resetvalue = 0 },
7235 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7236 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7237 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7238 .resetvalue = extract64(cpu->pmceid0, 0, 32) },
7239 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7240 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7241 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7242 .resetvalue = cpu->pmceid0 },
7243 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7244 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7245 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7246 .resetvalue = extract64(cpu->pmceid1, 0, 32) },
7247 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7248 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7249 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7250 .resetvalue = cpu->pmceid1 },
7251 REGINFO_SENTINEL
7252 };
7253 #ifdef CONFIG_USER_ONLY
7254 ARMCPRegUserSpaceInfo v8_user_idregs[] = {
7255 { .name = "ID_AA64PFR0_EL1",
7256 .exported_bits = 0x000f000f00ff0000,
7257 .fixed_bits = 0x0000000000000011 },
7258 { .name = "ID_AA64PFR1_EL1",
7259 .exported_bits = 0x00000000000000f0 },
7260 { .name = "ID_AA64PFR*_EL1_RESERVED",
7261 .is_glob = true },
7262 { .name = "ID_AA64ZFR0_EL1" },
7263 { .name = "ID_AA64MMFR0_EL1",
7264 .fixed_bits = 0x00000000ff000000 },
7265 { .name = "ID_AA64MMFR1_EL1" },
7266 { .name = "ID_AA64MMFR*_EL1_RESERVED",
7267 .is_glob = true },
7268 { .name = "ID_AA64DFR0_EL1",
7269 .fixed_bits = 0x0000000000000006 },
7270 { .name = "ID_AA64DFR1_EL1" },
7271 { .name = "ID_AA64DFR*_EL1_RESERVED",
7272 .is_glob = true },
7273 { .name = "ID_AA64AFR*",
7274 .is_glob = true },
7275 { .name = "ID_AA64ISAR0_EL1",
7276 .exported_bits = 0x00fffffff0fffff0 },
7277 { .name = "ID_AA64ISAR1_EL1",
7278 .exported_bits = 0x000000f0ffffffff },
7279 { .name = "ID_AA64ISAR*_EL1_RESERVED",
7280 .is_glob = true },
7281 REGUSERINFO_SENTINEL
7282 };
7283 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
7284 #endif
7285 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7286 if (!arm_feature(env, ARM_FEATURE_EL3) &&
7287 !arm_feature(env, ARM_FEATURE_EL2)) {
7288 ARMCPRegInfo rvbar = {
7289 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
7290 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
7291 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
7292 };
7293 define_one_arm_cp_reg(cpu, &rvbar);
7294 }
7295 define_arm_cp_regs(cpu, v8_idregs);
7296 define_arm_cp_regs(cpu, v8_cp_reginfo);
7297 }
7298 if (arm_feature(env, ARM_FEATURE_EL2)) {
7299 uint64_t vmpidr_def = mpidr_read_val(env);
7300 ARMCPRegInfo vpidr_regs[] = {
7301 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
7302 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7303 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7304 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
7305 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
7306 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
7307 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7308 .access = PL2_RW, .resetvalue = cpu->midr,
7309 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7310 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
7311 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7312 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7313 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
7314 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
7315 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
7316 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7317 .access = PL2_RW,
7318 .resetvalue = vmpidr_def,
7319 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
7320 REGINFO_SENTINEL
7321 };
7322 define_arm_cp_regs(cpu, vpidr_regs);
7323 define_arm_cp_regs(cpu, el2_cp_reginfo);
7324 if (arm_feature(env, ARM_FEATURE_V8)) {
7325 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
7326 }
7327 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
7328 if (!arm_feature(env, ARM_FEATURE_EL3)) {
7329 ARMCPRegInfo rvbar = {
7330 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
7331 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
7332 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
7333 };
7334 define_one_arm_cp_reg(cpu, &rvbar);
7335 }
7336 } else {
7337 /* If EL2 is missing but higher ELs are enabled, we need to
7338 * register the no_el2 reginfos.
7339 */
7340 if (arm_feature(env, ARM_FEATURE_EL3)) {
7341 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
7342 * of MIDR_EL1 and MPIDR_EL1.
7343 */
7344 ARMCPRegInfo vpidr_regs[] = {
7345 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7346 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7347 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
7348 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
7349 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7350 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7351 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7352 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
7353 .type = ARM_CP_NO_RAW,
7354 .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
7355 REGINFO_SENTINEL
7356 };
7357 define_arm_cp_regs(cpu, vpidr_regs);
7358 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
7359 if (arm_feature(env, ARM_FEATURE_V8)) {
7360 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
7361 }
7362 }
7363 }
7364 if (arm_feature(env, ARM_FEATURE_EL3)) {
7365 define_arm_cp_regs(cpu, el3_cp_reginfo);
7366 ARMCPRegInfo el3_regs[] = {
7367 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
7368 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
7369 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
7370 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
7371 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
7372 .access = PL3_RW,
7373 .raw_writefn = raw_write, .writefn = sctlr_write,
7374 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
7375 .resetvalue = cpu->reset_sctlr },
7376 REGINFO_SENTINEL
7377 };
7378
7379 define_arm_cp_regs(cpu, el3_regs);
7380 }
7381 /* The behaviour of NSACR is sufficiently various that we don't
7382 * try to describe it in a single reginfo:
7383 * if EL3 is 64 bit, then trap to EL3 from S EL1,
7384 * reads as constant 0xc00 from NS EL1 and NS EL2
7385 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
7386 * if v7 without EL3, register doesn't exist
7387 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
7388 */
7389 if (arm_feature(env, ARM_FEATURE_EL3)) {
7390 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7391 ARMCPRegInfo nsacr = {
7392 .name = "NSACR", .type = ARM_CP_CONST,
7393 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7394 .access = PL1_RW, .accessfn = nsacr_access,
7395 .resetvalue = 0xc00
7396 };
7397 define_one_arm_cp_reg(cpu, &nsacr);
7398 } else {
7399 ARMCPRegInfo nsacr = {
7400 .name = "NSACR",
7401 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7402 .access = PL3_RW | PL1_R,
7403 .resetvalue = 0,
7404 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
7405 };
7406 define_one_arm_cp_reg(cpu, &nsacr);
7407 }
7408 } else {
7409 if (arm_feature(env, ARM_FEATURE_V8)) {
7410 ARMCPRegInfo nsacr = {
7411 .name = "NSACR", .type = ARM_CP_CONST,
7412 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7413 .access = PL1_R,
7414 .resetvalue = 0xc00
7415 };
7416 define_one_arm_cp_reg(cpu, &nsacr);
7417 }
7418 }
7419
7420 if (arm_feature(env, ARM_FEATURE_PMSA)) {
7421 if (arm_feature(env, ARM_FEATURE_V6)) {
7422 /* PMSAv6 not implemented */
7423 assert(arm_feature(env, ARM_FEATURE_V7));
7424 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
7425 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
7426 } else {
7427 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
7428 }
7429 } else {
7430 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
7431 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
7432 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
7433 if (cpu_isar_feature(aa32_hpd, cpu)) {
7434 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
7435 }
7436 }
7437 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
7438 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
7439 }
7440 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
7441 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
7442 }
7443 if (arm_feature(env, ARM_FEATURE_VAPA)) {
7444 define_arm_cp_regs(cpu, vapa_cp_reginfo);
7445 }
7446 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
7447 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
7448 }
7449 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
7450 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
7451 }
7452 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
7453 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
7454 }
7455 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
7456 define_arm_cp_regs(cpu, omap_cp_reginfo);
7457 }
7458 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
7459 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
7460 }
7461 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7462 define_arm_cp_regs(cpu, xscale_cp_reginfo);
7463 }
7464 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
7465 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
7466 }
7467 if (arm_feature(env, ARM_FEATURE_LPAE)) {
7468 define_arm_cp_regs(cpu, lpae_cp_reginfo);
7469 }
7470 if (cpu_isar_feature(aa32_jazelle, cpu)) {
7471 define_arm_cp_regs(cpu, jazelle_regs);
7472 }
7473 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
7474 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
7475 * be read-only (ie write causes UNDEF exception).
7476 */
7477 {
7478 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
7479 /* Pre-v8 MIDR space.
7480 * Note that the MIDR isn't a simple constant register because
7481 * of the TI925 behaviour where writes to another register can
7482 * cause the MIDR value to change.
7483 *
7484 * Unimplemented registers in the c15 0 0 0 space default to
7485 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
7486 * and friends override accordingly.
7487 */
7488 { .name = "MIDR",
7489 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7490 .access = PL1_R, .resetvalue = cpu->midr,
7491 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
7492 .readfn = midr_read,
7493 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7494 .type = ARM_CP_OVERRIDE },
7495 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
7496 { .name = "DUMMY",
7497 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
7498 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7499 { .name = "DUMMY",
7500 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
7501 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7502 { .name = "DUMMY",
7503 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
7504 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7505 { .name = "DUMMY",
7506 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
7507 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7508 { .name = "DUMMY",
7509 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
7510 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7511 REGINFO_SENTINEL
7512 };
7513 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
7514 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
7515 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
7516 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
7517 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7518 .readfn = midr_read },
7519 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
7520 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7521 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7522 .access = PL1_R, .resetvalue = cpu->midr },
7523 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7524 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
7525 .access = PL1_R, .resetvalue = cpu->midr },
7526 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
7527 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
7528 .access = PL1_R,
7529 .accessfn = access_aa64_tid1,
7530 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
7531 REGINFO_SENTINEL
7532 };
7533 ARMCPRegInfo id_cp_reginfo[] = {
7534 /* These are common to v8 and pre-v8 */
7535 { .name = "CTR",
7536 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
7537 .access = PL1_R, .accessfn = ctr_el0_access,
7538 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7539 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
7540 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
7541 .access = PL0_R, .accessfn = ctr_el0_access,
7542 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7543 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
7544 { .name = "TCMTR",
7545 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
7546 .access = PL1_R,
7547 .accessfn = access_aa32_tid1,
7548 .type = ARM_CP_CONST, .resetvalue = 0 },
7549 REGINFO_SENTINEL
7550 };
7551 /* TLBTR is specific to VMSA */
7552 ARMCPRegInfo id_tlbtr_reginfo = {
7553 .name = "TLBTR",
7554 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
7555 .access = PL1_R,
7556 .accessfn = access_aa32_tid1,
7557 .type = ARM_CP_CONST, .resetvalue = 0,
7558 };
7559 /* MPUIR is specific to PMSA V6+ */
7560 ARMCPRegInfo id_mpuir_reginfo = {
7561 .name = "MPUIR",
7562 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7563 .access = PL1_R, .type = ARM_CP_CONST,
7564 .resetvalue = cpu->pmsav7_dregion << 8
7565 };
7566 ARMCPRegInfo crn0_wi_reginfo = {
7567 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
7568 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
7569 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
7570 };
7571 #ifdef CONFIG_USER_ONLY
7572 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
7573 { .name = "MIDR_EL1",
7574 .exported_bits = 0x00000000ffffffff },
7575 { .name = "REVIDR_EL1" },
7576 REGUSERINFO_SENTINEL
7577 };
7578 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
7579 #endif
7580 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
7581 arm_feature(env, ARM_FEATURE_STRONGARM)) {
7582 ARMCPRegInfo *r;
7583 /* Register the blanket "writes ignored" value first to cover the
7584 * whole space. Then update the specific ID registers to allow write
7585 * access, so that they ignore writes rather than causing them to
7586 * UNDEF.
7587 */
7588 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
7589 for (r = id_pre_v8_midr_cp_reginfo;
7590 r->type != ARM_CP_SENTINEL; r++) {
7591 r->access = PL1_RW;
7592 }
7593 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
7594 r->access = PL1_RW;
7595 }
7596 id_mpuir_reginfo.access = PL1_RW;
7597 id_tlbtr_reginfo.access = PL1_RW;
7598 }
7599 if (arm_feature(env, ARM_FEATURE_V8)) {
7600 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
7601 } else {
7602 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
7603 }
7604 define_arm_cp_regs(cpu, id_cp_reginfo);
7605 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
7606 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
7607 } else if (arm_feature(env, ARM_FEATURE_V7)) {
7608 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
7609 }
7610 }
7611
7612 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
7613 ARMCPRegInfo mpidr_cp_reginfo[] = {
7614 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
7615 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
7616 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
7617 REGINFO_SENTINEL
7618 };
7619 #ifdef CONFIG_USER_ONLY
7620 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
7621 { .name = "MPIDR_EL1",
7622 .fixed_bits = 0x0000000080000000 },
7623 REGUSERINFO_SENTINEL
7624 };
7625 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
7626 #endif
7627 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
7628 }
7629
7630 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
7631 ARMCPRegInfo auxcr_reginfo[] = {
7632 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
7633 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
7634 .access = PL1_RW, .type = ARM_CP_CONST,
7635 .resetvalue = cpu->reset_auxcr },
7636 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
7637 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
7638 .access = PL2_RW, .type = ARM_CP_CONST,
7639 .resetvalue = 0 },
7640 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
7641 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
7642 .access = PL3_RW, .type = ARM_CP_CONST,
7643 .resetvalue = 0 },
7644 REGINFO_SENTINEL
7645 };
7646 define_arm_cp_regs(cpu, auxcr_reginfo);
7647 if (cpu_isar_feature(aa32_ac2, cpu)) {
7648 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
7649 }
7650 }
7651
7652 if (arm_feature(env, ARM_FEATURE_CBAR)) {
7653 /*
7654 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
7655 * There are two flavours:
7656 * (1) older 32-bit only cores have a simple 32-bit CBAR
7657 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
7658 * 32-bit register visible to AArch32 at a different encoding
7659 * to the "flavour 1" register and with the bits rearranged to
7660 * be able to squash a 64-bit address into the 32-bit view.
7661 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
7662 * in future if we support AArch32-only configs of some of the
7663 * AArch64 cores we might need to add a specific feature flag
7664 * to indicate cores with "flavour 2" CBAR.
7665 */
7666 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7667 /* 32 bit view is [31:18] 0...0 [43:32]. */
7668 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
7669 | extract64(cpu->reset_cbar, 32, 12);
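/*
 * For example (sketch): a 64-bit CBAR of 0x4_3000_0000 yields a 32-bit
 * view of 0x30000004 -- PERIPHBASE[31:18] in the top bits and
 * PERIPHBASE[43:32] in the low 12 bits.
 */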
7670 ARMCPRegInfo cbar_reginfo[] = {
7671 { .name = "CBAR",
7672 .type = ARM_CP_CONST,
7673 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
7674 .access = PL1_R, .resetvalue = cbar32 },
7675 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
7676 .type = ARM_CP_CONST,
7677 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
7678 .access = PL1_R, .resetvalue = cpu->reset_cbar },
7679 REGINFO_SENTINEL
7680 };
7681 /* We don't currently implement a read/write 64 bit CBAR */
7682 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
7683 define_arm_cp_regs(cpu, cbar_reginfo);
7684 } else {
7685 ARMCPRegInfo cbar = {
7686 .name = "CBAR",
7687 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
7688 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
7689 .fieldoffset = offsetof(CPUARMState,
7690 cp15.c15_config_base_address)
7691 };
7692 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
7693 cbar.access = PL1_R;
7694 cbar.fieldoffset = 0;
7695 cbar.type = ARM_CP_CONST;
7696 }
7697 define_one_arm_cp_reg(cpu, &cbar);
7698 }
7699 }
7700
7701 if (arm_feature(env, ARM_FEATURE_VBAR)) {
7702 ARMCPRegInfo vbar_cp_reginfo[] = {
7703 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
7704 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
7705 .access = PL1_RW, .writefn = vbar_write,
7706 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
7707 offsetof(CPUARMState, cp15.vbar_ns) },
7708 .resetvalue = 0 },
7709 REGINFO_SENTINEL
7710 };
7711 define_arm_cp_regs(cpu, vbar_cp_reginfo);
7712 }
7713
7714 /* Generic registers whose values depend on the implementation */
7715 {
7716 ARMCPRegInfo sctlr = {
7717 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
7718 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
7719 .access = PL1_RW,
7720 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
7721 offsetof(CPUARMState, cp15.sctlr_ns) },
7722 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
7723 .raw_writefn = raw_write,
7724 };
7725 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7726 /* Normally we would always end the TB on an SCTLR write, but Linux
7727 * arch/arm/mach-pxa/sleep.S expects two instructions following
7728 * an MMU enable to execute from cache. Imitate this behaviour.
7729 */
7730 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
7731 }
7732 define_one_arm_cp_reg(cpu, &sctlr);
7733 }
7734
7735 if (cpu_isar_feature(aa64_lor, cpu)) {
7736 define_arm_cp_regs(cpu, lor_reginfo);
7737 }
7738 if (cpu_isar_feature(aa64_pan, cpu)) {
7739 define_one_arm_cp_reg(cpu, &pan_reginfo);
7740 }
7741 #ifndef CONFIG_USER_ONLY
7742 if (cpu_isar_feature(aa64_ats1e1, cpu)) {
7743 define_arm_cp_regs(cpu, ats1e1_reginfo);
7744 }
7745 if (cpu_isar_feature(aa32_ats1e1, cpu)) {
7746 define_arm_cp_regs(cpu, ats1cp_reginfo);
7747 }
7748 #endif
7749 if (cpu_isar_feature(aa64_uao, cpu)) {
7750 define_one_arm_cp_reg(cpu, &uao_reginfo);
7751 }
7752
7753 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7754 define_arm_cp_regs(cpu, vhe_reginfo);
7755 }
7756
7757 if (cpu_isar_feature(aa64_sve, cpu)) {
7758 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
7759 if (arm_feature(env, ARM_FEATURE_EL2)) {
7760 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
7761 } else {
7762 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
7763 }
7764 if (arm_feature(env, ARM_FEATURE_EL3)) {
7765 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
7766 }
7767 }
7768
7769 #ifdef TARGET_AARCH64
7770 if (cpu_isar_feature(aa64_pauth, cpu)) {
7771 define_arm_cp_regs(cpu, pauth_reginfo);
7772 }
7773 if (cpu_isar_feature(aa64_rndr, cpu)) {
7774 define_arm_cp_regs(cpu, rndr_reginfo);
7775 }
7776 #ifndef CONFIG_USER_ONLY
7777 /* Data Cache clean instructions up to PoP */
7778 if (cpu_isar_feature(aa64_dcpop, cpu)) {
7779 define_one_arm_cp_reg(cpu, dcpop_reg);
7780
7781 if (cpu_isar_feature(aa64_dcpodp, cpu)) {
7782 define_one_arm_cp_reg(cpu, dcpodp_reg);
7783 }
7784 }
7785 #endif /*CONFIG_USER_ONLY*/
7786 #endif
7787
7788 if (cpu_isar_feature(any_predinv, cpu)) {
7789 define_arm_cp_regs(cpu, predinv_reginfo);
7790 }
7791
7792 #ifndef CONFIG_USER_ONLY
7793 /*
7794 * Register redirections and aliases must be done last,
7795 * after the registers from the other extensions have been defined.
7796 */
7797 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7798 define_arm_vh_e2h_redirects_aliases(cpu);
7799 }
7800 #endif
7801 }
7802
7803 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
7804 {
7805 CPUState *cs = CPU(cpu);
7806 CPUARMState *env = &cpu->env;
7807
7808 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7809 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
7810 aarch64_fpu_gdb_set_reg,
7811 34, "aarch64-fpu.xml", 0);
7812 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
7813 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
7814 51, "arm-neon.xml", 0);
7815 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
7816 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
7817 35, "arm-vfp3.xml", 0);
7818 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
7819 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
7820 19, "arm-vfp.xml", 0);
7821 }
7822 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
7823 arm_gen_dynamic_xml(cs),
7824 "system-registers.xml", 0);
7825 }
7826
7827 /* Sort alphabetically by type name, except for "any". */
7828 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
7829 {
7830 ObjectClass *class_a = (ObjectClass *)a;
7831 ObjectClass *class_b = (ObjectClass *)b;
7832 const char *name_a, *name_b;
7833
7834 name_a = object_class_get_name(class_a);
7835 name_b = object_class_get_name(class_b);
7836 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
7837 return 1;
7838 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
7839 return -1;
7840 } else {
7841 return strcmp(name_a, name_b);
7842 }
7843 }
7844
7845 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
7846 {
7847 ObjectClass *oc = data;
7848 const char *typename;
7849 char *name;
7850
7851 typename = object_class_get_name(oc);
7852 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
7853 qemu_printf(" %s\n", name);
7854 g_free(name);
7855 }
7856
7857 void arm_cpu_list(void)
7858 {
7859 GSList *list;
7860
7861 list = object_class_get_list(TYPE_ARM_CPU, false);
7862 list = g_slist_sort(list, arm_cpu_list_compare);
7863 qemu_printf("Available CPUs:\n");
7864 g_slist_foreach(list, arm_cpu_list_entry, NULL);
7865 g_slist_free(list);
7866 }
7867
7868 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
7869 {
7870 ObjectClass *oc = data;
7871 CpuDefinitionInfoList **cpu_list = user_data;
7872 CpuDefinitionInfoList *entry;
7873 CpuDefinitionInfo *info;
7874 const char *typename;
7875
7876 typename = object_class_get_name(oc);
7877 info = g_malloc0(sizeof(*info));
7878 info->name = g_strndup(typename,
7879 strlen(typename) - strlen("-" TYPE_ARM_CPU));
7880 info->q_typename = g_strdup(typename);
7881
7882 entry = g_malloc0(sizeof(*entry));
7883 entry->value = info;
7884 entry->next = *cpu_list;
7885 *cpu_list = entry;
7886 }
7887
7888 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
7889 {
7890 CpuDefinitionInfoList *cpu_list = NULL;
7891 GSList *list;
7892
7893 list = object_class_get_list(TYPE_ARM_CPU, false);
7894 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
7895 g_slist_free(list);
7896
7897 return cpu_list;
7898 }
7899
7900 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
7901 void *opaque, int state, int secstate,
7902 int crm, int opc1, int opc2,
7903 const char *name)
7904 {
7905 /* Private utility function for define_one_arm_cp_reg_with_opaque():
7906 * add a single reginfo struct to the hash table.
7907 */
7908 uint32_t *key = g_new(uint32_t, 1);
7909 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
7910 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
7911 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
7912
7913 r2->name = g_strdup(name);
7914 /* Reset the secure state to the specific incoming state. This is
7915 * necessary as the register may have been defined with both states.
7916 */
7917 r2->secure = secstate;
7918
7919 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
7920 /* Register is banked (using both entries in array).
7921 * Overwrite fieldoffset, as the array is only used to define banked
7922 * registers; later code uses only fieldoffset.
7923 */
7924 r2->fieldoffset = r->bank_fieldoffsets[ns];
7925 }
7926
7927 if (state == ARM_CP_STATE_AA32) {
7928 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
7929 /* If the register is banked then we don't need to migrate or
7930 * reset the 32-bit instance in certain cases:
7931 *
7932 * 1) If the register has both 32-bit and 64-bit instances then we
7933 * can count on the 64-bit instance taking care of the
7934 * non-secure bank.
7935 * 2) If ARMv8 is enabled then we can count on a 64-bit version
7936 * taking care of the secure bank. This requires that separate
7937 * 32 and 64-bit definitions are provided.
7938 */
7939 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
7940 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
7941 r2->type |= ARM_CP_ALIAS;
7942 }
7943 } else if ((secstate != r->secure) && !ns) {
7944 /* The register is not banked so we only want to allow migration of
7945 * the non-secure instance.
7946 */
7947 r2->type |= ARM_CP_ALIAS;
7948 }
7949
7950 if (r->state == ARM_CP_STATE_BOTH) {
7951 /* We assume it is a cp15 register if the .cp field is left unset.
7952 */
7953 if (r2->cp == 0) {
7954 r2->cp = 15;
7955 }
7956
7957 #ifdef HOST_WORDS_BIGENDIAN
7958 if (r2->fieldoffset) {
7959 r2->fieldoffset += sizeof(uint32_t);
7960 }
7961 #endif
7962 }
7963 }
7964 if (state == ARM_CP_STATE_AA64) {
7965 /* To allow abbreviation of ARMCPRegInfo
7966 * definitions, we treat cp == 0 as equivalent to
7967 * the value for "standard guest-visible sysreg".
7968 * STATE_BOTH definitions are also always "standard
7969 * sysreg" in their AArch64 view (the .cp value may
7970 * be non-zero for the benefit of the AArch32 view).
7971 */
7972 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
7973 r2->cp = CP_REG_ARM64_SYSREG_CP;
7974 }
7975 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
7976 r2->opc0, opc1, opc2);
7977 } else {
7978 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
7979 }
7980 if (opaque) {
7981 r2->opaque = opaque;
7982 }
7983 /* reginfo passed to helpers is correct for the actual access,
7984 * and is never ARM_CP_STATE_BOTH:
7985 */
7986 r2->state = state;
7987 /* Make sure reginfo passed to helpers for wildcarded regs
7988 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
7989 */
7990 r2->crm = crm;
7991 r2->opc1 = opc1;
7992 r2->opc2 = opc2;
7993 /* By convention, for wildcarded registers only the first
7994 * entry is used for migration; the others are marked as
7995 * ALIAS so we don't try to transfer the register
7996 * multiple times. Special registers (ie NOP/WFI) are
7997 * never migratable and not even raw-accessible.
7998 */
7999 if ((r->type & ARM_CP_SPECIAL)) {
8000 r2->type |= ARM_CP_NO_RAW;
8001 }
8002 if (((r->crm == CP_ANY) && crm != 0) ||
8003 ((r->opc1 == CP_ANY) && opc1 != 0) ||
8004 ((r->opc2 == CP_ANY) && opc2 != 0)) {
8005 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
8006 }
8007
8008 /* Check that raw accesses are either forbidden or handled. Note that
8009 * we can't assert this earlier because the setup of fieldoffset for
8010 * banked registers has to be done first.
8011 */
8012 if (!(r2->type & ARM_CP_NO_RAW)) {
8013 assert(!raw_accessors_invalid(r2));
8014 }
8015
8016 /* Overriding of an existing definition must be explicitly
8017 * requested.
8018 */
8019 if (!(r->type & ARM_CP_OVERRIDE)) {
8020 ARMCPRegInfo *oldreg;
8021 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
8022 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
8023 fprintf(stderr, "Register redefined: cp=%d %d bit "
8024 "crn=%d crm=%d opc1=%d opc2=%d, "
8025 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
8026 r2->crn, r2->crm, r2->opc1, r2->opc2,
8027 oldreg->name, r2->name);
8028 g_assert_not_reached();
8029 }
8030 }
8031 g_hash_table_insert(cpu->cp_regs, key, r2);
8032 }
8033
8034
8035 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
8036 const ARMCPRegInfo *r, void *opaque)
8037 {
8038 /* Define implementations of coprocessor registers.
8039 * We store these in a hashtable because typically
8040 * there are fewer than 150 registers in a space which
8041 * is 16*16*16*8*8 = 262144 in size.
8042 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8043 * If a register is defined twice then the second definition is
8044 * used, so this can be used to define some generic registers and
8045 * then override them with implementation specific variations.
8046 * At least one of the original and the second definition should
8047 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8048 * against accidental use.
8049 *
8050 * The state field defines whether the register is to be
8051 * visible in the AArch32 or AArch64 execution state. If the
8052 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8053 * reginfo structure for the AArch32 view, which sees the lower
8054 * 32 bits of the 64 bit register.
8055 *
8056 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8057 * be wildcarded. AArch64 registers are always considered to be 64
8058 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8059 * the register, if any.
8060 */
8061 int crm, opc1, opc2, state;
8062 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8063 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8064 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8065 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8066 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8067 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
8068 /* 64 bit registers have only CRm and Opc1 fields */
8069 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
8070 /* op0 only exists in the AArch64 encodings */
8071 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8072 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8073 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
8074 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8075 * encodes a minimum access level for the register. We roll this
8076 * runtime check into our general permission check code, so check
8077 * here that the reginfo's specified permissions are strict enough
8078 * to encompass the generic architectural permission check.
8079 */
8080 if (r->state != ARM_CP_STATE_AA32) {
8081 int mask = 0;
8082 switch (r->opc1) {
8083 case 0:
8084 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8085 mask = PL0U_R | PL1_RW;
8086 break;
8087 case 1: case 2:
8088 /* min_EL EL1 */
8089 mask = PL1_RW;
8090 break;
8091 case 3:
8092 /* min_EL EL0 */
8093 mask = PL0_RW;
8094 break;
8095 case 4:
8096 case 5:
8097 /* min_EL EL2 */
8098 mask = PL2_RW;
8099 break;
8100 case 6:
8101 /* min_EL EL3 */
8102 mask = PL3_RW;
8103 break;
8104 case 7:
8105 /* min_EL EL1, secure mode only (we don't check the latter) */
8106 mask = PL1_RW;
8107 break;
8108 default:
8109 /* broken reginfo with out-of-range opc1 */
8110 assert(false);
8111 break;
8112 }
8113 /* assert our permissions are not too lax (stricter is fine) */
8114 assert((r->access & ~mask) == 0);
8115 }
8116
8117 /* Check that the register definition has enough info to handle
8118 * reads and writes if they are permitted.
8119 */
8120 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
8121 if (r->access & PL3_R) {
8122 assert((r->fieldoffset ||
8123 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8124 r->readfn);
8125 }
8126 if (r->access & PL3_W) {
8127 assert((r->fieldoffset ||
8128 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8129 r->writefn);
8130 }
8131 }
8132 /* Bad type field probably means missing sentinel at end of reg list */
8133 assert(cptype_valid(r->type));
8134 for (crm = crmmin; crm <= crmmax; crm++) {
8135 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8136 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
8137 for (state = ARM_CP_STATE_AA32;
8138 state <= ARM_CP_STATE_AA64; state++) {
8139 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
8140 continue;
8141 }
8142 if (state == ARM_CP_STATE_AA32) {
8143 /* Under AArch32 CP registers can be common
8144 * (same for secure and non-secure world) or banked.
8145 */
8146 char *name;
8147
8148 switch (r->secure) {
8149 case ARM_CP_SECSTATE_S:
8150 case ARM_CP_SECSTATE_NS:
8151 add_cpreg_to_hashtable(cpu, r, opaque, state,
8152 r->secure, crm, opc1, opc2,
8153 r->name);
8154 break;
8155 default:
8156 name = g_strdup_printf("%s_S", r->name);
8157 add_cpreg_to_hashtable(cpu, r, opaque, state,
8158 ARM_CP_SECSTATE_S,
8159 crm, opc1, opc2, name);
8160 g_free(name);
8161 add_cpreg_to_hashtable(cpu, r, opaque, state,
8162 ARM_CP_SECSTATE_NS,
8163 crm, opc1, opc2, r->name);
8164 break;
8165 }
8166 } else {
8167 /* AArch64 registers get mapped to non-secure instance
8168 * of AArch32 */
8169 add_cpreg_to_hashtable(cpu, r, opaque, state,
8170 ARM_CP_SECSTATE_NS,
8171 crm, opc1, opc2, r->name);
8172 }
8173 }
8174 }
8175 }
8176 }
8177 }
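/*
 * Expansion sketch (illustrative, not a register defined in this file):
 * a definition with .crm = CP_ANY, .opc2 = CP_ANY and
 * .state = ARM_CP_STATE_AA32 iterates the loops above 16 * 1 * 8 = 128
 * times; each iteration adds one hash-table entry (or two, secure and
 * non-secure, for a common AArch32 register), and all but the
 * (crm == 0, opc2 == 0) instance are tagged ARM_CP_ALIAS | ARM_CP_NO_GDB
 * by add_cpreg_to_hashtable() so the register is migrated only once.
 */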
8178
8179 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
8180 const ARMCPRegInfo *regs, void *opaque)
8181 {
8182 /* Define a whole list of registers */
8183 const ARMCPRegInfo *r;
8184 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8185 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
8186 }
8187 }
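/*
 * Minimal usage sketch (hypothetical register name and encoding, for
 * illustration only): callers terminate the array with REGINFO_SENTINEL
 * so the loop above knows where to stop:
 *
 *     static const ARMCPRegInfo example_reginfo[] = {
 *         { .name = "EXAMPLE_REG", .cp = 15, .crn = 15, .crm = 1,
 *           .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
 *         REGINFO_SENTINEL
 *     };
 *     define_arm_cp_regs(cpu, example_reginfo);
 */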
8188
8189 /*
8190 * Modify ARMCPRegInfo for access from userspace.
8191 *
8192 * This is a data driven modification directed by
8193 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
8194 * user-space cannot alter any values and dynamic values pertaining to
8195 * execution state are hidden from user space view anyway.
8196 */
8197 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
8198 {
8199 const ARMCPRegUserSpaceInfo *m;
8200 ARMCPRegInfo *r;
8201
8202 for (m = mods; m->name; m++) {
8203 GPatternSpec *pat = NULL;
8204 if (m->is_glob) {
8205 pat = g_pattern_spec_new(m->name);
8206 }
8207 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8208 if (pat && g_pattern_match_string(pat, r->name)) {
8209 r->type = ARM_CP_CONST;
8210 r->access = PL0U_R;
8211 r->resetvalue = 0;
8212 /* continue */
8213 } else if (strcmp(r->name, m->name) == 0) {
8214 r->type = ARM_CP_CONST;
8215 r->access = PL0U_R;
8216 r->resetvalue &= m->exported_bits;
8217 r->resetvalue |= m->fixed_bits;
8218 break;
8219 }
8220 }
8221 if (pat) {
8222 g_pattern_spec_free(pat);
8223 }
8224 }
8225 }
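/*
 * Usage sketch (hypothetical mod list and placeholder reginfo list, for
 * illustration only): a glob entry forces every matching register to read
 * as zero, while a named entry exposes only the selected fields:
 *
 *     static const ARMCPRegUserSpaceInfo example_user_cpregs[] = {
 *         { .name = "ID_*", .is_glob = true },
 *         { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffffULL },
 *         REGUSERINFO_SENTINEL
 *     };
 *     modify_arm_cp_regs(some_reginfo_list, example_user_cpregs);
 */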
8226
8227 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
8228 {
8229 return g_hash_table_lookup(cpregs, &encoded_cp);
8230 }
8231
8232 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8233 uint64_t value)
8234 {
8235 /* Helper coprocessor write function for write-ignore registers */
8236 }
8237
8238 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
8239 {
8240 /* Helper coprocessor read function for read-as-zero registers */
8241 return 0;
8242 }
8243
8244 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
8245 {
8246 /* Helper coprocessor reset function for do-nothing-on-reset registers */
8247 }
8248
8249 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
8250 {
8251 /* Return true if it is not valid for us to switch to
8252 * this CPU mode (ie all the UNPREDICTABLE cases in
8253 * the ARM ARM CPSRWriteByInstr pseudocode).
8254 */
8255
8256 /* Changes to or from Hyp via MSR and CPS are illegal. */
8257 if (write_type == CPSRWriteByInstr &&
8258 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8259 mode == ARM_CPU_MODE_HYP)) {
8260 return 1;
8261 }
8262
8263 switch (mode) {
8264 case ARM_CPU_MODE_USR:
8265 return 0;
8266 case ARM_CPU_MODE_SYS:
8267 case ARM_CPU_MODE_SVC:
8268 case ARM_CPU_MODE_ABT:
8269 case ARM_CPU_MODE_UND:
8270 case ARM_CPU_MODE_IRQ:
8271 case ARM_CPU_MODE_FIQ:
8272 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
8273 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8274 */
8275 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8276 * and CPS are treated as illegal mode changes.
8277 */
8278 if (write_type == CPSRWriteByInstr &&
8279 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
8280 (arm_hcr_el2_eff(env) & HCR_TGE)) {
8281 return 1;
8282 }
8283 return 0;
8284 case ARM_CPU_MODE_HYP:
8285 return !arm_feature(env, ARM_FEATURE_EL2)
8286 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
8287 case ARM_CPU_MODE_MON:
8288 return arm_current_el(env) < 3;
8289 default:
8290 return 1;
8291 }
8292 }
8293
8294 uint32_t cpsr_read(CPUARMState *env)
8295 {
8296 int ZF;
8297 ZF = (env->ZF == 0);
8298 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
8299 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
8300 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
8301 | ((env->condexec_bits & 0xfc) << 8)
8302 | (env->GE << 16) | (env->daif & CPSR_AIF);
8303 }
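/*
 * Layout reminder for the cached fields reassembled above (architectural
 * CPSR bit positions): N=31, Z=30, C=29, V=28, Q=27, IT[1:0]=26:25,
 * GE[3:0]=19:16, IT[7:2]=15:10, A/I/F=8:6, T=5; everything else is taken
 * directly from uncached_cpsr.
 */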
8304
8305 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
8306 CPSRWriteType write_type)
8307 {
8308 uint32_t changed_daif;
8309
8310 if (mask & CPSR_NZCV) {
8311 env->ZF = (~val) & CPSR_Z;
8312 env->NF = val;
8313 env->CF = (val >> 29) & 1;
8314 env->VF = (val << 3) & 0x80000000;
8315 }
8316 if (mask & CPSR_Q)
8317 env->QF = ((val & CPSR_Q) != 0);
8318 if (mask & CPSR_T)
8319 env->thumb = ((val & CPSR_T) != 0);
8320 if (mask & CPSR_IT_0_1) {
8321 env->condexec_bits &= ~3;
8322 env->condexec_bits |= (val >> 25) & 3;
8323 }
8324 if (mask & CPSR_IT_2_7) {
8325 env->condexec_bits &= 3;
8326 env->condexec_bits |= (val >> 8) & 0xfc;
8327 }
8328 if (mask & CPSR_GE) {
8329 env->GE = (val >> 16) & 0xf;
8330 }
8331
8332 /* In a V7 implementation that includes the security extensions but does
8333 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
8334 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
8335 * bits respectively.
8336 *
8337 * In a V8 implementation, it is permitted for privileged software to
8338 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
8339 */
8340 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
8341 arm_feature(env, ARM_FEATURE_EL3) &&
8342 !arm_feature(env, ARM_FEATURE_EL2) &&
8343 !arm_is_secure(env)) {
8344
8345 changed_daif = (env->daif ^ val) & mask;
8346
8347 if (changed_daif & CPSR_A) {
8348 /* Check to see if we are allowed to change the masking of async
8349 * abort exceptions from a non-secure state.
8350 */
8351 if (!(env->cp15.scr_el3 & SCR_AW)) {
8352 qemu_log_mask(LOG_GUEST_ERROR,
8353 "Ignoring attempt to switch CPSR_A flag from "
8354 "non-secure world with SCR.AW bit clear\n");
8355 mask &= ~CPSR_A;
8356 }
8357 }
8358
8359 if (changed_daif & CPSR_F) {
8360 /* Check to see if we are allowed to change the masking of FIQ
8361 * exceptions from a non-secure state.
8362 */
8363 if (!(env->cp15.scr_el3 & SCR_FW)) {
8364 qemu_log_mask(LOG_GUEST_ERROR,
8365 "Ignoring attempt to switch CPSR_F flag from "
8366 "non-secure world with SCR.FW bit clear\n");
8367 mask &= ~CPSR_F;
8368 }
8369
8370 /* Check whether non-maskable FIQ (NMFI) support is enabled.
8371 * If this bit is set software is not allowed to mask
8372 * FIQs, but is allowed to set CPSR_F to 0.
8373 */
8374 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
8375 (val & CPSR_F)) {
8376 qemu_log_mask(LOG_GUEST_ERROR,
8377 "Ignoring attempt to enable CPSR_F flag "
8378 "(non-maskable FIQ [NMFI] support enabled)\n");
8379 mask &= ~CPSR_F;
8380 }
8381 }
8382 }
8383
8384 env->daif &= ~(CPSR_AIF & mask);
8385 env->daif |= val & CPSR_AIF & mask;
8386
8387 if (write_type != CPSRWriteRaw &&
8388 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
8389 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
8390 /* Note that we can only get here in USR mode if this is a
8391 * gdb stub write; for this case we follow the architectural
8392 * behaviour for guest writes in USR mode of ignoring an attempt
8393 * to switch mode. (Those are caught by translate.c for writes
8394 * triggered by guest instructions.)
8395 */
8396 mask &= ~CPSR_M;
8397 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
8398 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
8399 * v7, and has defined behaviour in v8:
8400 * + leave CPSR.M untouched
8401 * + allow changes to the other CPSR fields
8402 * + set PSTATE.IL
8403 * For user changes via the GDB stub, we don't set PSTATE.IL,
8404 * as this would be unnecessarily harsh for a user error.
8405 */
8406 mask &= ~CPSR_M;
8407 if (write_type != CPSRWriteByGDBStub &&
8408 arm_feature(env, ARM_FEATURE_V8)) {
8409 mask |= CPSR_IL;
8410 val |= CPSR_IL;
8411 }
8412 qemu_log_mask(LOG_GUEST_ERROR,
8413 "Illegal AArch32 mode switch attempt from %s to %s\n",
8414 aarch32_mode_name(env->uncached_cpsr),
8415 aarch32_mode_name(val));
8416 } else {
8417 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
8418 write_type == CPSRWriteExceptionReturn ?
8419 "Exception return from AArch32" :
8420 "AArch32 mode switch from",
8421 aarch32_mode_name(env->uncached_cpsr),
8422 aarch32_mode_name(val), env->regs[15]);
8423 switch_mode(env, val & CPSR_M);
8424 }
8425 }
8426 mask &= ~CACHED_CPSR_BITS;
8427 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
8428 }
8429
8430 /* Sign/zero extend */
8431 uint32_t HELPER(sxtb16)(uint32_t x)
8432 {
8433 uint32_t res;
8434 res = (uint16_t)(int8_t)x;
8435 res |= (uint32_t)(int8_t)(x >> 16) << 16;
8436 return res;
8437 }
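/*
 * Worked example (illustrative): sxtb16(0x007f0080) sign-extends each
 * selected byte to 16 bits, giving 0x007fff80 (low byte 0x80 -> 0xff80,
 * byte at [23:16] 0x7f -> 0x007f).
 */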
8438
8439 uint32_t HELPER(uxtb16)(uint32_t x)
8440 {
8441 uint32_t res;
8442 res = (uint16_t)(uint8_t)x;
8443 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
8444 return res;
8445 }
8446
8447 int32_t HELPER(sdiv)(int32_t num, int32_t den)
8448 {
8449 if (den == 0)
8450 return 0;
8451 if (num == INT_MIN && den == -1)
8452 return INT_MIN;
8453 return num / den;
8454 }
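/*
 * Illustrative corner cases: sdiv(7, 0) returns 0 (Arm division by zero
 * yields zero rather than trapping), and sdiv(INT_MIN, -1) returns INT_MIN,
 * the 32-bit truncation of +2^31, which also avoids undefined behaviour in
 * the host division.
 */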
8455
8456 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
8457 {
8458 if (den == 0)
8459 return 0;
8460 return num / den;
8461 }
8462
8463 uint32_t HELPER(rbit)(uint32_t x)
8464 {
8465 return revbit32(x);
8466 }
8467
8468 #ifdef CONFIG_USER_ONLY
8469
8470 static void switch_mode(CPUARMState *env, int mode)
8471 {
8472 ARMCPU *cpu = env_archcpu(env);
8473
8474 if (mode != ARM_CPU_MODE_USR) {
8475 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
8476 }
8477 }
8478
8479 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8480 uint32_t cur_el, bool secure)
8481 {
8482 return 1;
8483 }
8484
8485 void aarch64_sync_64_to_32(CPUARMState *env)
8486 {
8487 g_assert_not_reached();
8488 }
8489
8490 #else
8491
8492 static void switch_mode(CPUARMState *env, int mode)
8493 {
8494 int old_mode;
8495 int i;
8496
8497 old_mode = env->uncached_cpsr & CPSR_M;
8498 if (mode == old_mode)
8499 return;
8500
8501 if (old_mode == ARM_CPU_MODE_FIQ) {
8502 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8503 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
8504 } else if (mode == ARM_CPU_MODE_FIQ) {
8505 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8506 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
8507 }
8508
8509 i = bank_number(old_mode);
8510 env->banked_r13[i] = env->regs[13];
8511 env->banked_spsr[i] = env->spsr;
8512
8513 i = bank_number(mode);
8514 env->regs[13] = env->banked_r13[i];
8515 env->spsr = env->banked_spsr[i];
8516
8517 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
8518 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
8519 }
8520
8521 /* Physical Interrupt Target EL Lookup Table
8522 *
8523 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
8524 *
8525 * The below multi-dimensional table is used for looking up the target
8526 * exception level given numerous condition criteria. Specifically, the
8527 * target EL is based on SCR and HCR routing controls as well as the
8528 * currently executing EL and secure state.
8529 *
8530 * Dimensions:
8531 * target_el_table[2][2][2][2][2][4]
8532 * | | | | | +--- Current EL
8533 * | | | | +------ Non-secure(0)/Secure(1)
8534 * | | | +--------- HCR mask override
8535 * | | +------------ SCR exec state control
8536 * | +--------------- SCR mask override
8537 * +------------------ 32-bit(0)/64-bit(1) EL3
8538 *
8539 * The table values are as such:
8540 * 0-3 = EL0-EL3
8541 * -1 = Cannot occur
8542 *
8543 * The ARM ARM target EL table includes entries indicating that an "exception
8544 * is not taken". The two cases where this is applicable are:
8545 * 1) An exception is taken from EL3 but the SCR does not have the exception
8546 * routed to EL3.
8547 * 2) An exception is taken from EL2 but the HCR does not have the exception
8548 * routed to EL2.
8549 * In these two cases, the below table contains a target of EL1. This value is
8550 * returned as it is expected that the consumer of the table data will check
8551 * for "target EL >= current EL" to ensure the exception is not taken.
8552 *
8553 * SCR HCR
8554 * 64 EA AMO From
8555 * BIT IRQ IMO Non-secure Secure
8556 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
8557 */
8558 static const int8_t target_el_table[2][2][2][2][2][4] = {
8559 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8560 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
8561 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8562 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
8563 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8564 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
8565 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8566 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
8567 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
8568 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
8569 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
8570 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
8571 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
8572 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
8573 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
8574 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
8575 };
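/*
 * Worked lookup (illustrative): a physical IRQ taken from non-secure EL0
 * with an AArch64 EL3 (64BIT=1), SCR_EL3.IRQ=0, SCR_EL3.RW=1 and
 * HCR_EL2.IMO=0 indexes target_el_table[1][0][1][0][0][0], i.e. the
 * "1 0 1 0" row above, and selects EL1 as the target.
 */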
8576
8577 /*
8578 * Determine the target EL for physical exceptions
8579 */
8580 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8581 uint32_t cur_el, bool secure)
8582 {
8583 CPUARMState *env = cs->env_ptr;
8584 bool rw;
8585 bool scr;
8586 bool hcr;
8587 int target_el;
8588 /* Is the highest EL AArch64? */
8589 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
8590 uint64_t hcr_el2;
8591
8592 if (arm_feature(env, ARM_FEATURE_EL3)) {
8593 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
8594 } else {
8595 /* Either EL2 is the highest EL (and so the EL2 register width
8596 * is given by is64); or there is no EL2 or EL3, in which case
8597 * the value of 'rw' does not affect the table lookup anyway.
8598 */
8599 rw = is64;
8600 }
8601
8602 hcr_el2 = arm_hcr_el2_eff(env);
8603 switch (excp_idx) {
8604 case EXCP_IRQ:
8605 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
8606 hcr = hcr_el2 & HCR_IMO;
8607 break;
8608 case EXCP_FIQ:
8609 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
8610 hcr = hcr_el2 & HCR_FMO;
8611 break;
8612 default:
8613 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
8614 hcr = hcr_el2 & HCR_AMO;
8615 break;
8616 };
8617
8618 /*
8619 * For these purposes, TGE and AMO/IMO/FMO both force the
8620 * interrupt to EL2. Fold TGE into the bit extracted above.
8621 */
8622 hcr |= (hcr_el2 & HCR_TGE) != 0;
8623
8624 /* Perform a table-lookup for the target EL given the current state */
8625 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
8626
8627 assert(target_el > 0);
8628
8629 return target_el;
8630 }
8631
8632 void arm_log_exception(int idx)
8633 {
8634 if (qemu_loglevel_mask(CPU_LOG_INT)) {
8635 const char *exc = NULL;
8636 static const char * const excnames[] = {
8637 [EXCP_UDEF] = "Undefined Instruction",
8638 [EXCP_SWI] = "SVC",
8639 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
8640 [EXCP_DATA_ABORT] = "Data Abort",
8641 [EXCP_IRQ] = "IRQ",
8642 [EXCP_FIQ] = "FIQ",
8643 [EXCP_BKPT] = "Breakpoint",
8644 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
8645 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
8646 [EXCP_HVC] = "Hypervisor Call",
8647 [EXCP_HYP_TRAP] = "Hypervisor Trap",
8648 [EXCP_SMC] = "Secure Monitor Call",
8649 [EXCP_VIRQ] = "Virtual IRQ",
8650 [EXCP_VFIQ] = "Virtual FIQ",
8651 [EXCP_SEMIHOST] = "Semihosting call",
8652 [EXCP_NOCP] = "v7M NOCP UsageFault",
8653 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
8654 [EXCP_STKOF] = "v8M STKOF UsageFault",
8655 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
8656 [EXCP_LSERR] = "v8M LSERR UsageFault",
8657 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
8658 };
8659
8660 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
8661 exc = excnames[idx];
8662 }
8663 if (!exc) {
8664 exc = "unknown";
8665 }
8666 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
8667 }
8668 }
8669
8670 /*
8671 * Function used to synchronize QEMU's AArch64 register set with AArch32
8672 * register set. This is necessary when switching between AArch32 and AArch64
8673 * execution state.
8674 */
8675 void aarch64_sync_32_to_64(CPUARMState *env)
8676 {
8677 int i;
8678 uint32_t mode = env->uncached_cpsr & CPSR_M;
8679
8680 /* We can blanket copy R[0:7] to X[0:7] */
8681 for (i = 0; i < 8; i++) {
8682 env->xregs[i] = env->regs[i];
8683 }
8684
8685 /*
8686 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
8687 * Otherwise, they come from the banked user regs.
8688 */
8689 if (mode == ARM_CPU_MODE_FIQ) {
8690 for (i = 8; i < 13; i++) {
8691 env->xregs[i] = env->usr_regs[i - 8];
8692 }
8693 } else {
8694 for (i = 8; i < 13; i++) {
8695 env->xregs[i] = env->regs[i];
8696 }
8697 }
8698
8699 /*
8700 * Registers x13-x23 are the various mode SP and LR registers. Registers
8701 * r13 and r14 are only copied if we are in that mode, otherwise we copy
8702 * from the mode banked register.
8703 */
8704 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8705 env->xregs[13] = env->regs[13];
8706 env->xregs[14] = env->regs[14];
8707 } else {
8708 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
8709 /* HYP is an exception in that it is copied from r14 */
8710 if (mode == ARM_CPU_MODE_HYP) {
8711 env->xregs[14] = env->regs[14];
8712 } else {
8713 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
8714 }
8715 }
8716
8717 if (mode == ARM_CPU_MODE_HYP) {
8718 env->xregs[15] = env->regs[13];
8719 } else {
8720 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
8721 }
8722
8723 if (mode == ARM_CPU_MODE_IRQ) {
8724 env->xregs[16] = env->regs[14];
8725 env->xregs[17] = env->regs[13];
8726 } else {
8727 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
8728 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
8729 }
8730
8731 if (mode == ARM_CPU_MODE_SVC) {
8732 env->xregs[18] = env->regs[14];
8733 env->xregs[19] = env->regs[13];
8734 } else {
8735 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
8736 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
8737 }
8738
8739 if (mode == ARM_CPU_MODE_ABT) {
8740 env->xregs[20] = env->regs[14];
8741 env->xregs[21] = env->regs[13];
8742 } else {
8743 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
8744 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
8745 }
8746
8747 if (mode == ARM_CPU_MODE_UND) {
8748 env->xregs[22] = env->regs[14];
8749 env->xregs[23] = env->regs[13];
8750 } else {
8751 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
8752 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
8753 }
8754
8755 /*
8756 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8757 * mode, then we can copy from r8-r14. Otherwise, we copy from the
8758 * FIQ bank for r8-r14.
8759 */
8760 if (mode == ARM_CPU_MODE_FIQ) {
8761 for (i = 24; i < 31; i++) {
8762 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
8763 }
8764 } else {
8765 for (i = 24; i < 29; i++) {
8766 env->xregs[i] = env->fiq_regs[i - 24];
8767 }
8768 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
8769 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
8770 }
8771
8772 env->pc = env->regs[15];
8773 }
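/*
 * Summary of the fixed xregs mapping used above and reversed in
 * aarch64_sync_64_to_32() below:
 *   x0-x7   <-> r0-r7            x8-x12  <-> r8-r12 (user copies)
 *   x13/x14 <-> sp_usr/lr_usr    x15     <-> sp_hyp
 *   x16/x17 <-> lr_irq/sp_irq    x18/x19 <-> lr_svc/sp_svc
 *   x20/x21 <-> lr_abt/sp_abt    x22/x23 <-> lr_und/sp_und
 *   x24-x30 <-> r8_fiq-r14_fiq
 */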
8774
8775 /*
8776 * Function used to synchronize QEMU's AArch32 register set with AArch64
8777 * register set. This is necessary when switching between AArch32 and AArch64
8778 * execution state.
8779 */
8780 void aarch64_sync_64_to_32(CPUARMState *env)
8781 {
8782 int i;
8783 uint32_t mode = env->uncached_cpsr & CPSR_M;
8784
8785 /* We can blanket copy X[0:7] to R[0:7] */
8786 for (i = 0; i < 8; i++) {
8787 env->regs[i] = env->xregs[i];
8788 }
8789
8790 /*
8791 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
8792 * Otherwise, we copy x8-x12 into the banked user regs.
8793 */
8794 if (mode == ARM_CPU_MODE_FIQ) {
8795 for (i = 8; i < 13; i++) {
8796 env->usr_regs[i - 8] = env->xregs[i];
8797 }
8798 } else {
8799 for (i = 8; i < 13; i++) {
8800 env->regs[i] = env->xregs[i];
8801 }
8802 }
8803
8804 /*
8805 * Registers r13 & r14 depend on the current mode.
8806 * If we are in a given mode, we copy the corresponding x registers to r13
8807 * and r14. Otherwise, we copy the x register to the banked r13 and r14
8808 * for the mode.
8809 */
8810 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8811 env->regs[13] = env->xregs[13];
8812 env->regs[14] = env->xregs[14];
8813 } else {
8814 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
8815
8816 /*
8817 * HYP is an exception in that it does not have its own banked r14 but
8818 * shares the USR r14
8819 */
8820 if (mode == ARM_CPU_MODE_HYP) {
8821 env->regs[14] = env->xregs[14];
8822 } else {
8823 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
8824 }
8825 }
8826
8827 if (mode == ARM_CPU_MODE_HYP) {
8828 env->regs[13] = env->xregs[15];
8829 } else {
8830 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
8831 }
8832
8833 if (mode == ARM_CPU_MODE_IRQ) {
8834 env->regs[14] = env->xregs[16];
8835 env->regs[13] = env->xregs[17];
8836 } else {
8837 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
8838 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
8839 }
8840
8841 if (mode == ARM_CPU_MODE_SVC) {
8842 env->regs[14] = env->xregs[18];
8843 env->regs[13] = env->xregs[19];
8844 } else {
8845 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
8846 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
8847 }
8848
8849 if (mode == ARM_CPU_MODE_ABT) {
8850 env->regs[14] = env->xregs[20];
8851 env->regs[13] = env->xregs[21];
8852 } else {
8853 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
8854 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
8855 }
8856
8857 if (mode == ARM_CPU_MODE_UND) {
8858 env->regs[14] = env->xregs[22];
8859 env->regs[13] = env->xregs[23];
8860 } else {
8861 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
8862 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
8863 }
8864
8865 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8866 * mode, then we can copy to r8-r14. Otherwise, we copy to the
8867 * FIQ bank for r8-r14.
8868 */
8869 if (mode == ARM_CPU_MODE_FIQ) {
8870 for (i = 24; i < 31; i++) {
8871 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
8872 }
8873 } else {
8874 for (i = 24; i < 29; i++) {
8875 env->fiq_regs[i - 24] = env->xregs[i];
8876 }
8877 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
8878 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
8879 }
8880
8881 env->regs[15] = env->pc;
8882 }
8883
8884 static void take_aarch32_exception(CPUARMState *env, int new_mode,
8885 uint32_t mask, uint32_t offset,
8886 uint32_t newpc)
8887 {
8888 int new_el;
8889
8890 /* Change the CPU state so as to actually take the exception. */
8891 switch_mode(env, new_mode);
8892 new_el = arm_current_el(env);
8893
8894 /*
8895 * For exceptions taken to AArch32 we must clear the SS bit in both
8896 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
8897 */
8898 env->uncached_cpsr &= ~PSTATE_SS;
8899 env->spsr = cpsr_read(env);
8900 /* Clear IT bits. */
8901 env->condexec_bits = 0;
8902 /* Switch to the new mode, and to the correct instruction set. */
8903 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8904 /* Set new mode endianness */
8905 env->uncached_cpsr &= ~CPSR_E;
8906 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
8907 env->uncached_cpsr |= CPSR_E;
8908 }
8909 /* J and IL must always be cleared for exception entry */
8910 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
8911 env->daif |= mask;
8912
8913 if (new_mode == ARM_CPU_MODE_HYP) {
8914 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8915 env->elr_el[2] = env->regs[15];
8916 } else {
8917 /* CPSR.PAN is normally preserved unless... */
8918 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
8919 switch (new_el) {
8920 case 3:
8921 if (!arm_is_secure_below_el3(env)) {
8922 /* ... the target is EL3, from non-secure state. */
8923 env->uncached_cpsr &= ~CPSR_PAN;
8924 break;
8925 }
8926 /* ... the target is EL3, from secure state ... */
8927 /* fall through */
8928 case 1:
8929 /* ... the target is EL1 and SCTLR.SPAN is 0. */
8930 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
8931 env->uncached_cpsr |= CPSR_PAN;
8932 }
8933 break;
8934 }
8935 }
8936 /*
8937 * This is a lie, as there was no c1_sys on V4T/V5, but it does not
8938 * matter; we only need to guard the Thumb mode selection on V4.
8939 */
8940 if (arm_feature(env, ARM_FEATURE_V4T)) {
8941 env->thumb =
8942 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8943 }
8944 env->regs[14] = env->regs[15] + offset;
8945 }
8946 env->regs[15] = newpc;
8947 arm_rebuild_hflags(env);
8948 }
8949
8950 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8951 {
8952 /*
8953 * Handle exception entry to Hyp mode; this is sufficiently
8954 * different to entry to other AArch32 modes that we handle it
8955 * separately here.
8956 *
8957 * The vector table entry used is always the 0x14 Hyp mode entry point,
8958 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
8959 * The offset applied to the preferred return address is always zero
8960 * (see DDI0487C.a section G1.12.3).
8961 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8962 */
8963 uint32_t addr, mask;
8964 ARMCPU *cpu = ARM_CPU(cs);
8965 CPUARMState *env = &cpu->env;
8966
8967 switch (cs->exception_index) {
8968 case EXCP_UDEF:
8969 addr = 0x04;
8970 break;
8971 case EXCP_SWI:
8972 addr = 0x14;
8973 break;
8974 case EXCP_BKPT:
8975 /* Fall through to prefetch abort. */
8976 case EXCP_PREFETCH_ABORT:
8977 env->cp15.ifar_s = env->exception.vaddress;
8978 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8979 (uint32_t)env->exception.vaddress);
8980 addr = 0x0c;
8981 break;
8982 case EXCP_DATA_ABORT:
8983 env->cp15.dfar_s = env->exception.vaddress;
8984 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8985 (uint32_t)env->exception.vaddress);
8986 addr = 0x10;
8987 break;
8988 case EXCP_IRQ:
8989 addr = 0x18;
8990 break;
8991 case EXCP_FIQ:
8992 addr = 0x1c;
8993 break;
8994 case EXCP_HVC:
8995 addr = 0x08;
8996 break;
8997 case EXCP_HYP_TRAP:
8998 addr = 0x14;
8999 break;
9000 default:
9001 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9002 }
9003
9004 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
9005 if (!arm_feature(env, ARM_FEATURE_V8)) {
9006 /*
9007 * QEMU syndrome values are v8-style. v7 has the IL bit
9008 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9009 * If this is a v7 CPU, squash the IL bit in those cases.
9010 */
9011 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9012 (cs->exception_index == EXCP_DATA_ABORT &&
9013 !(env->exception.syndrome & ARM_EL_ISV)) ||
9014 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9015 env->exception.syndrome &= ~ARM_EL_IL;
9016 }
9017 }
9018 env->cp15.esr_el[2] = env->exception.syndrome;
9019 }
9020
9021 if (arm_current_el(env) != 2 && addr < 0x14) {
9022 addr = 0x14;
9023 }
9024
9025 mask = 0;
9026 if (!(env->cp15.scr_el3 & SCR_EA)) {
9027 mask |= CPSR_A;
9028 }
9029 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9030 mask |= CPSR_I;
9031 }
9032 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
9033 mask |= CPSR_F;
9034 }
9035
9036 addr += env->cp15.hvbar;
9037
9038 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
9039 }
9040
9041 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
9042 {
9043 ARMCPU *cpu = ARM_CPU(cs);
9044 CPUARMState *env = &cpu->env;
9045 uint32_t addr;
9046 uint32_t mask;
9047 int new_mode;
9048 uint32_t offset;
9049 uint32_t moe;
9050
9051 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
9052 switch (syn_get_ec(env->exception.syndrome)) {
9053 case EC_BREAKPOINT:
9054 case EC_BREAKPOINT_SAME_EL:
9055 moe = 1;
9056 break;
9057 case EC_WATCHPOINT:
9058 case EC_WATCHPOINT_SAME_EL:
9059 moe = 10;
9060 break;
9061 case EC_AA32_BKPT:
9062 moe = 3;
9063 break;
9064 case EC_VECTORCATCH:
9065 moe = 5;
9066 break;
9067 default:
9068 moe = 0;
9069 break;
9070 }
9071
9072 if (moe) {
9073 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
9074 }
9075
9076 if (env->exception.target_el == 2) {
9077 arm_cpu_do_interrupt_aarch32_hyp(cs);
9078 return;
9079 }
9080
9081 switch (cs->exception_index) {
9082 case EXCP_UDEF:
9083 new_mode = ARM_CPU_MODE_UND;
9084 addr = 0x04;
9085 mask = CPSR_I;
9086 if (env->thumb)
9087 offset = 2;
9088 else
9089 offset = 4;
9090 break;
9091 case EXCP_SWI:
9092 new_mode = ARM_CPU_MODE_SVC;
9093 addr = 0x08;
9094 mask = CPSR_I;
9095 /* The PC already points to the next instruction. */
9096 offset = 0;
9097 break;
9098 case EXCP_BKPT:
9099 /* Fall through to prefetch abort. */
9100 case EXCP_PREFETCH_ABORT:
9101 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
9102 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
9103 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
9104 env->exception.fsr, (uint32_t)env->exception.vaddress);
9105 new_mode = ARM_CPU_MODE_ABT;
9106 addr = 0x0c;
9107 mask = CPSR_A | CPSR_I;
9108 offset = 4;
9109 break;
9110 case EXCP_DATA_ABORT:
9111 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9112 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
9113 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
9114 env->exception.fsr,
9115 (uint32_t)env->exception.vaddress);
9116 new_mode = ARM_CPU_MODE_ABT;
9117 addr = 0x10;
9118 mask = CPSR_A | CPSR_I;
9119 offset = 8;
9120 break;
9121 case EXCP_IRQ:
9122 new_mode = ARM_CPU_MODE_IRQ;
9123 addr = 0x18;
9124 /* Disable IRQ and imprecise data aborts. */
9125 mask = CPSR_A | CPSR_I;
9126 offset = 4;
9127 if (env->cp15.scr_el3 & SCR_IRQ) {
9128 /* IRQ routed to monitor mode */
9129 new_mode = ARM_CPU_MODE_MON;
9130 mask |= CPSR_F;
9131 }
9132 break;
9133 case EXCP_FIQ:
9134 new_mode = ARM_CPU_MODE_FIQ;
9135 addr = 0x1c;
9136 /* Disable FIQ, IRQ and imprecise data aborts. */
9137 mask = CPSR_A | CPSR_I | CPSR_F;
9138 if (env->cp15.scr_el3 & SCR_FIQ) {
9139 /* FIQ routed to monitor mode */
9140 new_mode = ARM_CPU_MODE_MON;
9141 }
9142 offset = 4;
9143 break;
9144 case EXCP_VIRQ:
9145 new_mode = ARM_CPU_MODE_IRQ;
9146 addr = 0x18;
9147 /* Disable IRQ and imprecise data aborts. */
9148 mask = CPSR_A | CPSR_I;
9149 offset = 4;
9150 break;
9151 case EXCP_VFIQ:
9152 new_mode = ARM_CPU_MODE_FIQ;
9153 addr = 0x1c;
9154 /* Disable FIQ, IRQ and imprecise data aborts. */
9155 mask = CPSR_A | CPSR_I | CPSR_F;
9156 offset = 4;
9157 break;
9158 case EXCP_SMC:
9159 new_mode = ARM_CPU_MODE_MON;
9160 addr = 0x08;
9161 mask = CPSR_A | CPSR_I | CPSR_F;
9162 offset = 0;
9163 break;
9164 default:
9165 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9166 return; /* Never happens. Keep compiler happy. */
9167 }
9168
9169 if (new_mode == ARM_CPU_MODE_MON) {
9170 addr += env->cp15.mvbar;
9171 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
9172 /* High vectors. When enabled, base address cannot be remapped. */
9173 addr += 0xffff0000;
9174 } else {
9175 /* ARM v7 architectures provide a vector base address register to remap
9176 * the interrupt vector table.
9177 * This register only applies in non-monitor mode, and is banked.
9178 * Note: only bits 31:5 are valid.
9179 */
9180 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
9181 }
9182
9183 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9184 env->cp15.scr_el3 &= ~SCR_NS;
9185 }
9186
9187 take_aarch32_exception(env, new_mode, mask, offset, addr);
9188 }
9189
9190 /* Handle exception entry to a target EL which is using AArch64 */
9191 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
9192 {
9193 ARMCPU *cpu = ARM_CPU(cs);
9194 CPUARMState *env = &cpu->env;
9195 unsigned int new_el = env->exception.target_el;
9196 target_ulong addr = env->cp15.vbar_el[new_el];
9197 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
9198 unsigned int old_mode;
9199 unsigned int cur_el = arm_current_el(env);
9200
9201 /*
9202 * Note that new_el can never be 0. If cur_el is 0, then
9203 * el0_a64 is is_a64(), else el0_a64 is ignored.
9204 */
9205 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
9206
9207 if (cur_el < new_el) {
9208 /* Entry vector offset depends on whether the implemented EL
9209 * immediately lower than the target level is using AArch32 or AArch64
9210 */
9211 bool is_aa64;
9212 uint64_t hcr;
9213
9214 switch (new_el) {
9215 case 3:
9216 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
9217 break;
9218 case 2:
9219 hcr = arm_hcr_el2_eff(env);
9220 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9221 is_aa64 = (hcr & HCR_RW) != 0;
9222 break;
9223 }
9224 /* fall through */
9225 case 1:
9226 is_aa64 = is_a64(env);
9227 break;
9228 default:
9229 g_assert_not_reached();
9230 }
9231
9232 if (is_aa64) {
9233 addr += 0x400;
9234 } else {
9235 addr += 0x600;
9236 }
9237 } else if (pstate_read(env) & PSTATE_SP) {
9238 addr += 0x200;
9239 }
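/*
 * Net effect (architectural vector table layout): the base offset from
 * VBAR_ELx is 0x000 for "current EL with SP_EL0", 0x200 for "current EL
 * with SP_ELx", 0x400 for "lower EL using AArch64" and 0x600 for "lower
 * EL using AArch32"; the switch below then adds 0x80 for IRQ and 0x100
 * for FIQ within the selected group.
 */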
9240
9241 switch (cs->exception_index) {
9242 case EXCP_PREFETCH_ABORT:
9243 case EXCP_DATA_ABORT:
9244 env->cp15.far_el[new_el] = env->exception.vaddress;
9245 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
9246 env->cp15.far_el[new_el]);
9247 /* fall through */
9248 case EXCP_BKPT:
9249 case EXCP_UDEF:
9250 case EXCP_SWI:
9251 case EXCP_HVC:
9252 case EXCP_HYP_TRAP:
9253 case EXCP_SMC:
9254 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
9255 /*
9256 * QEMU internal FP/SIMD syndromes from AArch32 include the
9257 * TA and coproc fields which are only exposed if the exception
9258 * is taken to AArch32 Hyp mode. Mask them out to get a valid
9259 * AArch64 format syndrome.
9260 */
9261 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
9262 }
9263 env->cp15.esr_el[new_el] = env->exception.syndrome;
9264 break;
9265 case EXCP_IRQ:
9266 case EXCP_VIRQ:
9267 addr += 0x80;
9268 break;
9269 case EXCP_FIQ:
9270 case EXCP_VFIQ:
9271 addr += 0x100;
9272 break;
9273 default:
9274 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9275 }
9276
9277 if (is_a64(env)) {
9278 old_mode = pstate_read(env);
9279 aarch64_save_sp(env, arm_current_el(env));
9280 env->elr_el[new_el] = env->pc;
9281 } else {
9282 old_mode = cpsr_read(env);
9283 env->elr_el[new_el] = env->regs[15];
9284
9285 aarch64_sync_32_to_64(env);
9286
9287 env->condexec_bits = 0;
9288 }
9289 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
9290
9291 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
9292 env->elr_el[new_el]);
9293
9294 if (cpu_isar_feature(aa64_pan, cpu)) {
9295 /* The value of PSTATE.PAN is normally preserved, except when ... */
9296 new_mode |= old_mode & PSTATE_PAN;
9297 switch (new_el) {
9298 case 2:
9299 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
9300 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
9301 != (HCR_E2H | HCR_TGE)) {
9302 break;
9303 }
9304 /* fall through */
9305 case 1:
9306 /* ... the target is EL1 ... */
9307 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
9308 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
9309 new_mode |= PSTATE_PAN;
9310 }
9311 break;
9312 }
9313 }
9314
9315 pstate_write(env, PSTATE_DAIF | new_mode);
9316 env->aarch64 = 1;
9317 aarch64_restore_sp(env, new_el);
9318 helper_rebuild_hflags_a64(env, new_el);
9319
9320 env->pc = addr;
9321
9322 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
9323 new_el, env->pc, pstate_read(env));
9324 }
9325
9326 /*
9327 * Do semihosting call and set the appropriate return value. All the
9328 * permission and validity checks have been done at translate time.
9329 *
9330 * We only see semihosting exceptions in TCG, as they are not
9331 * trapped to the hypervisor in KVM.
9332 */
9333 #ifdef CONFIG_TCG
9334 static void handle_semihosting(CPUState *cs)
9335 {
9336 ARMCPU *cpu = ARM_CPU(cs);
9337 CPUARMState *env = &cpu->env;
9338
9339 if (is_a64(env)) {
9340 qemu_log_mask(CPU_LOG_INT,
9341 "...handling as semihosting call 0x%" PRIx64 "\n",
9342 env->xregs[0]);
9343 env->xregs[0] = do_arm_semihosting(env);
9344 env->pc += 4;
9345 } else {
9346 qemu_log_mask(CPU_LOG_INT,
9347 "...handling as semihosting call 0x%x\n",
9348 env->regs[0]);
9349 env->regs[0] = do_arm_semihosting(env);
9350 env->regs[15] += env->thumb ? 2 : 4;
9351 }
9352 }
9353 #endif
9354
9355 /* Handle a CPU exception for A and R profile CPUs.
9356 * Do any appropriate logging, handle PSCI calls, and then hand off
9357 * to the AArch64-entry or AArch32-entry function depending on the
9358 * target exception level's register width.
9359 */
9360 void arm_cpu_do_interrupt(CPUState *cs)
9361 {
9362 ARMCPU *cpu = ARM_CPU(cs);
9363 CPUARMState *env = &cpu->env;
9364 unsigned int new_el = env->exception.target_el;
9365
9366 assert(!arm_feature(env, ARM_FEATURE_M));
9367
9368 arm_log_exception(cs->exception_index);
9369 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
9370 new_el);
9371 if (qemu_loglevel_mask(CPU_LOG_INT)
9372 && !excp_is_internal(cs->exception_index)) {
9373 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
9374 syn_get_ec(env->exception.syndrome),
9375 env->exception.syndrome);
9376 }
9377
9378 if (arm_is_psci_call(cpu, cs->exception_index)) {
9379 arm_handle_psci_call(cpu);
9380 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
9381 return;
9382 }
9383
9384 /*
9385 * Semihosting semantics depend on the register width of the code
9386 * that caused the exception, not the target exception level, so
9387 * must be handled here.
9388 */
9389 #ifdef CONFIG_TCG
9390 if (cs->exception_index == EXCP_SEMIHOST) {
9391 handle_semihosting(cs);
9392 return;
9393 }
9394 #endif
9395
9396 /* Hooks may change global state, so the BQL should be held; it also
9397 * needs to be held for any modification of
9398 * cs->interrupt_request.
9399 */
9400 g_assert(qemu_mutex_iothread_locked());
9401
9402 arm_call_pre_el_change_hook(cpu);
9403
9404 assert(!excp_is_internal(cs->exception_index));
9405 if (arm_el_is_aa64(env, new_el)) {
9406 arm_cpu_do_interrupt_aarch64(cs);
9407 } else {
9408 arm_cpu_do_interrupt_aarch32(cs);
9409 }
9410
9411 arm_call_el_change_hook(cpu);
9412
9413 if (!kvm_enabled()) {
9414 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
9415 }
9416 }
9417 #endif /* !CONFIG_USER_ONLY */
9418
9419 /* Return the exception level which controls this address translation regime */
9420 static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
9421 {
9422 switch (mmu_idx) {
9423 case ARMMMUIdx_E20_0:
9424 case ARMMMUIdx_E20_2:
9425 case ARMMMUIdx_E20_2_PAN:
9426 case ARMMMUIdx_Stage2:
9427 case ARMMMUIdx_E2:
9428 return 2;
9429 case ARMMMUIdx_SE3:
9430 return 3;
9431 case ARMMMUIdx_SE10_0:
9432 return arm_el_is_aa64(env, 3) ? 1 : 3;
9433 case ARMMMUIdx_SE10_1:
9434 case ARMMMUIdx_SE10_1_PAN:
9435 case ARMMMUIdx_Stage1_E0:
9436 case ARMMMUIdx_Stage1_E1:
9437 case ARMMMUIdx_Stage1_E1_PAN:
9438 case ARMMMUIdx_E10_0:
9439 case ARMMMUIdx_E10_1:
9440 case ARMMMUIdx_E10_1_PAN:
9441 case ARMMMUIdx_MPrivNegPri:
9442 case ARMMMUIdx_MUserNegPri:
9443 case ARMMMUIdx_MPriv:
9444 case ARMMMUIdx_MUser:
9445 case ARMMMUIdx_MSPrivNegPri:
9446 case ARMMMUIdx_MSUserNegPri:
9447 case ARMMMUIdx_MSPriv:
9448 case ARMMMUIdx_MSUser:
9449 return 1;
9450 default:
9451 g_assert_not_reached();
9452 }
9453 }
9454
9455 uint64_t arm_sctlr(CPUARMState *env, int el)
9456 {
9457 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
9458 if (el == 0) {
9459 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
9460 el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
9461 }
9462 return env->cp15.sctlr_el[el];
9463 }
9464
9465 /* Return the SCTLR value which controls this address translation regime */
9466 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
9467 {
9468 return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
9469 }
9470
9471 #ifndef CONFIG_USER_ONLY
9472
9473 /* Return true if the specified stage of address translation is disabled */
9474 static inline bool regime_translation_disabled(CPUARMState *env,
9475 ARMMMUIdx mmu_idx)
9476 {
9477 if (arm_feature(env, ARM_FEATURE_M)) {
9478 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
9479 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
9480 case R_V7M_MPU_CTRL_ENABLE_MASK:
9481 /* Enabled, but not for HardFault and NMI */
9482 return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
9483 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
9484 /* Enabled for all cases */
9485 return false;
9486 case 0:
9487 default:
9488 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
9489 * we warned about that in armv7m_nvic.c when the guest set it.
9490 */
9491 return true;
9492 }
9493 }
9494
9495 if (mmu_idx == ARMMMUIdx_Stage2) {
9496 /* HCR.DC means HCR.VM behaves as 1 */
9497 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
9498 }
9499
9500 if (env->cp15.hcr_el2 & HCR_TGE) {
9501 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
9502 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
9503 return true;
9504 }
9505 }
9506
9507 if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
9508 /* HCR.DC means SCTLR_EL1.M behaves as 0 */
9509 return true;
9510 }
9511
9512 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
9513 }
9514
9515 static inline bool regime_translation_big_endian(CPUARMState *env,
9516 ARMMMUIdx mmu_idx)
9517 {
9518 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
9519 }
9520
9521 /* Return the TTBR associated with this translation regime */
9522 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
9523 int ttbrn)
9524 {
9525 if (mmu_idx == ARMMMUIdx_Stage2) {
9526 return env->cp15.vttbr_el2;
9527 }
9528 if (ttbrn == 0) {
9529 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
9530 } else {
9531 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
9532 }
9533 }
9534
9535 #endif /* !CONFIG_USER_ONLY */
9536
9537 /* Return the TCR controlling this translation regime */
9538 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
9539 {
9540 if (mmu_idx == ARMMMUIdx_Stage2) {
9541 return &env->cp15.vtcr_el2;
9542 }
9543 return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
9544 }
9545
9546 /* Convert a possible stage1+2 MMU index into the appropriate
9547 * stage 1 MMU index
9548 */
9549 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
9550 {
9551 switch (mmu_idx) {
9552 case ARMMMUIdx_E10_0:
9553 return ARMMMUIdx_Stage1_E0;
9554 case ARMMMUIdx_E10_1:
9555 return ARMMMUIdx_Stage1_E1;
9556 case ARMMMUIdx_E10_1_PAN:
9557 return ARMMMUIdx_Stage1_E1_PAN;
9558 default:
9559 return mmu_idx;
9560 }
9561 }
9562
9563 /* Return true if the translation regime is using LPAE format page tables */
9564 static inline bool regime_using_lpae_format(CPUARMState *env,
9565 ARMMMUIdx mmu_idx)
9566 {
9567 int el = regime_el(env, mmu_idx);
9568 if (el == 2 || arm_el_is_aa64(env, el)) {
9569 return true;
9570 }
9571 if (arm_feature(env, ARM_FEATURE_LPAE)
9572 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
9573 return true;
9574 }
9575 return false;
9576 }
9577
9578 /* Returns true if the stage 1 translation regime is using LPAE format page
9579 * tables. Used when raising alignment exceptions, whose FSR changes depending
9580 * on whether the long or short descriptor format is in use. */
9581 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
9582 {
9583 mmu_idx = stage_1_mmu_idx(mmu_idx);
9584
9585 return regime_using_lpae_format(env, mmu_idx);
9586 }
9587
9588 #ifndef CONFIG_USER_ONLY
9589 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
9590 {
9591 switch (mmu_idx) {
9592 case ARMMMUIdx_SE10_0:
9593 case ARMMMUIdx_E20_0:
9594 case ARMMMUIdx_Stage1_E0:
9595 case ARMMMUIdx_MUser:
9596 case ARMMMUIdx_MSUser:
9597 case ARMMMUIdx_MUserNegPri:
9598 case ARMMMUIdx_MSUserNegPri:
9599 return true;
9600 default:
9601 return false;
9602 case ARMMMUIdx_E10_0:
9603 case ARMMMUIdx_E10_1:
9604 case ARMMMUIdx_E10_1_PAN:
9605 g_assert_not_reached();
9606 }
9607 }
9608
9609 /* Translate section/page access permissions to page
9610 * R/W protection flags
9611 *
9612 * @env: CPUARMState
9613 * @mmu_idx: MMU index indicating required translation regime
9614 * @ap: The 3-bit access permissions (AP[2:0])
9615 * @domain_prot: The 2-bit domain access permissions
9616 */
9617 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
9618 int ap, int domain_prot)
9619 {
9620 bool is_user = regime_is_user(env, mmu_idx);
9621
9622 if (domain_prot == 3) {
9623 return PAGE_READ | PAGE_WRITE;
9624 }
9625
9626 switch (ap) {
9627 case 0:
9628 if (arm_feature(env, ARM_FEATURE_V7)) {
9629 return 0;
9630 }
9631 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
9632 case SCTLR_S:
9633 return is_user ? 0 : PAGE_READ;
9634 case SCTLR_R:
9635 return PAGE_READ;
9636 default:
9637 return 0;
9638 }
9639 case 1:
9640 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9641 case 2:
9642 if (is_user) {
9643 return PAGE_READ;
9644 } else {
9645 return PAGE_READ | PAGE_WRITE;
9646 }
9647 case 3:
9648 return PAGE_READ | PAGE_WRITE;
9649 case 4: /* Reserved. */
9650 return 0;
9651 case 5:
9652 return is_user ? 0 : PAGE_READ;
9653 case 6:
9654 return PAGE_READ;
9655 case 7:
9656 if (!arm_feature(env, ARM_FEATURE_V6K)) {
9657 return 0;
9658 }
9659 return PAGE_READ;
9660 default:
9661 g_assert_not_reached();
9662 }
9663 }
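/*
 * Worked example for the short-descriptor AP[2:0] encoding handled above:
 * with a "client" domain (domain_prot == 1), AP == 0b010 yields PAGE_READ
 * for a user (PL0) regime and PAGE_READ | PAGE_WRITE for a privileged one,
 * matching the architected "PL1 read/write, PL0 read-only" meaning.
 */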
9664
9665 /* Translate section/page access permissions to page
9666 * R/W protection flags.
9667 *
9668 * @ap: The 2-bit simple AP (AP[2:1])
9669 * @is_user: TRUE if accessing from PL0
9670 */
9671 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
9672 {
9673 switch (ap) {
9674 case 0:
9675 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9676 case 1:
9677 return PAGE_READ | PAGE_WRITE;
9678 case 2:
9679 return is_user ? 0 : PAGE_READ;
9680 case 3:
9681 return PAGE_READ;
9682 default:
9683 g_assert_not_reached();
9684 }
9685 }
9686
9687 static inline int
9688 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
9689 {
9690 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
9691 }
9692
9693 /* Translate S2 section/page access permissions to protection flags
9694 *
9695 * @env: CPUARMState
9696 * @s2ap: The 2-bit stage2 access permissions (S2AP)
9697 * @xn: XN (execute-never) bit
9698 */
9699 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
9700 {
9701 int prot = 0;
9702
9703 if (s2ap & 1) {
9704 prot |= PAGE_READ;
9705 }
9706 if (s2ap & 2) {
9707 prot |= PAGE_WRITE;
9708 }
9709 if (!xn) {
9710 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
9711 prot |= PAGE_EXEC;
9712 }
9713 }
9714 return prot;
9715 }
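/*
 * For example, a stage 2 descriptor with S2AP == 0b11 and XN == 0 yields
 * PAGE_READ | PAGE_WRITE | PAGE_EXEC, while S2AP == 0b01 with XN == 1
 * yields PAGE_READ only.
 */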
9716
9717 /* Translate section/page access permissions to protection flags
9718 *
9719 * @env: CPUARMState
9720 * @mmu_idx: MMU index indicating required translation regime
9721 * @is_aa64: TRUE if AArch64
9722 * @ap: The 2-bit simple AP (AP[2:1])
9723 * @ns: NS (non-secure) bit
9724 * @xn: XN (execute-never) bit
9725 * @pxn: PXN (privileged execute-never) bit
9726 */
9727 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
9728 int ap, int ns, int xn, int pxn)
9729 {
9730 bool is_user = regime_is_user(env, mmu_idx);
9731 int prot_rw, user_rw;
9732 bool have_wxn;
9733 int wxn = 0;
9734
9735 assert(mmu_idx != ARMMMUIdx_Stage2);
9736
9737 user_rw = simple_ap_to_rw_prot_is_user(ap, true);
9738 if (is_user) {
9739 prot_rw = user_rw;
9740 } else {
9741 if (user_rw && regime_is_pan(env, mmu_idx)) {
9742 return 0;
9743 }
9744 prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
9745 }
9746
9747 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
9748 return prot_rw;
9749 }
9750
9751 /* TODO have_wxn should be replaced with
9752 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
9753 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
9754 * compatible processors have EL2, which is required for [U]WXN.
9755 */
9756 have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
9757
9758 if (have_wxn) {
9759 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
9760 }
9761
9762 if (is_aa64) {
9763 if (regime_has_2_ranges(mmu_idx) && !is_user) {
9764 xn = pxn || (user_rw & PAGE_WRITE);
9765 }
9766 } else if (arm_feature(env, ARM_FEATURE_V7)) {
9767 switch (regime_el(env, mmu_idx)) {
9768 case 1:
9769 case 3:
9770 if (is_user) {
9771 xn = xn || !(user_rw & PAGE_READ);
9772 } else {
9773 int uwxn = 0;
9774 if (have_wxn) {
9775 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
9776 }
9777 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
9778 (uwxn && (user_rw & PAGE_WRITE));
9779 }
9780 break;
9781 case 2:
9782 break;
9783 }
9784 } else {
9785 xn = wxn = 0;
9786 }
9787
9788 if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
9789 return prot_rw;
9790 }
9791 return prot_rw | PAGE_EXEC;
9792 }
9793
9794 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
9795 uint32_t *table, uint32_t address)
9796 {
9797 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
9798 TCR *tcr = regime_tcr(env, mmu_idx);
9799
9800 if (address & tcr->mask) {
9801 if (tcr->raw_tcr & TTBCR_PD1) {
9802 /* Translation table walk disabled for TTBR1 */
9803 return false;
9804 }
9805 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
9806 } else {
9807 if (tcr->raw_tcr & TTBCR_PD0) {
9808 /* Translation table walk disabled for TTBR0 */
9809 return false;
9810 }
9811 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
9812 }
9813 *table |= (address >> 18) & 0x3ffc;
9814 return true;
9815 }
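/*
 * For example, when TTBCR.N == 0 (so tcr->mask matches no addresses) every
 * lookup uses TTBR0: for VA 0x12345678 the level 1 descriptor is fetched
 * from TTBR0[31:14] | (VA[31:20] << 2), i.e. table offset 0x123 * 4 == 0x48c.
 */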
9816
9817 /* Translate a S1 pagetable walk through S2 if needed. */
9818 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
9819 hwaddr addr, MemTxAttrs txattrs,
9820 ARMMMUFaultInfo *fi)
9821 {
9822 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
9823 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
9824 target_ulong s2size;
9825 hwaddr s2pa;
9826 int s2prot;
9827 int ret;
9828 ARMCacheAttrs cacheattrs = {};
9829 ARMCacheAttrs *pcacheattrs = NULL;
9830
9831 if (env->cp15.hcr_el2 & HCR_PTW) {
9832 /*
9833 * PTW means we must fault if this S1 walk touches S2 Device
9834 * memory; otherwise we don't care about the attributes and can
9835 * save the S2 translation the effort of computing them.
9836 */
9837 pcacheattrs = &cacheattrs;
9838 }
9839
9840 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
9841 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
9842 if (ret) {
9843 assert(fi->type != ARMFault_None);
9844 fi->s2addr = addr;
9845 fi->stage2 = true;
9846 fi->s1ptw = true;
9847 return ~0;
9848 }
9849 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
9850 /* Access was to Device memory: generate Permission fault */
9851 fi->type = ARMFault_Permission;
9852 fi->s2addr = addr;
9853 fi->stage2 = true;
9854 fi->s1ptw = true;
9855 return ~0;
9856 }
9857 addr = s2pa;
9858 }
9859 return addr;
9860 }
9861
9862 /* All loads done in the course of a page table walk go through here. */
9863 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
9864 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
9865 {
9866 ARMCPU *cpu = ARM_CPU(cs);
9867 CPUARMState *env = &cpu->env;
9868 MemTxAttrs attrs = {};
9869 MemTxResult result = MEMTX_OK;
9870 AddressSpace *as;
9871 uint32_t data;
9872
9873 attrs.secure = is_secure;
9874 as = arm_addressspace(cs, attrs);
9875 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
9876 if (fi->s1ptw) {
9877 return 0;
9878 }
9879 if (regime_translation_big_endian(env, mmu_idx)) {
9880 data = address_space_ldl_be(as, addr, attrs, &result);
9881 } else {
9882 data = address_space_ldl_le(as, addr, attrs, &result);
9883 }
9884 if (result == MEMTX_OK) {
9885 return data;
9886 }
9887 fi->type = ARMFault_SyncExternalOnWalk;
9888 fi->ea = arm_extabort_type(result);
9889 return 0;
9890 }
9891
9892 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
9893 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
9894 {
9895 ARMCPU *cpu = ARM_CPU(cs);
9896 CPUARMState *env = &cpu->env;
9897 MemTxAttrs attrs = {};
9898 MemTxResult result = MEMTX_OK;
9899 AddressSpace *as;
9900 uint64_t data;
9901
9902 attrs.secure = is_secure;
9903 as = arm_addressspace(cs, attrs);
9904 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
9905 if (fi->s1ptw) {
9906 return 0;
9907 }
9908 if (regime_translation_big_endian(env, mmu_idx)) {
9909 data = address_space_ldq_be(as, addr, attrs, &result);
9910 } else {
9911 data = address_space_ldq_le(as, addr, attrs, &result);
9912 }
9913 if (result == MEMTX_OK) {
9914 return data;
9915 }
9916 fi->type = ARMFault_SyncExternalOnWalk;
9917 fi->ea = arm_extabort_type(result);
9918 return 0;
9919 }
9920
9921 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
9922 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9923 hwaddr *phys_ptr, int *prot,
9924 target_ulong *page_size,
9925 ARMMMUFaultInfo *fi)
9926 {
9927 CPUState *cs = env_cpu(env);
9928 int level = 1;
9929 uint32_t table;
9930 uint32_t desc;
9931 int type;
9932 int ap;
9933 int domain = 0;
9934 int domain_prot;
9935 hwaddr phys_addr;
9936 uint32_t dacr;
9937
9938 /* Pagetable walk. */
9939 /* Lookup l1 descriptor. */
9940 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
9941 /* Section translation fault if page walk is disabled by PD0 or PD1 */
9942 fi->type = ARMFault_Translation;
9943 goto do_fault;
9944 }
9945 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9946 mmu_idx, fi);
9947 if (fi->type != ARMFault_None) {
9948 goto do_fault;
9949 }
9950 type = (desc & 3);
9951 domain = (desc >> 5) & 0x0f;
9952 if (regime_el(env, mmu_idx) == 1) {
9953 dacr = env->cp15.dacr_ns;
9954 } else {
9955 dacr = env->cp15.dacr_s;
9956 }
9957 domain_prot = (dacr >> (domain * 2)) & 3;
9958 if (type == 0) {
9959 /* Section translation fault. */
9960 fi->type = ARMFault_Translation;
9961 goto do_fault;
9962 }
9963 if (type != 2) {
9964 level = 2;
9965 }
9966 if (domain_prot == 0 || domain_prot == 2) {
9967 fi->type = ARMFault_Domain;
9968 goto do_fault;
9969 }
9970 if (type == 2) {
9971 /* 1MB section. */
9972 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
9973 ap = (desc >> 10) & 3;
9974 *page_size = 1024 * 1024;
9975 } else {
9976 /* Lookup l2 entry. */
9977 if (type == 1) {
9978 /* Coarse pagetable. */
9979 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
9980 } else {
9981 /* Fine pagetable. */
9982 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
9983 }
9984 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9985 mmu_idx, fi);
9986 if (fi->type != ARMFault_None) {
9987 goto do_fault;
9988 }
9989 switch (desc & 3) {
9990 case 0: /* Page translation fault. */
9991 fi->type = ARMFault_Translation;
9992 goto do_fault;
9993 case 1: /* 64k page. */
9994 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
9995 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
9996 *page_size = 0x10000;
9997 break;
9998 case 2: /* 4k page. */
9999 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10000 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
10001 *page_size = 0x1000;
10002 break;
10003 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
10004 if (type == 1) {
10005 /* ARMv6/XScale extended small page format */
10006 if (arm_feature(env, ARM_FEATURE_XSCALE)
10007 || arm_feature(env, ARM_FEATURE_V6)) {
10008 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10009 *page_size = 0x1000;
10010 } else {
10011 /* UNPREDICTABLE in ARMv5; we choose to take a
10012 * page translation fault.
10013 */
10014 fi->type = ARMFault_Translation;
10015 goto do_fault;
10016 }
10017 } else {
10018 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
10019 *page_size = 0x400;
10020 }
10021 ap = (desc >> 4) & 3;
10022 break;
10023 default:
10024 /* Never happens, but compiler isn't smart enough to tell. */
10025 abort();
10026 }
10027 }
10028 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10029 *prot |= *prot ? PAGE_EXEC : 0;
10030 if (!(*prot & (1 << access_type))) {
10031 /* Access permission fault. */
10032 fi->type = ARMFault_Permission;
10033 goto do_fault;
10034 }
10035 *phys_ptr = phys_addr;
10036 return false;
10037 do_fault:
10038 fi->domain = domain;
10039 fi->level = level;
10040 return true;
10041 }
10042
10043 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
10044 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10045 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10046 target_ulong *page_size, ARMMMUFaultInfo *fi)
10047 {
10048 CPUState *cs = env_cpu(env);
10049 int level = 1;
10050 uint32_t table;
10051 uint32_t desc;
10052 uint32_t xn;
10053 uint32_t pxn = 0;
10054 int type;
10055 int ap;
10056 int domain = 0;
10057 int domain_prot;
10058 hwaddr phys_addr;
10059 uint32_t dacr;
10060 bool ns;
10061
10062 /* Pagetable walk. */
10063 /* Lookup l1 descriptor. */
10064 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10065 /* Section translation fault if page walk is disabled by PD0 or PD1 */
10066 fi->type = ARMFault_Translation;
10067 goto do_fault;
10068 }
10069 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10070 mmu_idx, fi);
10071 if (fi->type != ARMFault_None) {
10072 goto do_fault;
10073 }
10074 type = (desc & 3);
10075 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
10076 /* Section translation fault, or attempt to use the encoding
10077 * which is Reserved on implementations without PXN.
10078 */
10079 fi->type = ARMFault_Translation;
10080 goto do_fault;
10081 }
10082 if ((type == 1) || !(desc & (1 << 18))) {
10083 /* Page or Section. */
10084 domain = (desc >> 5) & 0x0f;
10085 }
10086 if (regime_el(env, mmu_idx) == 1) {
10087 dacr = env->cp15.dacr_ns;
10088 } else {
10089 dacr = env->cp15.dacr_s;
10090 }
10091 if (type == 1) {
10092 level = 2;
10093 }
10094 domain_prot = (dacr >> (domain * 2)) & 3;
10095 if (domain_prot == 0 || domain_prot == 2) {
10096 /* Section or Page domain fault */
10097 fi->type = ARMFault_Domain;
10098 goto do_fault;
10099 }
10100 if (type != 1) {
10101 if (desc & (1 << 18)) {
10102 /* Supersection. */
10103 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
10104 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
10105 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
10106 *page_size = 0x1000000;
10107 } else {
10108 /* Section. */
10109 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10110 *page_size = 0x100000;
10111 }
10112 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
10113 xn = desc & (1 << 4);
10114 pxn = desc & 1;
10115 ns = extract32(desc, 19, 1);
10116 } else {
10117 if (arm_feature(env, ARM_FEATURE_PXN)) {
10118 pxn = (desc >> 2) & 1;
10119 }
10120 ns = extract32(desc, 3, 1);
10121 /* Lookup l2 entry. */
10122 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10123 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10124 mmu_idx, fi);
10125 if (fi->type != ARMFault_None) {
10126 goto do_fault;
10127 }
10128 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
10129 switch (desc & 3) {
10130 case 0: /* Page translation fault. */
10131 fi->type = ARMFault_Translation;
10132 goto do_fault;
10133 case 1: /* 64k page. */
10134 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10135 xn = desc & (1 << 15);
10136 *page_size = 0x10000;
10137 break;
10138 case 2: case 3: /* 4k page. */
10139 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10140 xn = desc & 1;
10141 *page_size = 0x1000;
10142 break;
10143 default:
10144 /* Never happens, but compiler isn't smart enough to tell. */
10145 abort();
10146 }
10147 }
10148 if (domain_prot == 3) {
10149 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10150 } else {
10151 if (pxn && !regime_is_user(env, mmu_idx)) {
10152 xn = 1;
10153 }
10154 if (xn && access_type == MMU_INST_FETCH) {
10155 fi->type = ARMFault_Permission;
10156 goto do_fault;
10157 }
10158
10159 if (arm_feature(env, ARM_FEATURE_V6K) &&
10160 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
10161 /* The simplified model uses AP[0] as an access control bit. */
10162 if ((ap & 1) == 0) {
10163 /* Access flag fault. */
10164 fi->type = ARMFault_AccessFlag;
10165 goto do_fault;
10166 }
10167 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
10168 } else {
10169 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10170 }
10171 if (*prot && !xn) {
10172 *prot |= PAGE_EXEC;
10173 }
10174 if (!(*prot & (1 << access_type))) {
10175 /* Access permission fault. */
10176 fi->type = ARMFault_Permission;
10177 goto do_fault;
10178 }
10179 }
10180 if (ns) {
10181 /* The NS bit will (as required by the architecture) have no effect if
10182 * the CPU doesn't support TZ or this is a non-secure translation
10183 * regime, because the attribute will already be non-secure.
10184 */
10185 attrs->secure = false;
10186 }
10187 *phys_ptr = phys_addr;
10188 return false;
10189 do_fault:
10190 fi->domain = domain;
10191 fi->level = level;
10192 return true;
10193 }
10194
10195 /*
10196 * check_s2_mmu_setup
10197 * @cpu: ARMCPU
10198 * @is_aa64: True if the translation regime is in AArch64 state
10199 * @level: Suggested starting level
10200 * @inputsize: Bitsize of IPAs
10201 * @stride: Page-table stride (See the ARM ARM)
10202 *
10203 * Returns true if the suggested S2 translation parameters are OK and
10204 * false otherwise.
10205 */
10206 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
10207 int inputsize, int stride)
10208 {
10209 const int grainsize = stride + 3;
10210 int startsizecheck;
10211
10212 /* Negative levels are never allowed. */
10213 if (level < 0) {
10214 return false;
10215 }
10216
10217 startsizecheck = inputsize - ((3 - level) * stride + grainsize);
10218 if (startsizecheck < 1 || startsizecheck > stride + 4) {
10219 return false;
10220 }
10221
10222 if (is_aa64) {
10223 CPUARMState *env = &cpu->env;
10224 unsigned int pamax = arm_pamax(cpu);
10225
10226 switch (stride) {
10227 case 13: /* 64KB Pages. */
10228 if (level == 0 || (level == 1 && pamax <= 42)) {
10229 return false;
10230 }
10231 break;
10232 case 11: /* 16KB Pages. */
10233 if (level == 0 || (level == 1 && pamax <= 40)) {
10234 return false;
10235 }
10236 break;
10237 case 9: /* 4KB Pages. */
10238 if (level == 0 && pamax <= 42) {
10239 return false;
10240 }
10241 break;
10242 default:
10243 g_assert_not_reached();
10244 }
10245
10246 /* Inputsize checks. */
10247 if (inputsize > pamax &&
10248 (arm_el_is_aa64(env, 1) || inputsize > 40)) {
10249 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
10250 return false;
10251 }
10252 } else {
10253 /* AArch32 only supports 4KB pages. Assert on that. */
10254 assert(stride == 9);
10255
10256 if (level == 0) {
10257 return false;
10258 }
10259 }
10260 return true;
10261 }
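/*
 * For example, a 40-bit IPA space with 4KB pages (stride == 9,
 * grainsize == 12) and a suggested starting level of 1 gives
 * startsizecheck = 40 - (2 * 9 + 12) = 10, which is within [1, 13],
 * so the setup is accepted (assuming pamax also allows it).
 */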
10262
10263 /* Translate from the 4-bit stage 2 representation of
10264 * memory attributes (without cache-allocation hints) to
10265 * the 8-bit representation of the stage 1 MAIR registers
10266 * (which includes allocation hints).
10267 *
10268 * ref: shared/translation/attrs/S2AttrDecode()
10269 * .../S2ConvertAttrsHints()
10270 */
10271 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
10272 {
10273 uint8_t hiattr = extract32(s2attrs, 2, 2);
10274 uint8_t loattr = extract32(s2attrs, 0, 2);
10275 uint8_t hihint = 0, lohint = 0;
10276
10277 if (hiattr != 0) { /* normal memory */
10278 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
10279 hiattr = loattr = 1; /* non-cacheable */
10280 } else {
10281 if (hiattr != 1) { /* Write-through or write-back */
10282 hihint = 3; /* RW allocate */
10283 }
10284 if (loattr != 1) { /* Write-through or write-back */
10285 lohint = 3; /* RW allocate */
10286 }
10287 }
10288 }
10289
10290 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
10291 }
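/*
 * For example, S2 attrs 0b1111 (Normal, Write-Back inner and outer) map to
 * 0xff (Write-Back, Read/Write-allocate) when HCR_EL2.CD is clear, but to
 * 0x44 (Normal Non-cacheable) when HCR_EL2.CD is set.
 */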
10292 #endif /* !CONFIG_USER_ONLY */
10293
10294 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
10295 {
10296 if (regime_has_2_ranges(mmu_idx)) {
10297 return extract64(tcr, 37, 2);
10298 } else if (mmu_idx == ARMMMUIdx_Stage2) {
10299 return 0; /* VTCR_EL2 */
10300 } else {
10301 return extract32(tcr, 20, 1);
10302 }
10303 }
10304
10305 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
10306 {
10307 if (regime_has_2_ranges(mmu_idx)) {
10308 return extract64(tcr, 51, 2);
10309 } else if (mmu_idx == ARMMMUIdx_Stage2) {
10310 return 0; /* VTCR_EL2 */
10311 } else {
10312 return extract32(tcr, 29, 1);
10313 }
10314 }
10315
10316 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
10317 ARMMMUIdx mmu_idx, bool data)
10318 {
10319 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10320 bool epd, hpd, using16k, using64k;
10321 int select, tsz, tbi;
10322
10323 if (!regime_has_2_ranges(mmu_idx)) {
10324 select = 0;
10325 tsz = extract32(tcr, 0, 6);
10326 using64k = extract32(tcr, 14, 1);
10327 using16k = extract32(tcr, 15, 1);
10328 if (mmu_idx == ARMMMUIdx_Stage2) {
10329 /* VTCR_EL2 */
10330 hpd = false;
10331 } else {
10332 hpd = extract32(tcr, 24, 1);
10333 }
10334 epd = false;
10335 } else {
10336 /*
10337 * Bit 55 is always between the two regions, and is canonical for
10338 * determining if address tagging is enabled.
10339 */
10340 select = extract64(va, 55, 1);
10341 if (!select) {
10342 tsz = extract32(tcr, 0, 6);
10343 epd = extract32(tcr, 7, 1);
10344 using64k = extract32(tcr, 14, 1);
10345 using16k = extract32(tcr, 15, 1);
10346 hpd = extract64(tcr, 41, 1);
10347 } else {
10348 int tg = extract32(tcr, 30, 2);
10349 using16k = tg == 1;
10350 using64k = tg == 3;
10351 tsz = extract32(tcr, 16, 6);
10352 epd = extract32(tcr, 23, 1);
10353 hpd = extract64(tcr, 42, 1);
10354 }
10355 }
10356 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */
10357 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
10358
10359 /* Present TBI as a composite with TBID. */
10360 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
10361 if (!data) {
10362 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
10363 }
10364 tbi = (tbi >> select) & 1;
10365
10366 return (ARMVAParameters) {
10367 .tsz = tsz,
10368 .select = select,
10369 .tbi = tbi,
10370 .epd = epd,
10371 .hpd = hpd,
10372 .using16k = using16k,
10373 .using64k = using64k,
10374 };
10375 }
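/*
 * As an illustration: for an EL1&0 regime with TCR_EL1.T0SZ == 16,
 * TG0 == 0 (4KB granule) and a low-half VA (bit 55 clear), this returns
 * tsz == 16 and select == 0, so the caller in get_phys_addr_lpae() sees
 * a 48-bit input address range.
 */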
10376
10377 #ifndef CONFIG_USER_ONLY
10378 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
10379 ARMMMUIdx mmu_idx)
10380 {
10381 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10382 uint32_t el = regime_el(env, mmu_idx);
10383 int select, tsz;
10384 bool epd, hpd;
10385
10386 if (mmu_idx == ARMMMUIdx_Stage2) {
10387 /* VTCR */
10388 bool sext = extract32(tcr, 4, 1);
10389 bool sign = extract32(tcr, 3, 1);
10390
10391 /*
10392 * If the sign-extend bit is not the same as t0sz[3], the result
10393 * is unpredictable. Flag this as a guest error.
10394 */
10395 if (sign != sext) {
10396 qemu_log_mask(LOG_GUEST_ERROR,
10397 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
10398 }
10399 tsz = sextract32(tcr, 0, 4) + 8;
10400 select = 0;
10401 hpd = false;
10402 epd = false;
10403 } else if (el == 2) {
10404 /* HTCR */
10405 tsz = extract32(tcr, 0, 3);
10406 select = 0;
10407 hpd = extract64(tcr, 24, 1);
10408 epd = false;
10409 } else {
10410 int t0sz = extract32(tcr, 0, 3);
10411 int t1sz = extract32(tcr, 16, 3);
10412
10413 if (t1sz == 0) {
10414 select = va > (0xffffffffu >> t0sz);
10415 } else {
10416 /* Note that we will detect errors later. */
10417 select = va >= ~(0xffffffffu >> t1sz);
10418 }
10419 if (!select) {
10420 tsz = t0sz;
10421 epd = extract32(tcr, 7, 1);
10422 hpd = extract64(tcr, 41, 1);
10423 } else {
10424 tsz = t1sz;
10425 epd = extract32(tcr, 23, 1);
10426 hpd = extract64(tcr, 42, 1);
10427 }
10428 /* For aarch32, hpd0 is not enabled without t2e as well. */
10429 hpd &= extract32(tcr, 6, 1);
10430 }
10431
10432 return (ARMVAParameters) {
10433 .tsz = tsz,
10434 .select = select,
10435 .epd = epd,
10436 .hpd = hpd,
10437 };
10438 }
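/*
 * For example, for an LPAE walk with T0SZ == 0 and T1SZ == 2, addresses at
 * or above 0xc0000000 select TTBR1 (select == 1, tsz == 2) and everything
 * below it selects TTBR0 (select == 0, tsz == 0).
 */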
10439
10440 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
10441 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10442 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
10443 target_ulong *page_size_ptr,
10444 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10445 {
10446 ARMCPU *cpu = env_archcpu(env);
10447 CPUState *cs = CPU(cpu);
10448 /* Read an LPAE long-descriptor translation table. */
10449 ARMFaultType fault_type = ARMFault_Translation;
10450 uint32_t level;
10451 ARMVAParameters param;
10452 uint64_t ttbr;
10453 hwaddr descaddr, indexmask, indexmask_grainsize;
10454 uint32_t tableattrs;
10455 target_ulong page_size;
10456 uint32_t attrs;
10457 int32_t stride;
10458 int addrsize, inputsize;
10459 TCR *tcr = regime_tcr(env, mmu_idx);
10460 int ap, ns, xn, pxn;
10461 uint32_t el = regime_el(env, mmu_idx);
10462 uint64_t descaddrmask;
10463 bool aarch64 = arm_el_is_aa64(env, el);
10464 bool guarded = false;
10465
10466 /* TODO:
10467 * This code does not handle the different format TCR for VTCR_EL2.
10468 * This code also does not support shareability levels.
10469 * Attribute and permission bit handling should also be checked when adding
10470 * support for those page table walks.
10471 */
10472 if (aarch64) {
10473 param = aa64_va_parameters(env, address, mmu_idx,
10474 access_type != MMU_INST_FETCH);
10475 level = 0;
10476 addrsize = 64 - 8 * param.tbi;
10477 inputsize = 64 - param.tsz;
10478 } else {
10479 param = aa32_va_parameters(env, address, mmu_idx);
10480 level = 1;
10481 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
10482 inputsize = addrsize - param.tsz;
10483 }
10484
10485 /*
10486 * We determined the region when collecting the parameters, but we
10487 * have not yet validated that the address is valid for the region.
10488 * Extract the top bits and verify that they all match select.
10489 *
10490 * For aa32, if inputsize == addrsize, then we have selected the
10491 * region by exclusion in aa32_va_parameters and there is no more
10492 * validation to do here.
10493 */
10494 if (inputsize < addrsize) {
10495 target_ulong top_bits = sextract64(address, inputsize,
10496 addrsize - inputsize);
10497 if (-top_bits != param.select) {
10498 /* The gap between the two regions is a Translation fault */
10499 fault_type = ARMFault_Translation;
10500 goto do_fault;
10501 }
10502 }
10503
10504 if (param.using64k) {
10505 stride = 13;
10506 } else if (param.using16k) {
10507 stride = 11;
10508 } else {
10509 stride = 9;
10510 }
10511
10512 /* Note that QEMU ignores shareability and cacheability attributes,
10513 * so we don't need to do anything with the SH, ORGN, IRGN fields
10514 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
10515 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
10516 * implement any ASID-like capability so we can ignore it (instead
10517 * we will always flush the TLB any time the ASID is changed).
10518 */
10519 ttbr = regime_ttbr(env, mmu_idx, param.select);
10520
10521 /* Here we should have set up all the parameters for the translation:
10522 * inputsize, ttbr, epd, stride, tbi
10523 */
10524
10525 if (param.epd) {
10526 /* Translation table walk disabled => Translation fault on TLB miss
10527 * Note: This is always 0 on 64-bit EL2 and EL3.
10528 */
10529 goto do_fault;
10530 }
10531
10532 if (mmu_idx != ARMMMUIdx_Stage2) {
10533 /* The starting level depends on the virtual address size (which can
10534 * be up to 48 bits) and the translation granule size. It indicates
10535 * the number of strides (stride bits at a time) needed to
10536 * consume the bits of the input address. In the pseudocode this is:
10537 * level = 4 - RoundUp((inputsize - grainsize) / stride)
10538 * where their 'inputsize' is our 'inputsize', 'grainsize' is
10539 * our 'stride + 3' and 'stride' is our 'stride'.
10540 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
10541 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
10542 * = 4 - (inputsize - 4) / stride;
10543 */
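        /* e.g. inputsize == 48 with 4KB pages (stride == 9): level = 4 - 44 / 9 = 0;
         * inputsize == 32 gives level = 4 - 28 / 9 = 1.
         */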
10544 level = 4 - (inputsize - 4) / stride;
10545 } else {
10546 /* For stage 2 translations the starting level is specified by the
10547 * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
10548 */
10549 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
10550 uint32_t startlevel;
10551 bool ok;
10552
10553 if (!aarch64 || stride == 9) {
10554 /* AArch32 or 4KB pages */
10555 startlevel = 2 - sl0;
10556 } else {
10557 /* 16KB or 64KB pages */
10558 startlevel = 3 - sl0;
10559 }
10560
10561 /* Check that the starting level is valid. */
10562 ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
10563 inputsize, stride);
10564 if (!ok) {
10565 fault_type = ARMFault_Translation;
10566 goto do_fault;
10567 }
10568 level = startlevel;
10569 }
10570
10571 indexmask_grainsize = (1ULL << (stride + 3)) - 1;
10572 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
10573
10574 /* Now we can extract the actual base address from the TTBR */
10575 descaddr = extract64(ttbr, 0, 48);
10576 descaddr &= ~indexmask;
10577
10578 /* The address field in the descriptor goes up to bit 39 for ARMv7
10579 * and up to bit 47 for ARMv8; however, we only use descaddrmask
10580 * up to bit 39 for AArch32, because the other bits are not needed
10581 * to construct the next descriptor address (they should all be zeroes).
10582 */
10583 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
10584 ~indexmask_grainsize;
10585
10586 /* Secure accesses start with the page table in secure memory and
10587 * can be downgraded to non-secure at any step. Non-secure accesses
10588 * remain non-secure. We implement this by just ORing in the NSTable/NS
10589 * bits at each step.
10590 */
10591 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
10592 for (;;) {
10593 uint64_t descriptor;
10594 bool nstable;
10595
10596 descaddr |= (address >> (stride * (4 - level))) & indexmask;
10597 descaddr &= ~7ULL;
10598 nstable = extract32(tableattrs, 4, 1);
10599 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
10600 if (fi->type != ARMFault_None) {
10601 goto do_fault;
10602 }
10603
10604 if (!(descriptor & 1) ||
10605 (!(descriptor & 2) && (level == 3))) {
10606 /* Invalid, or the Reserved level 3 encoding */
10607 goto do_fault;
10608 }
10609 descaddr = descriptor & descaddrmask;
10610
10611 if ((descriptor & 2) && (level < 3)) {
10612 /* Table entry. The top five bits are attributes which may
10613 * propagate down through lower levels of the table (and
10614 * which are all arranged so that 0 means "no effect", so
10615 * we can gather them up by ORing in the bits at each level).
10616 */
10617 tableattrs |= extract64(descriptor, 59, 5);
10618 level++;
10619 indexmask = indexmask_grainsize;
10620 continue;
10621 }
10622 /* Block entry at level 1 or 2, or page entry at level 3.
10623 * These are basically the same thing, although the number
10624 * of bits we pull in from the vaddr varies.
10625 */
10626 page_size = (1ULL << ((stride * (4 - level)) + 3));
10627 descaddr |= (address & (page_size - 1));
10628 /* Extract attributes from the descriptor */
10629 attrs = extract64(descriptor, 2, 10)
10630 | (extract64(descriptor, 52, 12) << 10);
10631
10632 if (mmu_idx == ARMMMUIdx_Stage2) {
10633 /* Stage 2 table descriptors do not include any attribute fields */
10634 break;
10635 }
10636 /* Merge in attributes from table descriptors */
10637 attrs |= nstable << 3; /* NS */
10638 guarded = extract64(descriptor, 50, 1); /* GP */
10639 if (param.hpd) {
10640 /* HPD disables all the table attributes except NSTable. */
10641 break;
10642 }
10643 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
10644 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
10645 * means "force PL1 access only", which means forcing AP[1] to 0.
10646 */
10647 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
10648 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
10649 break;
10650 }
10651 /* Here descaddr is the final physical address, and attributes
10652 * are all in attrs.
10653 */
10654 fault_type = ARMFault_AccessFlag;
10655 if ((attrs & (1 << 8)) == 0) {
10656 /* Access flag */
10657 goto do_fault;
10658 }
10659
10660 ap = extract32(attrs, 4, 2);
10661 xn = extract32(attrs, 12, 1);
10662
10663 if (mmu_idx == ARMMMUIdx_Stage2) {
10664 ns = true;
10665 *prot = get_S2prot(env, ap, xn);
10666 } else {
10667 ns = extract32(attrs, 3, 1);
10668 pxn = extract32(attrs, 11, 1);
10669 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
10670 }
10671
10672 fault_type = ARMFault_Permission;
10673 if (!(*prot & (1 << access_type))) {
10674 goto do_fault;
10675 }
10676
10677 if (ns) {
10678 /* The NS bit will (as required by the architecture) have no effect if
10679 * the CPU doesn't support TZ or this is a non-secure translation
10680 * regime, because the attribute will already be non-secure.
10681 */
10682 txattrs->secure = false;
10683 }
10684 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
10685 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
10686 txattrs->target_tlb_bit0 = true;
10687 }
10688
10689 if (cacheattrs != NULL) {
10690 if (mmu_idx == ARMMMUIdx_Stage2) {
10691 cacheattrs->attrs = convert_stage2_attrs(env,
10692 extract32(attrs, 0, 4));
10693 } else {
10694 /* Index into MAIR registers for cache attributes */
10695 uint8_t attrindx = extract32(attrs, 0, 3);
10696 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
10697 assert(attrindx <= 7);
10698 cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
10699 }
10700 cacheattrs->shareability = extract32(attrs, 6, 2);
10701 }
10702
10703 *phys_ptr = descaddr;
10704 *page_size_ptr = page_size;
10705 return false;
10706
10707 do_fault:
10708 fi->type = fault_type;
10709 fi->level = level;
10710 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
10711 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
10712 return true;
10713 }
10714
10715 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
10716 ARMMMUIdx mmu_idx,
10717 int32_t address, int *prot)
10718 {
10719 if (!arm_feature(env, ARM_FEATURE_M)) {
10720 *prot = PAGE_READ | PAGE_WRITE;
10721 switch (address) {
10722 case 0xF0000000 ... 0xFFFFFFFF:
10723 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
10724 /* high vectors (SCTLR.V) are enabled, so executing here is OK */
10725 *prot |= PAGE_EXEC;
10726 }
10727 break;
10728 case 0x00000000 ... 0x7FFFFFFF:
10729 *prot |= PAGE_EXEC;
10730 break;
10731 }
10732 } else {
10733 /* Default system address map for M profile cores.
10734 * The architecture specifies which regions are execute-never;
10735 * at the MPU level no other checks are defined.
10736 */
10737 switch (address) {
10738 case 0x00000000 ... 0x1fffffff: /* ROM */
10739 case 0x20000000 ... 0x3fffffff: /* SRAM */
10740 case 0x60000000 ... 0x7fffffff: /* RAM */
10741 case 0x80000000 ... 0x9fffffff: /* RAM */
10742 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10743 break;
10744 case 0x40000000 ... 0x5fffffff: /* Peripheral */
10745 case 0xa0000000 ... 0xbfffffff: /* Device */
10746 case 0xc0000000 ... 0xdfffffff: /* Device */
10747 case 0xe0000000 ... 0xffffffff: /* System */
10748 *prot = PAGE_READ | PAGE_WRITE;
10749 break;
10750 default:
10751 g_assert_not_reached();
10752 }
10753 }
10754 }
10755
10756 static bool pmsav7_use_background_region(ARMCPU *cpu,
10757 ARMMMUIdx mmu_idx, bool is_user)
10758 {
10759 /* Return true if we should use the default memory map as a
10760 * "background" region if there are no hits against any MPU regions.
10761 */
10762 CPUARMState *env = &cpu->env;
10763
10764 if (is_user) {
10765 return false;
10766 }
10767
10768 if (arm_feature(env, ARM_FEATURE_M)) {
10769 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
10770 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
10771 } else {
10772 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
10773 }
10774 }
10775
10776 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
10777 {
10778 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
10779 return arm_feature(env, ARM_FEATURE_M) &&
10780 extract32(address, 20, 12) == 0xe00;
10781 }
10782
10783 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
10784 {
10785 /* True if address is in the M profile system region
10786 * 0xe0000000 - 0xffffffff
10787 */
10788 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
10789 }
10790
10791 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
10792 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10793 hwaddr *phys_ptr, int *prot,
10794 target_ulong *page_size,
10795 ARMMMUFaultInfo *fi)
10796 {
10797 ARMCPU *cpu = env_archcpu(env);
10798 int n;
10799 bool is_user = regime_is_user(env, mmu_idx);
10800
10801 *phys_ptr = address;
10802 *page_size = TARGET_PAGE_SIZE;
10803 *prot = 0;
10804
10805 if (regime_translation_disabled(env, mmu_idx) ||
10806 m_is_ppb_region(env, address)) {
10807 /* MPU disabled or M profile PPB access: use default memory map.
10808 * The other case which uses the default memory map in the
10809 * v7M ARM ARM pseudocode is exception vector reads from the vector
10810 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
10811 * which always does a direct read using address_space_ldl(), rather
10812 * than going via this function, so we don't need to check that here.
10813 */
10814 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10815 } else { /* MPU enabled */
10816 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
10817 /* region search */
10818 uint32_t base = env->pmsav7.drbar[n];
10819 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
10820 uint32_t rmask;
10821 bool srdis = false;
10822
10823 if (!(env->pmsav7.drsr[n] & 0x1)) {
10824 continue;
10825 }
10826
10827 if (!rsize) {
10828 qemu_log_mask(LOG_GUEST_ERROR,
10829 "DRSR[%d]: Rsize field cannot be 0\n", n);
10830 continue;
10831 }
10832 rsize++;
10833 rmask = (1ull << rsize) - 1;
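            /* e.g. a DRSR region size field value of 11 encodes a 4KB region:
             * rsize becomes 12 and rmask 0xfff.
             */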
10834
10835 if (base & rmask) {
10836 qemu_log_mask(LOG_GUEST_ERROR,
10837 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
10838 "to DRSR region size, mask = 0x%" PRIx32 "\n",
10839 n, base, rmask);
10840 continue;
10841 }
10842
10843 if (address < base || address > base + rmask) {
10844 /*
10845 * Address not in this region. We must check whether the
10846 * region covers addresses in the same page as our address.
10847 * In that case we must not report a size that covers the
10848 * whole page for a subsequent hit against a different MPU
10849 * region or the background region, because it would result in
10850 * incorrect TLB hits for subsequent accesses to addresses that
10851 * are in this MPU region.
10852 */
10853 if (ranges_overlap(base, rmask,
10854 address & TARGET_PAGE_MASK,
10855 TARGET_PAGE_SIZE)) {
10856 *page_size = 1;
10857 }
10858 continue;
10859 }
10860
10861 /* Region matched */
10862
10863 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
10864 int i, snd;
10865 uint32_t srdis_mask;
10866
10867 rsize -= 3; /* sub region size (power of 2) */
10868 snd = ((address - base) >> rsize) & 0x7;
10869 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
10870
10871 srdis_mask = srdis ? 0x3 : 0x0;
10872 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
10873 /* This will check, in groups of 2, 4 and then 8, whether
10874 * the subregion bits are consistent. rsize is incremented
10875 * back up to give the region size, considering consistent
10876 * adjacent subregions as one region. Stop testing if rsize
10877 * is already big enough for an entire QEMU page.
10878 */
10879 int snd_rounded = snd & ~(i - 1);
10880 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
10881 snd_rounded + 8, i);
10882 if (srdis_mask ^ srdis_multi) {
10883 break;
10884 }
10885 srdis_mask = (srdis_mask << i) | srdis_mask;
10886 rsize++;
10887 }
10888 }
10889 if (srdis) {
10890 continue;
10891 }
10892 if (rsize < TARGET_PAGE_BITS) {
10893 *page_size = 1 << rsize;
10894 }
10895 break;
10896 }
10897
10898 if (n == -1) { /* no hits */
10899 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
10900 /* background fault */
10901 fi->type = ARMFault_Background;
10902 return true;
10903 }
10904 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10905 } else { /* an MPU hit! */
10906 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
10907 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
10908
10909 if (m_is_system_region(env, address)) {
10910 /* System space is always execute never */
10911 xn = 1;
10912 }
10913
10914 if (is_user) { /* User mode AP bit decoding */
10915 switch (ap) {
10916 case 0:
10917 case 1:
10918 case 5:
10919 break; /* no access */
10920 case 3:
10921 *prot |= PAGE_WRITE;
10922 /* fall through */
10923 case 2:
10924 case 6:
10925 *prot |= PAGE_READ | PAGE_EXEC;
10926 break;
10927 case 7:
10928 /* for v7M, same as 6; for R profile a reserved value */
10929 if (arm_feature(env, ARM_FEATURE_M)) {
10930 *prot |= PAGE_READ | PAGE_EXEC;
10931 break;
10932 }
10933 /* fall through */
10934 default:
10935 qemu_log_mask(LOG_GUEST_ERROR,
10936 "DRACR[%d]: Bad value for AP bits: 0x%"
10937 PRIx32 "\n", n, ap);
10938 }
10939 } else { /* Priv. mode AP bits decoding */
10940 switch (ap) {
10941 case 0:
10942 break; /* no access */
10943 case 1:
10944 case 2:
10945 case 3:
10946 *prot |= PAGE_WRITE;
10947 /* fall through */
10948 case 5:
10949 case 6:
10950 *prot |= PAGE_READ | PAGE_EXEC;
10951 break;
10952 case 7:
10953 /* for v7M, same as 6; for R profile a reserved value */
10954 if (arm_feature(env, ARM_FEATURE_M)) {
10955 *prot |= PAGE_READ | PAGE_EXEC;
10956 break;
10957 }
10958 /* fall through */
10959 default:
10960 qemu_log_mask(LOG_GUEST_ERROR,
10961 "DRACR[%d]: Bad value for AP bits: 0x%"
10962 PRIx32 "\n", n, ap);
10963 }
10964 }
10965
10966 /* execute never */
10967 if (xn) {
10968 *prot &= ~PAGE_EXEC;
10969 }
10970 }
10971 }
10972
10973 fi->type = ARMFault_Permission;
10974 fi->level = 1;
10975 return !(*prot & (1 << access_type));
10976 }
10977
10978 static bool v8m_is_sau_exempt(CPUARMState *env,
10979 uint32_t address, MMUAccessType access_type)
10980 {
10981 /* The architecture specifies that certain address ranges are
10982 * exempt from v8M SAU/IDAU checks.
10983 */
10984 return
10985 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
10986 (address >= 0xe0000000 && address <= 0xe0002fff) ||
10987 (address >= 0xe000e000 && address <= 0xe000efff) ||
10988 (address >= 0xe002e000 && address <= 0xe002efff) ||
10989 (address >= 0xe0040000 && address <= 0xe0041fff) ||
10990 (address >= 0xe00ff000 && address <= 0xe00fffff);
10991 }
10992
10993 void v8m_security_lookup(CPUARMState *env, uint32_t address,
10994 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10995 V8M_SAttributes *sattrs)
10996 {
10997 /* Look up the security attributes for this address. Compare the
10998 * pseudocode SecurityCheck() function.
10999 * We assume the caller has zero-initialized *sattrs.
11000 */
11001 ARMCPU *cpu = env_archcpu(env);
11002 int r;
11003 bool idau_exempt = false, idau_ns = true, idau_nsc = true;
11004 int idau_region = IREGION_NOTVALID;
11005 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11006 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11007
11008 if (cpu->idau) {
11009 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
11010 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
11011
11012 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
11013 &idau_nsc);
11014 }
11015
11016 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
11017 /* 0xf0000000..0xffffffff is always S for insn fetches */
11018 return;
11019 }
11020
11021 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
11022 sattrs->ns = !regime_is_secure(env, mmu_idx);
11023 return;
11024 }
11025
11026 if (idau_region != IREGION_NOTVALID) {
11027 sattrs->irvalid = true;
11028 sattrs->iregion = idau_region;
11029 }
11030
11031 switch (env->sau.ctrl & 3) {
11032 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
11033 break;
11034 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
11035 sattrs->ns = true;
11036 break;
11037 default: /* SAU.ENABLE == 1 */
11038 for (r = 0; r < cpu->sau_sregion; r++) {
11039 if (env->sau.rlar[r] & 1) {
11040 uint32_t base = env->sau.rbar[r] & ~0x1f;
11041 uint32_t limit = env->sau.rlar[r] | 0x1f;
11042
11043 if (base <= address && limit >= address) {
11044 if (base > addr_page_base || limit < addr_page_limit) {
11045 sattrs->subpage = true;
11046 }
11047 if (sattrs->srvalid) {
11048 /* If we hit in more than one region then we must report
11049 * as Secure, not NS-Callable, with no valid region
11050 * number info.
11051 */
11052 sattrs->ns = false;
11053 sattrs->nsc = false;
11054 sattrs->sregion = 0;
11055 sattrs->srvalid = false;
11056 break;
11057 } else {
11058 if (env->sau.rlar[r] & 2) {
11059 sattrs->nsc = true;
11060 } else {
11061 sattrs->ns = true;
11062 }
11063 sattrs->srvalid = true;
11064 sattrs->sregion = r;
11065 }
11066 } else {
11067 /*
11068 * Address not in this region. We must check whether the
11069 * region covers addresses in the same page as our address.
11070 * In that case we must not report a size that covers the
11071 * whole page for a subsequent hit against a different MPU
11072 * region or the background region, because it would result
11073 * in incorrect TLB hits for subsequent accesses to
11074 * addresses that are in this MPU region.
11075 */
11076 if (limit >= base &&
11077 ranges_overlap(base, limit - base + 1,
11078 addr_page_base,
11079 TARGET_PAGE_SIZE)) {
11080 sattrs->subpage = true;
11081 }
11082 }
11083 }
11084 }
11085 break;
11086 }
11087
11088 /*
11089 * The IDAU will override the SAU lookup results if it specifies
11090 * higher security than the SAU does.
11091 */
11092 if (!idau_ns) {
11093 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
11094 sattrs->ns = false;
11095 sattrs->nsc = idau_nsc;
11096 }
11097 }
11098 }
11099
11100 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
11101 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11102 hwaddr *phys_ptr, MemTxAttrs *txattrs,
11103 int *prot, bool *is_subpage,
11104 ARMMMUFaultInfo *fi, uint32_t *mregion)
11105 {
11106 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
11107 * that a full virt-to-phys translation does).
11108 * mregion is (if not NULL) set to the region number which matched,
11109 * or -1 if no region number is returned (MPU off, address did not
11110 * hit a region, address hit in multiple regions).
11111 * We set is_subpage to true if the region hit doesn't cover the
11112 * entire TARGET_PAGE the address is within.
11113 */
11114 ARMCPU *cpu = env_archcpu(env);
11115 bool is_user = regime_is_user(env, mmu_idx);
11116 uint32_t secure = regime_is_secure(env, mmu_idx);
11117 int n;
11118 int matchregion = -1;
11119 bool hit = false;
11120 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11121 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11122
11123 *is_subpage = false;
11124 *phys_ptr = address;
11125 *prot = 0;
11126 if (mregion) {
11127 *mregion = -1;
11128 }
11129
11130 /* Unlike the ARM ARM pseudocode, we don't need to check whether this
11131 * was an exception vector read from the vector table (which is always
11132 * done using the default system address map), because those accesses
11133 * are done in arm_v7m_load_vector(), which always does a direct
11134 * read using address_space_ldl(), rather than going via this function.
11135 */
11136 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
11137 hit = true;
11138 } else if (m_is_ppb_region(env, address)) {
11139 hit = true;
11140 } else {
11141 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11142 hit = true;
11143 }
11144
11145 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11146 /* region search */
11147 /* Note that the base address is bits [31:5] from the register
11148 * with bits [4:0] all zeroes, but the limit address is bits
11149 * [31:5] from the register with bits [4:0] all ones.
11150 */
11151 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
11152 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
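            /* e.g. RBAR == 0x20000000 and RLAR == 0x2000ffe1 describe an
             * enabled 64KB region covering 0x20000000..0x2000ffff.
             */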
11153
11154 if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
11155 /* Region disabled */
11156 continue;
11157 }
11158
11159 if (address < base || address > limit) {
11160 /*
11161 * Address not in this region. We must check whether the
11162 * region covers addresses in the same page as our address.
11163 * In that case we must not report a size that covers the
11164 * whole page for a subsequent hit against a different MPU
11165 * region or the background region, because it would result in
11166 * incorrect TLB hits for subsequent accesses to addresses that
11167 * are in this MPU region.
11168 */
11169 if (limit >= base &&
11170 ranges_overlap(base, limit - base + 1,
11171 addr_page_base,
11172 TARGET_PAGE_SIZE)) {
11173 *is_subpage = true;
11174 }
11175 continue;
11176 }
11177
11178 if (base > addr_page_base || limit < addr_page_limit) {
11179 *is_subpage = true;
11180 }
11181
11182 if (matchregion != -1) {
11183 /* Multiple regions match -- always a failure (unlike
11184 * PMSAv7 where highest-numbered-region wins)
11185 */
11186 fi->type = ARMFault_Permission;
11187 fi->level = 1;
11188 return true;
11189 }
11190
11191 matchregion = n;
11192 hit = true;
11193 }
11194 }
11195
11196 if (!hit) {
11197 /* background fault */
11198 fi->type = ARMFault_Background;
11199 return true;
11200 }
11201
11202 if (matchregion == -1) {
11203 /* hit using the background region */
11204 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11205 } else {
11206 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
11207 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
11208
11209 if (m_is_system_region(env, address)) {
11210 /* System space is always execute never */
11211 xn = 1;
11212 }
11213
11214 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
11215 if (*prot && !xn) {
11216 *prot |= PAGE_EXEC;
11217 }
11218 /* We don't need to look the attribute up in the MAIR0/MAIR1
11219 * registers because that only tells us about cacheability.
11220 */
11221 if (mregion) {
11222 *mregion = matchregion;
11223 }
11224 }
11225
11226 fi->type = ARMFault_Permission;
11227 fi->level = 1;
11228 return !(*prot & (1 << access_type));
11229 }
11230
11231
11232 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
11233 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11234 hwaddr *phys_ptr, MemTxAttrs *txattrs,
11235 int *prot, target_ulong *page_size,
11236 ARMMMUFaultInfo *fi)
11237 {
11238 uint32_t secure = regime_is_secure(env, mmu_idx);
11239 V8M_SAttributes sattrs = {};
11240 bool ret;
11241 bool mpu_is_subpage;
11242
11243 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
11244 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
11245 if (access_type == MMU_INST_FETCH) {
11246 /* Instruction fetches always use the MMU bank and the
11247 * transaction attribute determined by the fetch address,
11248 * regardless of CPU state. This is painful for QEMU
11249 * to handle, because it would mean we need to encode
11250 * into the mmu_idx not just the (user, negpri) information
11251 * for the current security state but also that for the
11252 * other security state, which would balloon the number
11253 * of mmu_idx values needed alarmingly.
11254 * Fortunately we can avoid this because it's not actually
11255 * possible to arbitrarily execute code from memory with
11256 * the wrong security attribute: it will always generate
11257 * an exception of some kind or another, apart from the
11258 * special case of an NS CPU executing an SG instruction
11259 * in S&NSC memory. So we always just fail the translation
11260 * here and sort things out in the exception handler
11261 * (including possibly emulating an SG instruction).
11262 */
11263 if (sattrs.ns != !secure) {
11264 if (sattrs.nsc) {
11265 fi->type = ARMFault_QEMU_NSCExec;
11266 } else {
11267 fi->type = ARMFault_QEMU_SFault;
11268 }
11269 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
11270 *phys_ptr = address;
11271 *prot = 0;
11272 return true;
11273 }
11274 } else {
11275 /* For data accesses we always use the MMU bank indicated
11276 * by the current CPU state, but the security attributes
11277 * might downgrade a secure access to nonsecure.
11278 */
11279 if (sattrs.ns) {
11280 txattrs->secure = false;
11281 } else if (!secure) {
11282 /* NS access to S memory must fault.
11283 * Architecturally we should first check whether the
11284 * MPU information for this address indicates that we
11285 * are doing an unaligned access to Device memory, which
11286 * should generate a UsageFault instead. QEMU does not
11287 * currently check for that kind of unaligned access though.
11288 * If we added it we would need to do so as a special case
11289 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
11290 */
11291 fi->type = ARMFault_QEMU_SFault;
11292 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
11293 *phys_ptr = address;
11294 *prot = 0;
11295 return true;
11296 }
11297 }
11298 }
11299
11300 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
11301 txattrs, prot, &mpu_is_subpage, fi, NULL);
11302 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
11303 return ret;
11304 }
11305
11306 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
11307 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11308 hwaddr *phys_ptr, int *prot,
11309 ARMMMUFaultInfo *fi)
11310 {
11311 int n;
11312 uint32_t mask;
11313 uint32_t base;
11314 bool is_user = regime_is_user(env, mmu_idx);
11315
11316 if (regime_translation_disabled(env, mmu_idx)) {
11317 /* MPU disabled. */
11318 *phys_ptr = address;
11319 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11320 return false;
11321 }
11322
11323 *phys_ptr = address;
11324 for (n = 7; n >= 0; n--) {
11325 base = env->cp15.c6_region[n];
11326 if ((base & 1) == 0) {
11327 continue;
11328 }
11329 mask = 1 << ((base >> 1) & 0x1f);
11330 /* Keep this shift separate from the above to avoid an
11331 (undefined) << 32. */
11332 mask = (mask << 1) - 1;
11333 if (((base ^ address) & ~mask) == 0) {
11334 break;
11335 }
11336 }
11337 if (n < 0) {
11338 fi->type = ARMFault_Background;
11339 return true;
11340 }
11341
11342 if (access_type == MMU_INST_FETCH) {
11343 mask = env->cp15.pmsav5_insn_ap;
11344 } else {
11345 mask = env->cp15.pmsav5_data_ap;
11346 }
11347 mask = (mask >> (n * 4)) & 0xf;
11348 switch (mask) {
11349 case 0:
11350 fi->type = ARMFault_Permission;
11351 fi->level = 1;
11352 return true;
11353 case 1:
11354 if (is_user) {
11355 fi->type = ARMFault_Permission;
11356 fi->level = 1;
11357 return true;
11358 }
11359 *prot = PAGE_READ | PAGE_WRITE;
11360 break;
11361 case 2:
11362 *prot = PAGE_READ;
11363 if (!is_user) {
11364 *prot |= PAGE_WRITE;
11365 }
11366 break;
11367 case 3:
11368 *prot = PAGE_READ | PAGE_WRITE;
11369 break;
11370 case 5:
11371 if (is_user) {
11372 fi->type = ARMFault_Permission;
11373 fi->level = 1;
11374 return true;
11375 }
11376 *prot = PAGE_READ;
11377 break;
11378 case 6:
11379 *prot = PAGE_READ;
11380 break;
11381 default:
11382 /* Bad permission. */
11383 fi->type = ARMFault_Permission;
11384 fi->level = 1;
11385 return true;
11386 }
11387 *prot |= PAGE_EXEC;
11388 return false;
11389 }
11390
11391 /* Combine either inner or outer cacheability attributes for normal
11392 * memory, according to table D4-42 and pseudocode procedure
11393 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
11394 *
11395 * NB: only stage 1 includes allocation hints (RW bits), leading to
11396 * some asymmetry.
11397 */
11398 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
11399 {
11400 if (s1 == 4 || s2 == 4) {
11401 /* non-cacheable has precedence */
11402 return 4;
11403 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
11404 /* stage 1 write-through takes precedence */
11405 return s1;
11406 } else if (extract32(s2, 2, 2) == 2) {
11407 /* stage 2 write-through takes precedence, but the allocation hint
11408 * is still taken from stage 1
11409 */
11410 return (2 << 2) | extract32(s1, 0, 2);
11411 } else { /* write-back */
11412 return s1;
11413 }
11414 }
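
/*
 * Illustrative sketch only (compiled out, hypothetical helper name): a few
 * worked combinations of the cacheability nibbles handled above, using the
 * usual MAIR-style encodings (0x4 Non-cacheable, 0xa Write-Through,
 * 0xf Write-Back Read-Allocate Write-Allocate).
 */
#if 0
static void combine_cacheattr_nibble_examples(void)
{
    /* Non-cacheable in either stage wins. */
    assert(combine_cacheattr_nibble(0xf, 0x4) == 0x4);
    /* Stage 1 Write-Through beats stage 2 Write-Back. */
    assert(combine_cacheattr_nibble(0xa, 0xf) == 0xa);
    /* Stage 2 Write-Through forces WT but keeps stage 1's hints. */
    assert(combine_cacheattr_nibble(0xf, 0xa) == 0xb);
    /* Both Write-Back: stage 1 attributes are used unchanged. */
    assert(combine_cacheattr_nibble(0xf, 0xf) == 0xf);
}
#endif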
11415
11416 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
11417 * and CombineS1S2Desc()
11418 *
11419 * @s1: Attributes from stage 1 walk
11420 * @s2: Attributes from stage 2 walk
11421 */
11422 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
11423 {
11424 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
11425 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
11426 ARMCacheAttrs ret;
11427
11428 /* Combine shareability attributes (table D4-43) */
11429 if (s1.shareability == 2 || s2.shareability == 2) {
11430 /* if either is outer-shareable, the result is outer-shareable */
11431 ret.shareability = 2;
11432 } else if (s1.shareability == 3 || s2.shareability == 3) {
11433 /* if either is inner-shareable, the result is inner-shareable */
11434 ret.shareability = 3;
11435 } else {
11436 /* both non-shareable */
11437 ret.shareability = 0;
11438 }
11439
11440 /* Combine memory type and cacheability attributes */
11441 if (s1hi == 0 || s2hi == 0) {
11442 /* Device has precedence over normal */
11443 if (s1lo == 0 || s2lo == 0) {
11444 /* nGnRnE has precedence over anything */
11445 ret.attrs = 0;
11446 } else if (s1lo == 4 || s2lo == 4) {
11447 /* non-Reordering has precedence over Reordering */
11448 ret.attrs = 4; /* nGnRE */
11449 } else if (s1lo == 8 || s2lo == 8) {
11450 /* non-Gathering has precedence over Gathering */
11451 ret.attrs = 8; /* nGRE */
11452 } else {
11453 ret.attrs = 0xc; /* GRE */
11454 }
11455
11456 /* Any location for which the resultant memory type is any
11457 * type of Device memory is always treated as Outer Shareable.
11458 */
11459 ret.shareability = 2;
11460 } else { /* Normal memory */
11461 /* Outer/inner cacheability combine independently */
11462 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
11463 | combine_cacheattr_nibble(s1lo, s2lo);
11464
11465 if (ret.attrs == 0x44) {
11466 /* Any location for which the resultant memory type is Normal
11467 * Inner Non-cacheable, Outer Non-cacheable is always treated
11468 * as Outer Shareable.
11469 */
11470 ret.shareability = 2;
11471 }
11472 }
11473
11474 return ret;
11475 }
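
/*
 * Illustrative sketch only (compiled out, hypothetical helper name): if
 * either stage maps a location as Device memory, the combined result is
 * Device and is forced to Outer Shareable regardless of the other stage.
 */
#if 0
static void combine_cacheattrs_example(void)
{
    ARMCacheAttrs s1 = { .attrs = 0xff, .shareability = 0 }; /* Normal WB */
    ARMCacheAttrs s2 = { .attrs = 0x04, .shareability = 0 }; /* Device-nGnRE */
    ARMCacheAttrs res = combine_cacheattrs(s1, s2);

    assert(res.attrs == 0x04);     /* Device-nGnRE wins over Normal */
    assert(res.shareability == 2); /* Device is always Outer Shareable */
}
#endif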
11476
11477
11478 /* get_phys_addr - get the physical address for this virtual address
11479 *
11480 * Find the physical address corresponding to the given virtual address,
11481 * by doing a translation table walk on MMU based systems or using the
11482 * MPU state on MPU based systems.
11483 *
11484 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
11485 * prot and page_size may not be filled in, and the populated fsr value provides
11486 * information on why the translation aborted, in the format of a
11487 * DFSR/IFSR fault register, with the following caveats:
11488 * * we honour the short vs long DFSR format differences.
11489 * * the WnR bit is never set (the caller must do this).
11490 * * for PMSAv5 based systems we don't bother to return a full FSR format
11491 * value.
11492 *
11493 * @env: CPUARMState
11494 * @address: virtual address to get physical address for
11495 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
11496 * @mmu_idx: MMU index indicating required translation regime
11497 * @phys_ptr: set to the physical address corresponding to the virtual address
11498 * @attrs: set to the memory transaction attributes to use
11499 * @prot: set to the permissions for the page containing phys_ptr
11500 * @page_size: set to the size of the page containing phys_ptr
11501 * @fi: set to fault info if the translation fails
11502 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
11503 */
11504 bool get_phys_addr(CPUARMState *env, target_ulong address,
11505 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11506 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
11507 target_ulong *page_size,
11508 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
11509 {
11510 if (mmu_idx == ARMMMUIdx_E10_0 ||
11511 mmu_idx == ARMMMUIdx_E10_1 ||
11512 mmu_idx == ARMMMUIdx_E10_1_PAN) {
11513 /* Call ourselves recursively to do the stage 1 and then stage 2
11514 * translations.
11515 */
11516 if (arm_feature(env, ARM_FEATURE_EL2)) {
11517 hwaddr ipa;
11518 int s2_prot;
11519 int ret;
11520 ARMCacheAttrs cacheattrs2 = {};
11521
11522 ret = get_phys_addr(env, address, access_type,
11523 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
11524 prot, page_size, fi, cacheattrs);
11525
11526 /* If S1 fails or S2 is disabled, return early. */
11527 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
11528 *phys_ptr = ipa;
11529 return ret;
11530 }
11531
11532 /* S1 is done. Now do S2 translation. */
11533 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
11534 phys_ptr, attrs, &s2_prot,
11535 page_size, fi,
11536 cacheattrs != NULL ? &cacheattrs2 : NULL);
11537 fi->s2addr = ipa;
11538 /* Combine the S1 and S2 perms. */
11539 *prot &= s2_prot;
11540
11541 /* Combine the S1 and S2 cache attributes, if needed */
11542 if (!ret && cacheattrs != NULL) {
11543 if (env->cp15.hcr_el2 & HCR_DC) {
11544 /*
11545 * HCR.DC forces the first stage attributes to
11546 * Normal Non-Shareable,
11547 * Inner Write-Back Read-Allocate Write-Allocate,
11548 * Outer Write-Back Read-Allocate Write-Allocate.
11549 */
11550 cacheattrs->attrs = 0xff;
11551 cacheattrs->shareability = 0;
11552 }
11553 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
11554 }
11555
11556 return ret;
11557 } else {
11558 /*
11559 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
11560 */
11561 mmu_idx = stage_1_mmu_idx(mmu_idx);
11562 }
11563 }
11564
11565 /* The page table entries may downgrade secure to non-secure, but
11566 * cannot upgrade a non-secure translation regime's attributes
11567 * to secure.
11568 */
11569 attrs->secure = regime_is_secure(env, mmu_idx);
11570 attrs->user = regime_is_user(env, mmu_idx);
11571
11572 /* Fast Context Switch Extension. This doesn't exist at all in v8.
11573 * In v7 and earlier it affects all stage 1 translations.
11574 */
11575 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
11576 && !arm_feature(env, ARM_FEATURE_V8)) {
11577 if (regime_el(env, mmu_idx) == 3) {
11578 address += env->cp15.fcseidr_s;
11579 } else {
11580 address += env->cp15.fcseidr_ns;
11581 }
11582 }
11583
11584 if (arm_feature(env, ARM_FEATURE_PMSA)) {
11585 bool ret;
11586 *page_size = TARGET_PAGE_SIZE;
11587
11588 if (arm_feature(env, ARM_FEATURE_V8)) {
11589 /* PMSAv8 */
11590 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
11591 phys_ptr, attrs, prot, page_size, fi);
11592 } else if (arm_feature(env, ARM_FEATURE_V7)) {
11593 /* PMSAv7 */
11594 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
11595 phys_ptr, prot, page_size, fi);
11596 } else {
11597 /* Pre-v7 MPU */
11598 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
11599 phys_ptr, prot, fi);
11600 }
11601 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
11602 " mmu_idx %u -> %s (prot %c%c%c)\n",
11603 access_type == MMU_DATA_LOAD ? "reading" :
11604 (access_type == MMU_DATA_STORE ? "writing" : "execute"),
11605 (uint32_t)address, mmu_idx,
11606 ret ? "Miss" : "Hit",
11607 *prot & PAGE_READ ? 'r' : '-',
11608 *prot & PAGE_WRITE ? 'w' : '-',
11609 *prot & PAGE_EXEC ? 'x' : '-');
11610
11611 return ret;
11612 }
11613
11614 /* Definitely a real MMU, not an MPU */
11615
11616 if (regime_translation_disabled(env, mmu_idx)) {
11617 /* MMU disabled. */
11618 *phys_ptr = address;
11619 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11620 *page_size = TARGET_PAGE_SIZE;
11621 return false;
11622 }
11623
11624 if (regime_using_lpae_format(env, mmu_idx)) {
11625 return get_phys_addr_lpae(env, address, access_type, mmu_idx,
11626 phys_ptr, attrs, prot, page_size,
11627 fi, cacheattrs);
11628 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
11629 return get_phys_addr_v6(env, address, access_type, mmu_idx,
11630 phys_ptr, attrs, prot, page_size, fi);
11631 } else {
11632 return get_phys_addr_v5(env, address, access_type, mmu_idx,
11633 phys_ptr, prot, page_size, fi);
11634 }
11635 }
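
/*
 * Minimal caller sketch (compiled out, hypothetical function): get_phys_addr
 * returns a fault indication, so "false" means success and the out-parameters
 * are only meaningful in that case; on "true" the details are in *fi instead.
 */
#if 0
static bool example_probe_read(CPUARMState *env, target_ulong va,
                               ARMMMUIdx mmu_idx, hwaddr *pa)
{
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    target_ulong page_size;
    int prot;

    if (get_phys_addr(env, va, MMU_DATA_LOAD, mmu_idx, pa,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* Translation aborted; fi.type says why. */
        return false;
    }
    return (prot & PAGE_READ) != 0;
}
#endif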
11636
11637 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
11638 MemTxAttrs *attrs)
11639 {
11640 ARMCPU *cpu = ARM_CPU(cs);
11641 CPUARMState *env = &cpu->env;
11642 hwaddr phys_addr;
11643 target_ulong page_size;
11644 int prot;
11645 bool ret;
11646 ARMMMUFaultInfo fi = {};
11647 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
11648
11649 *attrs = (MemTxAttrs) {};
11650
11651 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
11652 attrs, &prot, &page_size, &fi, NULL);
11653
11654 if (ret) {
11655 return -1;
11656 }
11657 return phys_addr;
11658 }
11659
11660 #endif
11661
11662 /* Note that signed overflow is undefined in C. The following routines are
11663 careful to use unsigned types where modulo arithmetic is required.
11664 Failure to do so _will_ break on newer gcc. */
11665
11666 /* Signed saturating arithmetic. */
11667
11668 /* Perform 16-bit signed saturating addition. */
11669 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
11670 {
11671 uint16_t res;
11672
11673 res = a + b;
11674 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
11675 if (a & 0x8000)
11676 res = 0x8000;
11677 else
11678 res = 0x7fff;
11679 }
11680 return res;
11681 }
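
/*
 * Illustrative sketch only (compiled out, hypothetical helper name): the
 * saturation behaviour above, with the 16-bit values read as signed.
 */
#if 0
static void add16_sat_examples(void)
{
    assert(add16_sat(0x7000, 0x2000) == 0x7fff); /* positive overflow */
    assert(add16_sat(0x8000, 0xff00) == 0x8000); /* negative overflow */
    assert(add16_sat(0x1234, 0x0001) == 0x1235); /* no saturation */
}
#endif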
11682
11683 /* Perform 8-bit signed saturating addition. */
11684 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
11685 {
11686 uint8_t res;
11687
11688 res = a + b;
11689 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
11690 if (a & 0x80)
11691 res = 0x80;
11692 else
11693 res = 0x7f;
11694 }
11695 return res;
11696 }
11697
11698 /* Perform 16-bit signed saturating subtraction. */
11699 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
11700 {
11701 uint16_t res;
11702
11703 res = a - b;
11704 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
11705 if (a & 0x8000)
11706 res = 0x8000;
11707 else
11708 res = 0x7fff;
11709 }
11710 return res;
11711 }
11712
11713 /* Perform 8-bit signed saturating subtraction. */
11714 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
11715 {
11716 uint8_t res;
11717
11718 res = a - b;
11719 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
11720 if (a & 0x80)
11721 res = 0x80;
11722 else
11723 res = 0x7f;
11724 }
11725 return res;
11726 }
11727
11728 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
11729 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
11730 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
11731 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
11732 #define PFX q
11733
11734 #include "op_addsub.h"
11735
11736 /* Unsigned saturating arithmetic. */
11737 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
11738 {
11739 uint16_t res;
11740 res = a + b;
11741 if (res < a)
11742 res = 0xffff;
11743 return res;
11744 }
11745
11746 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
11747 {
11748 if (a > b)
11749 return a - b;
11750 else
11751 return 0;
11752 }
11753
11754 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
11755 {
11756 uint8_t res;
11757 res = a + b;
11758 if (res < a)
11759 res = 0xff;
11760 return res;
11761 }
11762
11763 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
11764 {
11765 if (a > b)
11766 return a - b;
11767 else
11768 return 0;
11769 }
11770
11771 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
11772 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
11773 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
11774 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
11775 #define PFX uq
11776
11777 #include "op_addsub.h"
11778
11779 /* Signed modulo arithmetic. */
11780 #define SARITH16(a, b, n, op) do { \
11781 int32_t sum; \
11782 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
11783 RESULT(sum, n, 16); \
11784 if (sum >= 0) \
11785 ge |= 3 << (n * 2); \
11786 } while(0)
11787
11788 #define SARITH8(a, b, n, op) do { \
11789 int32_t sum; \
11790 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
11791 RESULT(sum, n, 8); \
11792 if (sum >= 0) \
11793 ge |= 1 << n; \
11794 } while(0)
11795
11796
11797 #define ADD16(a, b, n) SARITH16(a, b, n, +)
11798 #define SUB16(a, b, n) SARITH16(a, b, n, -)
11799 #define ADD8(a, b, n) SARITH8(a, b, n, +)
11800 #define SUB8(a, b, n) SARITH8(a, b, n, -)
11801 #define PFX s
11802 #define ARITH_GE
11803
11804 #include "op_addsub.h"
11805
11806 /* Unsigned modulo arithmetic. */
11807 #define ADD16(a, b, n) do { \
11808 uint32_t sum; \
11809 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
11810 RESULT(sum, n, 16); \
11811 if ((sum >> 16) == 1) \
11812 ge |= 3 << (n * 2); \
11813 } while(0)
11814
11815 #define ADD8(a, b, n) do { \
11816 uint32_t sum; \
11817 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
11818 RESULT(sum, n, 8); \
11819 if ((sum >> 8) == 1) \
11820 ge |= 1 << n; \
11821 } while(0)
11822
11823 #define SUB16(a, b, n) do { \
11824 uint32_t sum; \
11825 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
11826 RESULT(sum, n, 16); \
11827 if ((sum >> 16) == 0) \
11828 ge |= 3 << (n * 2); \
11829 } while(0)
11830
11831 #define SUB8(a, b, n) do { \
11832 uint32_t sum; \
11833 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
11834 RESULT(sum, n, 8); \
11835 if ((sum >> 8) == 0) \
11836 ge |= 1 << n; \
11837 } while(0)
11838
11839 #define PFX u
11840 #define ARITH_GE
11841
11842 #include "op_addsub.h"
11843
11844 /* Halved signed arithmetic. */
11845 #define ADD16(a, b, n) \
11846 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
11847 #define SUB16(a, b, n) \
11848 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
11849 #define ADD8(a, b, n) \
11850 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
11851 #define SUB8(a, b, n) \
11852 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
11853 #define PFX sh
11854
11855 #include "op_addsub.h"
11856
11857 /* Halved unsigned arithmetic. */
11858 #define ADD16(a, b, n) \
11859 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11860 #define SUB16(a, b, n) \
11861 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11862 #define ADD8(a, b, n) \
11863 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11864 #define SUB8(a, b, n) \
11865 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11866 #define PFX uh
11867
11868 #include "op_addsub.h"
11869
11870 static inline uint8_t do_usad(uint8_t a, uint8_t b)
11871 {
11872 if (a > b)
11873 return a - b;
11874 else
11875 return b - a;
11876 }
11877
11878 /* Unsigned sum of absolute byte differences. */
11879 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
11880 {
11881 uint32_t sum;
11882 sum = do_usad(a, b);
11883 sum += do_usad(a >> 8, b >> 8);
11884 sum += do_usad(a >> 16, b >> 16);
11885 sum += do_usad(a >> 24, b >> 24);
11886 return sum;
11887 }
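
/*
 * Illustrative sketch only (compiled out, hypothetical helper name): USAD8
 * sums |a[i] - b[i]| across the four byte lanes.
 */
#if 0
static void usad8_example(void)
{
    /* |0x10-0x20| + |0x80-0x70| + |0xff-0x00| + |0x01-0x01| = 0x11f */
    assert(helper_usad8(0x01ff8010, 0x01007020) == 0x11f);
}
#endif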
11888
11889 /* For ARMv6 SEL instruction. */
11890 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
11891 {
11892 uint32_t mask;
11893
11894 mask = 0;
11895 if (flags & 1)
11896 mask |= 0xff;
11897 if (flags & 2)
11898 mask |= 0xff00;
11899 if (flags & 4)
11900 mask |= 0xff0000;
11901 if (flags & 8)
11902 mask |= 0xff000000;
11903 return (a & mask) | (b & ~mask);
11904 }
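
/*
 * Illustrative sketch only (compiled out, hypothetical helper name): with
 * GE flags 0b0101, SEL takes bytes 0 and 2 from the first operand and
 * bytes 1 and 3 from the second.
 */
#if 0
static void sel_flags_example(void)
{
    assert(helper_sel_flags(0x5, 0xAABBCCDD, 0x11223344) == 0x11BB33DD);
}
#endif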
11905
11906 /* CRC helpers.
11907 * The upper bytes of val (above the number specified by 'bytes') must have
11908 * been zeroed out by the caller.
11909 */
11910 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
11911 {
11912 uint8_t buf[4];
11913
11914 stl_le_p(buf, val);
11915
11916 /* zlib crc32 converts the accumulator and output to one's complement. */
11917 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
11918 }
11919
11920 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
11921 {
11922 uint8_t buf[4];
11923
11924 stl_le_p(buf, val);
11925
11926 /* Linux crc32c converts the output to one's complement. */
11927 return crc32c(acc, buf, bytes) ^ 0xffffffff;
11928 }
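
/*
 * Illustrative sketch only (compiled out, hypothetical wrappers): how the
 * byte and word forms of the CRC32/CRC32C instructions could feed these
 * helpers, with the value zero-extended and 'bytes' giving the access size.
 */
#if 0
static uint32_t example_crc32b(uint32_t acc, uint8_t value)
{
    /* Byte form: only the low 8 bits are significant, bytes == 1. */
    return helper_crc32(acc, value, 1);
}

static uint32_t example_crc32cw(uint32_t acc, uint32_t value)
{
    /* Word form of the Castagnoli variant: bytes == 4. */
    return helper_crc32c(acc, value, 4);
}
#endif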
11929
11930 /* Return the exception level to which FP-disabled exceptions should
11931 * be taken, or 0 if FP is enabled.
11932 */
11933 int fp_exception_el(CPUARMState *env, int cur_el)
11934 {
11935 #ifndef CONFIG_USER_ONLY
11936 /* CPACR and the CPTR registers don't exist before v6, so FP is
11937 * always accessible
11938 */
11939 if (!arm_feature(env, ARM_FEATURE_V6)) {
11940 return 0;
11941 }
11942
11943 if (arm_feature(env, ARM_FEATURE_M)) {
11944 /* CPACR can cause a NOCP UsageFault taken to current security state */
11945 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
11946 return 1;
11947 }
11948
11949 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
11950 if (!extract32(env->v7m.nsacr, 10, 1)) {
11951 /* FP insns cause a NOCP UsageFault taken to Secure */
11952 return 3;
11953 }
11954 }
11955
11956 return 0;
11957 }
11958
11959 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
11960 * 0, 2 : trap EL0 and EL1/PL1 accesses
11961 * 1 : trap only EL0 accesses
11962 * 3 : trap no accesses
11963 * This register is ignored if E2H+TGE are both set.
11964 */
11965 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11966 int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
11967
11968 switch (fpen) {
11969 case 0:
11970 case 2:
11971 if (cur_el == 0 || cur_el == 1) {
11972 /* Trap to PL1, which might be EL1 or EL3 */
11973 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
11974 return 3;
11975 }
11976 return 1;
11977 }
11978 if (cur_el == 3 && !is_a64(env)) {
11979 /* Secure PL1 running at EL3 */
11980 return 3;
11981 }
11982 break;
11983 case 1:
11984 if (cur_el == 0) {
11985 return 1;
11986 }
11987 break;
11988 case 3:
11989 break;
11990 }
11991 }
11992
11993 /*
11994 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
11995 * to control non-secure access to the FPU. It doesn't have any
11996 * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
11997 */
11998 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
11999 cur_el <= 2 && !arm_is_secure_below_el3(env))) {
12000 if (!extract32(env->cp15.nsacr, 10, 1)) {
12001 /* FP insns act as UNDEF */
12002 return cur_el == 2 ? 2 : 1;
12003 }
12004 }
12005
12006 /* For the CPTR registers we don't need to guard with an ARM_FEATURE
12007 * check because zero bits in the registers mean "don't trap".
12008 */
12009
12010 /* CPTR_EL2 : present in v7VE or v8 */
12011 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
12012 && !arm_is_secure_below_el3(env)) {
12013 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
12014 return 2;
12015 }
12016
12017 /* CPTR_EL3 : present in v8 */
12018 if (extract32(env->cp15.cptr_el[3], 10, 1)) {
12019 /* Trap all FP ops to EL3 */
12020 return 3;
12021 }
12022 #endif
12023 return 0;
12024 }
12025
12026 /* Return the exception level we're running at if this is our mmu_idx */
12027 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
12028 {
12029 if (mmu_idx & ARM_MMU_IDX_M) {
12030 return mmu_idx & ARM_MMU_IDX_M_PRIV;
12031 }
12032
12033 switch (mmu_idx) {
12034 case ARMMMUIdx_E10_0:
12035 case ARMMMUIdx_E20_0:
12036 case ARMMMUIdx_SE10_0:
12037 return 0;
12038 case ARMMMUIdx_E10_1:
12039 case ARMMMUIdx_E10_1_PAN:
12040 case ARMMMUIdx_SE10_1:
12041 case ARMMMUIdx_SE10_1_PAN:
12042 return 1;
12043 case ARMMMUIdx_E2:
12044 case ARMMMUIdx_E20_2:
12045 case ARMMMUIdx_E20_2_PAN:
12046 return 2;
12047 case ARMMMUIdx_SE3:
12048 return 3;
12049 default:
12050 g_assert_not_reached();
12051 }
12052 }
12053
12054 #ifndef CONFIG_TCG
12055 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12056 {
12057 g_assert_not_reached();
12058 }
12059 #endif
12060
12061 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
12062 {
12063 if (arm_feature(env, ARM_FEATURE_M)) {
12064 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12065 }
12066
12067 /* See ARM pseudo-function ELIsInHost. */
12068 switch (el) {
12069 case 0:
12070 if (arm_is_secure_below_el3(env)) {
12071 return ARMMMUIdx_SE10_0;
12072 }
12073 if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
12074 && arm_el_is_aa64(env, 2)) {
12075 return ARMMMUIdx_E20_0;
12076 }
12077 return ARMMMUIdx_E10_0;
12078 case 1:
12079 if (arm_is_secure_below_el3(env)) {
12080 if (env->pstate & PSTATE_PAN) {
12081 return ARMMMUIdx_SE10_1_PAN;
12082 }
12083 return ARMMMUIdx_SE10_1;
12084 }
12085 if (env->pstate & PSTATE_PAN) {
12086 return ARMMMUIdx_E10_1_PAN;
12087 }
12088 return ARMMMUIdx_E10_1;
12089 case 2:
12090 /* TODO: ARMv8.4-SecEL2 */
12091 /* Note that TGE does not apply at EL2. */
12092 if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
12093 if (env->pstate & PSTATE_PAN) {
12094 return ARMMMUIdx_E20_2_PAN;
12095 }
12096 return ARMMMUIdx_E20_2;
12097 }
12098 return ARMMMUIdx_E2;
12099 case 3:
12100 return ARMMMUIdx_SE3;
12101 default:
12102 g_assert_not_reached();
12103 }
12104 }
12105
12106 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12107 {
12108 return arm_mmu_idx_el(env, arm_current_el(env));
12109 }
12110
12111 int cpu_mmu_index(CPUARMState *env, bool ifetch)
12112 {
12113 return arm_to_core_mmu_idx(arm_mmu_idx(env));
12114 }
12115
12116 #ifndef CONFIG_USER_ONLY
12117 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
12118 {
12119 return stage_1_mmu_idx(arm_mmu_idx(env));
12120 }
12121 #endif
12122
12123 static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
12124 ARMMMUIdx mmu_idx, uint32_t flags)
12125 {
12126 flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
12127 flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
12128 arm_to_core_mmu_idx(mmu_idx));
12129
12130 if (arm_singlestep_active(env)) {
12131 flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
12132 }
12133 return flags;
12134 }
12135
12136 static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
12137 ARMMMUIdx mmu_idx, uint32_t flags)
12138 {
12139 bool sctlr_b = arm_sctlr_b(env);
12140
12141 if (sctlr_b) {
12142 flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
12143 }
12144 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
12145 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
12146 }
12147 flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
12148
12149 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
12150 }
12151
12152 static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
12153 ARMMMUIdx mmu_idx)
12154 {
12155 uint32_t flags = 0;
12156
12157 if (arm_v7m_is_handler_mode(env)) {
12158 flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
12159 }
12160
12161 /*
12162 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
12163 * is suppressing them because the requested execution priority
12164 * is less than 0.
12165 */
12166 if (arm_feature(env, ARM_FEATURE_V8) &&
12167 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
12168 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
12169 flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
12170 }
12171
12172 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
12173 }
12174
12175 static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
12176 {
12177 int flags = 0;
12178
12179 flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
12180 arm_debug_target_el(env));
12181 return flags;
12182 }
12183
12184 static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
12185 ARMMMUIdx mmu_idx)
12186 {
12187 uint32_t flags = rebuild_hflags_aprofile(env);
12188
12189 if (arm_el_is_aa64(env, 1)) {
12190 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
12191 }
12192
12193 if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
12194 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12195 flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
12196 }
12197
12198 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
12199 }
12200
12201 static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
12202 ARMMMUIdx mmu_idx)
12203 {
12204 uint32_t flags = rebuild_hflags_aprofile(env);
12205 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
12206 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
12207 uint64_t sctlr;
12208 int tbii, tbid;
12209
12210 flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
12211
12212 /* Get control bits for tagged addresses. */
12213 tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
12214 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
12215
12216 flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
12217 flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
12218
12219 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
12220 int sve_el = sve_exception_el(env, el);
12221 uint32_t zcr_len;
12222
12223 /*
12224 * If SVE is disabled, but FP is enabled,
12225 * then the effective len is 0.
12226 */
12227 if (sve_el != 0 && fp_el == 0) {
12228 zcr_len = 0;
12229 } else {
12230 zcr_len = sve_zcr_len_for_el(env, el);
12231 }
12232 flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
12233 flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
12234 }
12235
12236 sctlr = regime_sctlr(env, stage1);
12237
12238 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
12239 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
12240 }
12241
12242 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
12243 /*
12244 * In order to save space in flags, we record only whether
12245 * pauth is "inactive", meaning all insns are implemented as
12246 * a nop, or "active" when some action must be performed.
12247 * The decision of which action to take is left to a helper.
12248 */
12249 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
12250 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
12251 }
12252 }
12253
12254 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12255 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
12256 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
12257 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
12258 }
12259 }
12260
12261 /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
12262 if (!(env->pstate & PSTATE_UAO)) {
12263 switch (mmu_idx) {
12264 case ARMMMUIdx_E10_1:
12265 case ARMMMUIdx_E10_1_PAN:
12266 case ARMMMUIdx_SE10_1:
12267 case ARMMMUIdx_SE10_1_PAN:
12268 /* TODO: ARMv8.3-NV */
12269 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
12270 break;
12271 case ARMMMUIdx_E20_2:
12272 case ARMMMUIdx_E20_2_PAN:
12273 /* TODO: ARMv8.4-SecEL2 */
12274 /*
12275 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
12276 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
12277 */
12278 if (env->cp15.hcr_el2 & HCR_TGE) {
12279 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
12280 }
12281 break;
12282 default:
12283 break;
12284 }
12285 }
12286
12287 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
12288 }
12289
12290 static uint32_t rebuild_hflags_internal(CPUARMState *env)
12291 {
12292 int el = arm_current_el(env);
12293 int fp_el = fp_exception_el(env, el);
12294 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12295
12296 if (is_a64(env)) {
12297 return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
12298 } else if (arm_feature(env, ARM_FEATURE_M)) {
12299 return rebuild_hflags_m32(env, fp_el, mmu_idx);
12300 } else {
12301 return rebuild_hflags_a32(env, fp_el, mmu_idx);
12302 }
12303 }
12304
12305 void arm_rebuild_hflags(CPUARMState *env)
12306 {
12307 env->hflags = rebuild_hflags_internal(env);
12308 }
12309
12310 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
12311 {
12312 int fp_el = fp_exception_el(env, el);
12313 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12314
12315 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
12316 }
12317
12318 /*
12319 * If we have triggered an EL state change we can't rely on the
12320 * translator having passed it to us; we need to recompute.
12321 */
12322 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
12323 {
12324 int el = arm_current_el(env);
12325 int fp_el = fp_exception_el(env, el);
12326 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12327 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
12328 }
12329
12330 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
12331 {
12332 int fp_el = fp_exception_el(env, el);
12333 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12334
12335 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
12336 }
12337
12338 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
12339 {
12340 int fp_el = fp_exception_el(env, el);
12341 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12342
12343 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
12344 }
12345
12346 static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
12347 {
12348 #ifdef CONFIG_DEBUG_TCG
12349 uint32_t env_flags_current = env->hflags;
12350 uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);
12351
12352 if (unlikely(env_flags_current != env_flags_rebuilt)) {
12353 fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
12354 env_flags_current, env_flags_rebuilt);
12355 abort();
12356 }
12357 #endif
12358 }
12359
12360 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
12361 target_ulong *cs_base, uint32_t *pflags)
12362 {
12363 uint32_t flags = env->hflags;
12364 uint32_t pstate_for_ss;
12365
12366 *cs_base = 0;
12367 assert_hflags_rebuild_correctly(env);
12368
12369 if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
12370 *pc = env->pc;
12371 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12372 flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
12373 }
12374 pstate_for_ss = env->pstate;
12375 } else {
12376 *pc = env->regs[15];
12377
12378 if (arm_feature(env, ARM_FEATURE_M)) {
12379 if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12380 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
12381 != env->v7m.secure) {
12382 flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
12383 }
12384
12385 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
12386 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
12387 (env->v7m.secure &&
12388 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
12389 /*
12390 * ASPEN is set, but FPCA/SFPA indicate that there is no
12391 * active FP context; we must create a new FP context before
12392 * executing any FP insn.
12393 */
12394 flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
12395 }
12396
12397 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
12398 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
12399 flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
12400 }
12401 } else {
12402 /*
12403 * Note that XSCALE_CPAR shares bits with VECSTRIDE.
12404 * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
12405 */
12406 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
12407 flags = FIELD_DP32(flags, TBFLAG_A32,
12408 XSCALE_CPAR, env->cp15.c15_cpar);
12409 } else {
12410 flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
12411 env->vfp.vec_len);
12412 flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
12413 env->vfp.vec_stride);
12414 }
12415 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
12416 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
12417 }
12418 }
12419
12420 flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
12421 flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
12422 pstate_for_ss = env->uncached_cpsr;
12423 }
12424
12425 /*
12426 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12427 * states defined in the ARM ARM for software singlestep:
12428 * SS_ACTIVE PSTATE.SS State
12429 * 0 x Inactive (the TB flag for SS is always 0)
12430 * 1 0 Active-pending
12431 * 1 1 Active-not-pending
12432 * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
12433 */
12434 if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
12435 (pstate_for_ss & PSTATE_SS)) {
12436 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
12437 }
12438
12439 *pflags = flags;
12440 }
12441
12442 #ifdef TARGET_AARCH64
12443 /*
12444 * The manual says that when SVE is enabled and VQ is widened the
12445 * implementation is allowed to zero the previously inaccessible
12446 * portion of the registers. The corollary to that is that when
12447 * SVE is enabled and VQ is narrowed we are also allowed to zero
12448 * the now inaccessible portion of the registers.
12449 *
12450 * The intent of this is that no predicate bit beyond VQ is ever set.
12451 * Which means that some operations on predicate registers themselves
12452 * may operate on full uint64_t or even unrolled across the maximum
12453 * uint64_t[4]. Performing the full uint64_t[4] of host arithmetic
12454 * unconditionally may well be cheaper than using conditionals to
12455 * restrict the operation to the relevant portion of a uint16_t[16].
12456 */
12457 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12458 {
12459 int i, j;
12460 uint64_t pmask;
12461
12462 assert(vq >= 1 && vq <= ARM_MAX_VQ);
12463 assert(vq <= env_archcpu(env)->sve_max_vq);
12464
12465 /* Zap the high bits of the zregs. */
12466 for (i = 0; i < 32; i++) {
12467 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12468 }
12469
12470 /* Zap the high bits of the pregs and ffr. */
12471 pmask = 0;
12472 if (vq & 3) {
12473 pmask = ~(-1ULL << (16 * (vq & 3)));
12474 }
12475 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12476 for (i = 0; i < 17; ++i) {
12477 env->vfp.pregs[i].p[j] &= pmask;
12478 }
12479 pmask = 0;
12480 }
12481 }
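
/*
 * Illustrative sketch only (compiled out, hypothetical helper name): the
 * partial-word predicate mask computed above when vq is not a multiple
 * of 4 quadwords (16 predicate bits per quadword, 64 per p[] element).
 */
#if 0
static void narrow_vq_pmask_examples(void)
{
    /* vq == 5: one quadword's worth of bits survives in p[vq / 4]. */
    assert(~(-1ULL << (16 * (5 & 3))) == 0xffffULL);
    /* vq == 7: three quadwords' worth survive in p[vq / 4]. */
    assert(~(-1ULL << (16 * (7 & 3))) == 0xffffffffffffULL);
    /* vq a multiple of 4: no partial element, pmask stays 0. */
}
#endif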
12482
12483 /*
12484 * Notice a change in SVE vector size when changing EL.
12485 */
12486 void aarch64_sve_change_el(CPUARMState *env, int old_el,
12487 int new_el, bool el0_a64)
12488 {
12489 ARMCPU *cpu = env_archcpu(env);
12490 int old_len, new_len;
12491 bool old_a64, new_a64;
12492
12493 /* Nothing to do if no SVE. */
12494 if (!cpu_isar_feature(aa64_sve, cpu)) {
12495 return;
12496 }
12497
12498 /* Nothing to do if FP is disabled in either EL. */
12499 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12500 return;
12501 }
12502
12503 /*
12504 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
12505 * at ELx, or not available because the EL is in AArch32 state, then
12506 * for all purposes other than a direct read, the ZCR_ELx.LEN field
12507 * has an effective value of 0".
12508 *
12509 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
12510 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
12511 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
12512 * we already have the correct register contents when encountering the
12513 * vq0->vq0 transition between EL0->EL1.
12514 */
12515 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12516 old_len = (old_a64 && !sve_exception_el(env, old_el)
12517 ? sve_zcr_len_for_el(env, old_el) : 0);
12518 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12519 new_len = (new_a64 && !sve_exception_el(env, new_el)
12520 ? sve_zcr_len_for_el(env, new_el) : 0);
12521
12522 /* When changing vector length, clear inaccessible state. */
12523 if (new_len < old_len) {
12524 aarch64_sve_narrow_vq(env, new_len + 1);
12525 }
12526 }
12527 #endif