/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /* Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
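/*
 * Annotation (not from the original source): as an example of the upgrade
 * described above, with HCR_EL2.FB set a TLBIALL executed by the guest at
 * non-secure EL1 behaves like TLBIALLIS: tlbiall_write() below takes the
 * tlb_flush_all_cpus_synced() path instead of the local tlb_flush().
 */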
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_0 |
                        ARMMMUIdxBit_Stage2);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_0 |
                                        ARMMMUIdxBit_Stage2);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
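/*
 * Worked example (annotation, not from the original source): for a CPU
 * whose PMCR.N field reads as 4, pmu_num_counters() returns 4, so
 * pmu_counter_mask() yields (1 << 31) | ((1 << 4) - 1) == 0x8000000f:
 * bit 31 selects the cycle counter (PMCCNTR) and bits [3:0] select
 * event counters 0-3.
 */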
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}
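/*
 * Annotation (not from the original source): with ARM_CPU_FREQ fixed at
 * 1000000000 and NANOSECONDS_PER_SECOND also 1e9, the integer quotient
 * (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) is exactly 1, so one emulated
 * cycle maps to one nanosecond of QEMU_CLOCK_VIRTUAL time. Note that a
 * configurable frequency below 1 GHz would truncate this quotient to 0
 * and would need a muldiv64()-style computation instead.
 */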
static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
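/*
 * Annotation (not from the original source): supported_event_map is a dense
 * reverse index from architectural event numbers into pm_events[]. For a
 * system-mode build in which all three events above report themselves as
 * supported, pmu_init() leaves it as:
 *   supported_event_map[0x000] == 0   (SW_INCR)
 *   supported_event_map[0x008] == 1   (INST_RETIRED)
 *   supported_event_map[0x011] == 2   (CPU_CYCLES)
 * with every other slot holding UNSUPPORTED_EVENT.
 */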
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
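/*
 * Annotation (not from the original source): the event_mask arithmetic
 * above splits the 6-bit event number across the two 32-bit PMCEID
 * registers. Event 0x011 (CPU_CYCLES) has bit 5 clear, so it sets
 * bit (0x11 & 0x1f) == 17 of cpu->pmceid0; an event in the 0x20-0x3f
 * range, if one were added, would set the corresponding bit of
 * cpu->pmceid1 instead.
 */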
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
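/*
 * Annotation (not from the original source): as an example of the filter
 * logic above, consider a counter whose PMEVTYPER has U set and NSU clear
 * on an EL3-capable CPU. At non-secure EL0, filtered = (u != nsu) = true,
 * so the counter does not count; at secure EL0, filtered = u = true as
 * well. Clearing U re-enables counting at EL0 in both security states
 * (assuming NSU stays clear).
 */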
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
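/*
 * Annotation (not from the original source): a worked example of the delta
 * scheme shared by pmccntr_op_start()/pmccntr_op_finish(). Suppose the
 * underlying clock reads 1000 cycles when the guest writes 0 to PMCCNTR:
 * op_finish() stores c15_ccnt_delta = 1000 - 0 = 1000. If the guest then
 * reads the counter when the clock reaches 1500, op_start() computes
 * 1500 - 1000 = 500, exactly the cycles elapsed while the counter ran.
 */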
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
      /*
       * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
       * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
       */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
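/*
 * Annotation (not from the original source): the counter index decode above
 * reassembles n from the PMEVTYPER<n>_EL0 encoding, where crm[1:0] = n[4:3]
 * and opc2[2:0] = n[2:0]. For example PMEVTYPER3_EL0 (crm = 12, opc2 = 3)
 * gives ((12 & 3) << 3) | (3 & 7) == 3, and PMEVTYPER10_EL0 (crm = 13,
 * opc2 = 2) gives ((13 & 3) << 3) | (2 & 7) == 10.
 */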
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
      /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
       * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}
static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;
    bool allow_virt = (arm_current_el(env) == 1 &&
                       (!arm_is_secure_below_el3(env) ||
                        (env->cp15.scr_el3 & SCR_EEL2)));

    if (allow_virt && (hcr_el2 & HCR_IMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (allow_virt && (hcr_el2 & HCR_FMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
1974 static CPAccessResult
access_aa64_tid1(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1977 if (arm_current_el(env
) == 1 && (arm_hcr_el2_eff(env
) & HCR_TID1
)) {
1978 return CP_ACCESS_TRAP_EL2
;
1981 return CP_ACCESS_OK
;
1984 static CPAccessResult
access_aa32_tid1(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1987 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1988 return access_aa64_tid1(env
, ri
, isread
);
1991 return CP_ACCESS_OK
;
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
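
/* A quick decode example for the reginfo fields above: the AArch32 access
 * "mrc p15, 0, <Rt>, c9, c12, 1" has opc1=0, CRn=9, CRm=12, opc2=1, which
 * matches the PMCNTENSET entry (.cp = 15, .crn = 9, .crm = 12, .opc1 = 0,
 * .opc2 = 1); the _EL0/_EL1 entries instead key off the AArch64
 * opc0/opc1/CRn/CRm/opc2 system-register encoding.
 */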
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
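
/* Note that both PMOVSSET views above are marked ARM_CP_ALIAS: they share
 * the cp15.c9_pmovsr state with the PMOVSR/PMOVSCLR_EL0 entries, and only
 * one reginfo per underlying field is treated as the "real" register for
 * migration and reset purposes.
 */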
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
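
/* The CNTKCTL bit positions make the indexing above work out: EL0PCTEN is
 * bit 0 and EL0VCTEN is bit 1, so gt_counter_access can index them directly
 * with timeridx (GTIMER_PHYS == 0, GTIMER_VIRT == 1), while EL0PTEN is
 * bit 9 and EL0VTEN is bit 8, hence the "9 - timeridx" extract in
 * gt_timer_access.
 */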
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
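
/* Worked example for the overflow clamp above, assuming the default
 * 62.5MHz counter frequency (16ns per tick): converting a tick count to
 * nanoseconds multiplies by 16, so any nexttick above INT64_MAX / 16
 * (roughly 2^59 ticks) would overflow the signed ns deadline a QEMUTimer
 * uses. We clamp to INT64_MAX (about 292 years of ns) and re-arm from the
 * expiry callback for any remaining period.
 */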
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}
static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
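
/* TVAL is thus just a signed 32-bit window onto CVAL: reading returns
 * (uint32_t)(CVAL - (count - offset)) and writing recomputes
 * CVAL = (count - offset) + sext32(value). For instance, with count 1000
 * and offset 0, writing TVAL = 0x100 sets CVAL to 1256.
 */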
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
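
/* CNT*_CTL layout, for the deposit64() above: bit 0 is ENABLE, bit 1 is
 * IMASK, and bit 2 is the read-only ISTATUS. Only bits [1:0] are taken
 * from the guest write; ISTATUS is preserved, and the interrupt line is
 * simply ISTATUS && !IMASK.
 */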
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else
/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
            && (mmu_idx == ARMMMUIdx_Stage1_E1 ||
                mmu_idx == ARMMMUIdx_Stage1_E0)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_Stage1_E1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
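
/* Decode summary for the switch above: these operations live at c7, c8
 * with opc2 0..7, where bit 0 selects the read (R) vs write (W) probe and
 * bits [2:1] select the operation: 0/1 ATS1CPR/ATS1CPW, 2/3 ATS1CUR/ATS1CUW,
 * 4/5 ATS12NSOPR/ATS12NSOPW, 6/7 ATS12NSOUR/ATS12NSOUW.
 */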
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
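
/* These two are inverses: the "extended" format keeps one AP field per
 * region in each 4-bit nibble (only the low 2 bits used), while the
 * "simple" format packs the same fields 2 bits apart. For instance,
 * extended_mpu_ap_bits(0xe) (region 0 AP=2, region 1 AP=3) yields 0x32.
 */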
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
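
/* Example of the mask arithmetic above for TTBCR.N == 2: mask becomes
 * ~(0xffffffff >> 2) = 0xc0000000 (roughly, VAs with either top bit set
 * translate via TTBR1) and base_mask becomes ~(0x3fff >> 2) = 0xfffff000,
 * i.e. the TTBR0 table shrinks to 16KB >> N = 4KB and must be aligned to
 * that size.
 */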
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* TODO: There are ASID fields in here with HCR_EL2.E2H */
    raw_write(env, ri, value);
}
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_E10_1 |
                            ARMMMUIdxBit_E10_0 |
                            ARMMMUIdxBit_Stage2);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
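
/* When EL2 is implemented, reads of MIDR and MPIDR from non-secure EL1
 * are virtualized, as the two read functions above show: the
 * hypervisor-writable VPIDR_EL2/VMPIDR_EL2 values are returned instead
 * of the real ID register values.
 */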
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
    } else {
        return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
    }
}
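
/* The masks returned by the *_tlbmask() helpers are bitmaps of
 * ARMMMUIdx values (one bit per softmmu TLB index); they are fed
 * straight to tlb_flush_by_mmuidx() and friends, so a single TLBI
 * operation flushes every translation regime it architecturally
 * affects.
 */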
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}
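
/* tlb_force_broadcast() is true when HCR_EL2.FB forces EL1 TLB
 * maintenance to be broadcast; in that case even the
 * non-inner-shareable TLBI variants must take the all_cpus_synced
 * flush path, as above and in tlbi_aa64_vae1_write() below.
 */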
static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
    } else if (arm_feature(env, ARM_FEATURE_EL2)) {
        return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_Stage2;
    } else {
        return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
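
/* DCZID_EL0.BS (the low four bits) is the log2 of the DC ZVA block
 * size in words, which is what the dcz_blocksize CPU property holds;
 * bit 4 is DZP, set above whenever the ZVA access check does not pass.
 */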
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* Begin with bits defined in base ARMv8.0. */
    uint64_t valid_mask = MAKE_64BIT_MASK(0, 34);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_vh, cpu)) {
        valid_mask |= HCR_E2H;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
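
/* The AArch32 HCR and HCR2 registers are views of the low and high
 * halves of HCR_EL2; both of the write functions above funnel through
 * hcr_write(), so the valid_mask filtering and the virtual-interrupt
 * update side effects apply no matter which view the guest uses.
 */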
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct).
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_current_el(env) < 2 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
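
/* Worked example: an AArch32 guest executing
 *     MCR p14, 0, r0, c1, c0, 4    ; DBGOSLAR
 * with r0 == 0xC5ACCE55 sets the OS lock (OSLSR_EL1 bit 1); any other
 * value clears it. An AArch64 MSR of OSLAR_EL1 instead takes bit 0 of
 * the written value directly as the new lock state.
 */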
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
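
/* Example of the CPACR.ZEN decode above: with ZEN == 1 (bit 16 set,
 * bit 17 clear), an SVE instruction at EL0 traps (to EL1, or to EL2
 * when HCR_EL2.TGE routes exceptions there), while the same instruction
 * at EL1 is not trapped by CPACR and falls through to the CPTR_EL2/EL3
 * checks below.
 */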
static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    end_len = start_len &= 0xf;
    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return sve_zcr_get_valid_len(cpu, zcr_len);
}
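
/* Worked example (a sketch, assuming all the vector lengths involved are
 * present in sve_vq_map): sve_max_vq = 16, ZCR_EL1 = 5, no EL2, and
 * ZCR_EL3 = 3 gives zcr_len = MIN(15, 5, 3) = 3, i.e. a quadword count
 * of 4 and therefore a 512-bit vector length at EL1.
 */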
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
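
/* Worked example of the BAS decode above: with WVR doubleword-aligned,
 * WCR.BAS = 0b00111100 gives basstart = 2 and len = 4, so the watchpoint
 * covers the four bytes at [WVR+2, WVR+6). A BAS value with a hole such
 * as 0b00000101 is CONSTRAINED UNPREDICTABLE; per the comment above we
 * ignore bits past the first run of 1s and watch only the byte at WVR.
 */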
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
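
/* Example of the DBGWVR sign extension above: writing 0x0001000000000000
 * (bit 48 set) is stored and read back as 0xFFFF000000000000, with the
 * two low bits always forced to zero.
 */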
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
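
/* Example of the BAS read-only bit mirroring above: a guest write of
 * BAS = 0b0001 is stored as 0b0011 (breakpoint on addr), and 0b0100 is
 * stored as 0b1100 (breakpoint on addr + 2). This is how writes are
 * constrained to the four cases listed in hw_breakpoint_update().
 */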
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
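
/* For instance, a CPU whose DBGDIDR reports BRPs = 5 and WRPs = 3
 * (in the "number minus 1" encoding noted above) gets DBGBVR0..DBGBVR5
 * with matching DBGBCRs and DBGWVR0..DBGWVR3 with matching DBGWCRs
 * defined by the two loops.
 */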
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};
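
/* Each pointer-authentication key is a single 128-bit value split across
 * a LO/HI register pair; e.g. APIAKEYLO_EL1 and APIAKEYHI_EL1 together
 * hold the 128-bit instruction key A used by PACIA/AUTIA.
 */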
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000.  */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}
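
/* Note that QEMU stores the Z flag inverted: the flag reads as set only
 * when env->ZF == 0. So ZF = 1 above means NZCV = 0000 (success), and
 * ZF = 0 means Z is set, i.e. the NZCV = 0100 "no entropy" indication.
 */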
/* We do not support re-seeding, so the two registers operate the same.  */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_do_writeback(mr, offset, dline_size);
        }
    }
}
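
/* Worked example of the DminLine extraction above: a CTR_EL0 value with
 * DminLine (bits [19:16]) == 4 gives dline_size = 4 << 4 = 64 bytes, and
 * a DC CVAP address of 0x1234 is rounded down to the line base 0x1200.
 */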
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};
#endif /*CONFIG_USER_ONLY*/

#endif /* TARGET_AARCH64 */
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
    REGINFO_SENTINEL
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
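
        /* Example of the event-counter encoding above: counter i = 10 would
         * yield crm = 8 | (3 & (10 >> 3)) = 9 and opc2 = 10 & 7 = 2 for
         * PMEVCNTR10, matching the architected c14 encoding where the low
         * three bits of the counter number live in opc2 and the next two
         * bits in crm.
         */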
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              /* At present, only SVEver == 0 is defined anyway.  */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
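
    /*
     * Illustrative sketch of the blanket-then-override pattern used above
     * (hypothetical names, not part of the CPU model): a wildcarded
     * ARM_CP_NOP | ARM_CP_OVERRIDE entry is registered first to make a whole
     * crn space writes-ignored, and specific registers defined afterwards
     * replace it for their own encodings:
     *
     *     ARMCPRegInfo blanket_wi = {
     *         .name = "EXAMPLE_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
     *         .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
     *         .type = ARM_CP_NOP | ARM_CP_OVERRIDE
     *     };
     *     define_one_arm_cp_reg(cpu, &blanket_wi);
     *     define_arm_cp_regs(cpu, specific_id_regs); // overrides per encoding
     */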
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache.  Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
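
    /*
     * A minimal sketch of what .bank_fieldoffsets means in a definition like
     * SCTLR above (hypothetical register and fields, for illustration only):
     * the AArch32 view is banked between the Secure and Non-secure worlds, so
     * two backing fields are named and add_cpreg_to_hashtable() later picks
     * one per security state:
     *
     *     ARMCPRegInfo example = {
     *         .name = "EXAMPLE", .state = ARM_CP_STATE_BOTH,
     *         .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
     *         .access = PL1_RW,
     *         .bank_fieldoffsets = { offsetof(CPUARMState, cp15.example_s),
     *                                offsetof(CPUARMState, cp15.example_ns) },
     *     };
     *
     * Index [0] is the Secure bank and [1] the Non-secure bank; the ns bit
     * computed in add_cpreg_to_hashtable() selects between them.
     */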
    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/
#endif

    /*
     * While all v8.0 cpus support aarch64, QEMU does have configurations
     * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
     * which will set ID_ISAR6.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_predinv, cpu)
        : cpu_isar_feature(aa32_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
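
/*
 * Example output from arm_cpu_list() (the exact set of names depends on the
 * QEMU version and configured targets; shown here only for illustration):
 *
 *   Available CPUs:
 *     arm1026
 *     cortex-a15
 *     cortex-a53
 *     ...
 *     max
 */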
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
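
/*
 * Worked example (a reading of the code above, not a new definition): an
 * ARM_CP_STATE_BOTH banked reginfo such as the "VBAR" entry registered
 * earlier fans out into three hash table entries:
 *   - an AArch64 entry keyed via ENCODE_AA64_CP_REG(), with fieldoffset
 *     pointing at the non-secure bank;
 *   - an AArch32 non-secure entry keyed via ENCODE_CP_REG() with ns = 1,
 *     marked ARM_CP_ALIAS because the 64-bit entry migrates that state;
 *   - an AArch32 secure entry keyed via ENCODE_CP_REG() with ns = 0, whose
 *     name gets the "_S" suffix appended by the caller.
 */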
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
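
/*
 * Usage sketch (hypothetical register, for illustration only): a definition
 * that wildcards crm and opc2 expands into 16 * 8 = 128 hash table entries,
 * one per concrete encoding; all but the crm = 0 / opc2 = 0 instance are
 * additionally marked ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable:
 *
 *     ARMCPRegInfo example_nop_space = {
 *         .name = "EXAMPLE_NOP_SPACE", .cp = 15, .crn = 8, .crm = CP_ANY,
 *         .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP
 *     };
 *     define_one_arm_cp_reg(cpu, &example_nop_space);
 */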
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
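
/*
 * Usage sketch (hypothetical mods table): a glob entry downgrades every
 * matching register to a constant-zero user-space view, while an exact-name
 * entry can keep selected bits of the reset value visible:
 *
 *     static ARMCPRegUserSpaceInfo example_mods[] = {
 *         { .name = "ID_AA64MMFR*", .is_glob = true },
 *         { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffff },
 *         REGUSERINFO_SENTINEL
 *     };
 *     modify_arm_cp_regs(some_reginfo_list, example_mods);
 */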
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
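
/*
 * Usage sketch: look up the reginfo for a concrete AArch32 access, here the
 * 32-bit non-secure DACR encoding (cp15, crn = 3, crm = 0, opc1 = 0,
 * opc2 = 0); the key layout matches the one built in add_cpreg_to_hashtable:
 *
 *     uint32_t key = ENCODE_CP_REG(15, 0, 1, 3, 0, 0, 0);
 *     const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
 */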
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
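
/*
 * Worked example of the packing above: with only the Z flag set in the
 * cached state (env->ZF == 0; NF, CF, VF, QF all zero) and the CPU in ARM
 * state, cpsr_read() returns env->uncached_cpsr | (1 << 30), i.e. just
 * CPSR_Z ORed into the mode and mask bits held in uncached_cpsr.
 */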
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
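
/*
 * Worked examples for the helpers above:
 *   sxtb16(0x007f0080) == 0x007fff80   (0x80 sign-extends to 0xff80,
 *                                       0x7f stays 0x007f)
 *   uxtb16(0x007f0080) == 0x007f0080   (both low bytes zero-extend)
 *   sdiv(INT_MIN, -1)  == INT_MIN      (overflow pinned, per the AArch32
 *                                       SDIV definition)
 *   udiv(x, 0)         == 0            (division by zero yields 0)
 */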
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 *    In these two cases, the below table contains a target of EL1.  This value
 *    is returned as it is expected that the consumer of the table data will
 *    check for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
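
/*
 * Worked lookup (a reading of the table above, not new behaviour): a
 * non-secure EL0 IRQ with a 64-bit EL3, SCR.IRQ clear, SCR.RW set and
 * HCR.IMO clear indexes target_el_table[1][0][1][0][0][0], which yields 1:
 * the exception is taken to EL1.
 */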
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    };

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * of the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}
*cs
)
8288 * Handle exception entry to Hyp mode; this is sufficiently
8289 * different to entry to other AArch32 modes that we handle it
8292 * The vector table entry used is always the 0x14 Hyp mode entry point,
8293 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
8294 * The offset applied to the preferred return address is always zero
8295 * (see DDI0487C.a section G1.12.3).
8296 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8298 uint32_t addr
, mask
;
8299 ARMCPU
*cpu
= ARM_CPU(cs
);
8300 CPUARMState
*env
= &cpu
->env
;
8302 switch (cs
->exception_index
) {
8310 /* Fall through to prefetch abort. */
8311 case EXCP_PREFETCH_ABORT
:
8312 env
->cp15
.ifar_s
= env
->exception
.vaddress
;
8313 qemu_log_mask(CPU_LOG_INT
, "...with HIFAR 0x%x\n",
8314 (uint32_t)env
->exception
.vaddress
);
8317 case EXCP_DATA_ABORT
:
8318 env
->cp15
.dfar_s
= env
->exception
.vaddress
;
8319 qemu_log_mask(CPU_LOG_INT
, "...with HDFAR 0x%x\n",
8320 (uint32_t)env
->exception
.vaddress
);
8336 cpu_abort(cs
, "Unhandled exception 0x%x\n", cs
->exception_index
);
8339 if (cs
->exception_index
!= EXCP_IRQ
&& cs
->exception_index
!= EXCP_FIQ
) {
8340 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
8342 * QEMU syndrome values are v8-style. v7 has the IL bit
8343 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
8344 * If this is a v7 CPU, squash the IL bit in those cases.
8346 if (cs
->exception_index
== EXCP_PREFETCH_ABORT
||
8347 (cs
->exception_index
== EXCP_DATA_ABORT
&&
8348 !(env
->exception
.syndrome
& ARM_EL_ISV
)) ||
8349 syn_get_ec(env
->exception
.syndrome
) == EC_UNCATEGORIZED
) {
8350 env
->exception
.syndrome
&= ~ARM_EL_IL
;
8353 env
->cp15
.esr_el
[2] = env
->exception
.syndrome
;
8356 if (arm_current_el(env
) != 2 && addr
< 0x14) {
8361 if (!(env
->cp15
.scr_el3
& SCR_EA
)) {
8364 if (!(env
->cp15
.scr_el3
& SCR_IRQ
)) {
8367 if (!(env
->cp15
.scr_el3
& SCR_FIQ
)) {
8371 addr
+= env
->cp15
.hvbar
;
8373 take_aarch32_exception(env
, ARM_CPU_MODE_HYP
, mask
, 0, addr
);
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
8526 static void arm_cpu_do_interrupt_aarch64(CPUState
*cs
)
8528 ARMCPU
*cpu
= ARM_CPU(cs
);
8529 CPUARMState
*env
= &cpu
->env
;
8530 unsigned int new_el
= env
->exception
.target_el
;
8531 target_ulong addr
= env
->cp15
.vbar_el
[new_el
];
8532 unsigned int new_mode
= aarch64_pstate_mode(new_el
, true);
8533 unsigned int cur_el
= arm_current_el(env
);
8536 * Note that new_el can never be 0. If cur_el is 0, then
8537 * el0_a64 is is_a64(), else el0_a64 is ignored.
8539 aarch64_sve_change_el(env
, cur_el
, new_el
, is_a64(env
));
8541 if (cur_el
< new_el
) {
8542 /* Entry vector offset depends on whether the implemented EL
8543 * immediately lower than the target level is using AArch32 or AArch64
8549 is_aa64
= (env
->cp15
.scr_el3
& SCR_RW
) != 0;
8552 is_aa64
= (env
->cp15
.hcr_el2
& HCR_RW
) != 0;
8555 is_aa64
= is_a64(env
);
8558 g_assert_not_reached();
8566 } else if (pstate_read(env
) & PSTATE_SP
) {
8570 switch (cs
->exception_index
) {
8571 case EXCP_PREFETCH_ABORT
:
8572 case EXCP_DATA_ABORT
:
8573 env
->cp15
.far_el
[new_el
] = env
->exception
.vaddress
;
8574 qemu_log_mask(CPU_LOG_INT
, "...with FAR 0x%" PRIx64
"\n",
8575 env
->cp15
.far_el
[new_el
]);
8583 if (syn_get_ec(env
->exception
.syndrome
) == EC_ADVSIMDFPACCESSTRAP
) {
8585 * QEMU internal FP/SIMD syndromes from AArch32 include the
8586 * TA and coproc fields which are only exposed if the exception
8587 * is taken to AArch32 Hyp mode. Mask them out to get a valid
8588 * AArch64 format syndrome.
8590 env
->exception
.syndrome
&= ~MAKE_64BIT_MASK(0, 20);
8592 env
->cp15
.esr_el
[new_el
] = env
->exception
.syndrome
;
8603 cpu_abort(cs
, "Unhandled exception 0x%x\n", cs
->exception_index
);
8607 env
->banked_spsr
[aarch64_banked_spsr_index(new_el
)] = pstate_read(env
);
8608 aarch64_save_sp(env
, arm_current_el(env
));
8609 env
->elr_el
[new_el
] = env
->pc
;
8611 env
->banked_spsr
[aarch64_banked_spsr_index(new_el
)] = cpsr_read(env
);
8612 env
->elr_el
[new_el
] = env
->regs
[15];
8614 aarch64_sync_32_to_64(env
);
8616 env
->condexec_bits
= 0;
8618 qemu_log_mask(CPU_LOG_INT
, "...with ELR 0x%" PRIx64
"\n",
8619 env
->elr_el
[new_el
]);
8621 pstate_write(env
, PSTATE_DAIF
| new_mode
);
8623 aarch64_restore_sp(env
, new_el
);
8624 helper_rebuild_hflags_a64(env
, new_el
);
8628 qemu_log_mask(CPU_LOG_INT
, "...to EL%d PC 0x%" PRIx64
" PSTATE 0x%x\n",
8629 new_el
, env
->pc
, pstate_read(env
));
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not trapped
 * to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
/* Return the exception level which controls this address translation regime */
static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
    }
    return env->cp15.sctlr_el[el];
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index.
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    default:
        return mmu_idx;
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
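/*
 * Worked example (illustrative values, not taken from any particular
 * guest): the simple AP field is AP[2] (read-only) over AP[1] (EL0
 * accessible), so ap == 1 (0b01) is read/write at any privilege level,
 * while ap == 2 (0b10) is privileged read-only:
 * simple_ap_to_rw_prot_is_user(2, true) yields 0, but
 * simple_ap_to_rw_prot_is_user(2, false) yields PAGE_READ.
 */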
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
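/*
 * Worked example (illustrative): S2AP == 3 grants PAGE_READ | PAGE_WRITE;
 * with xn == 0 the page also gains PAGE_EXEC, either unconditionally when
 * EL2 is AArch64 or, for AArch32 EL2, only when the page is readable.
 */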
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;

                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
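/*
 * Worked example (illustrative): with SCTLR.WXN set for an LPAE-capable
 * regime, a page with ap == 1 (read/write at all levels) has
 * wxn && (prot_rw & PAGE_WRITE) true, so the final check above withholds
 * PAGE_EXEC: under WXN, writable memory is never executable.
 */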
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
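/*
 * Worked example (illustrative): for an AArch64 stage 2 walk with a 4KB
 * granule (stride == 9, grainsize == 12), inputsize == 40 and a suggested
 * starting level of 1, startsizecheck == 40 - ((3 - 1) * 9 + 12) == 10,
 * which lies within [1, stride + 4] == [1, 13], so the suggested level is
 * accepted provided the PARange checks above also pass.
 */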
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
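/*
 * Worked example (illustrative): for s2attrs == 0xf with HCR_EL2.CD clear,
 * hiattr == loattr == 3 (write-back), so both hints become 3 (RW allocate)
 * and the result is (3 << 6) | (3 << 4) | (3 << 2) | 3 == 0xff, i.e.
 * Normal, Outer and Inner Write-Back Read-Allocate Write-Allocate.
 */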
#endif /* !CONFIG_USER_ONLY */
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (!regime_has_2_ranges(mmu_idx)) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}
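/*
 * Worked example (illustrative): if TCR.TBI1 == 1 and TCR.TBID1 == 1 for
 * the high half of the address space, a data access still sees
 * ret.tbi == 1 (top byte ignored), but an instruction fetch computes
 * tbi && (data || !tbid) == tbi && false == 0: TBID restricts the
 * top-byte-ignore behaviour to data accesses only.
 */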
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        ttbr1_valid = regime_has_2_ranges(mmu_idx);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
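        /*
         * Worked example (illustrative): with a 4KB granule (stride == 9)
         * and inputsize == 48, level = 4 - 44 / 9 = 4 - 4 = 0, i.e. all
         * four levels are walked; with inputsize == 30, level =
         * 4 - 26 / 9 = 4 - 2 = 2, so the walk starts at level 2.
         */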
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_Stage2) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }
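        /*
         * Worked example (illustrative): for a 2KB region (rsize == 11
         * before the subtraction above) the eight subregions are 256 bytes
         * each. With SRD == 0x0f (subregions 0-3 disabled), a hit in
         * subregion 5 finds srdis clear; the doubling loop then grows rsize
         * back through the {4,5} and {4..7} groups, whose bits all match,
         * and stops at the full {0..7} group where they differ, leaving an
         * effective 1KB (rsize == 10) translation size.
         */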
        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
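/*
 * Worked example (illustrative): SAU region bounds have 32-byte
 * granularity: base is SAU_RBAR[31:5] with bits [4:0] zeroed, limit is
 * SAU_RLAR[31:5] with bits [4:0] set. A region 0x20001000..0x200017ff
 * therefore matches an access to 0x20001234, and since it covers only
 * half of the containing 4KB page, sattrs->subpage is set so the result
 * is not cached for the whole page.
 */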
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
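/*
 * Worked example (illustrative): s1 == 0xf (write-back, RW-allocate) and
 * s2 == 0xb (write-through, as produced by convert_stage2_attrs() for a
 * write-through stage 2 mapping): stage 2 write-through takes precedence,
 * so the result is (2 << 2) | 3 == 0xb, write-through with the stage 1
 * allocation hints.
 */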
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either is outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either is inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
10765 /* get_phys_addr - get the physical address for this virtual address
10767 * Find the physical address corresponding to the given virtual address,
10768 * by doing a translation table walk on MMU based systems or using the
10769 * MPU state on MPU based systems.
10771 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
10772 * prot and page_size may not be filled in, and the populated fsr value provides
10773 * information on why the translation aborted, in the format of a
10774 * DFSR/IFSR fault register, with the following caveats:
10775 * * we honour the short vs long DFSR format differences.
10776 * * the WnR bit is never set (the caller must do this).
10777 * * for PSMAv5 based systems we don't bother to return a full FSR format
10780 * @env: CPUARMState
10781 * @address: virtual address to get physical address for
10782 * @access_type: 0 for read, 1 for write, 2 for execute
10783 * @mmu_idx: MMU index indicating required translation regime
10784 * @phys_ptr: set to the physical address corresponding to the virtual address
10785 * @attrs: set to the memory transaction attributes to use
10786 * @prot: set to the permissions for the page containing phys_ptr
10787 * @page_size: set to the size of the page containing phys_ptr
10788 * @fi: set to fault info if the translation fails
10789 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
10791 bool get_phys_addr(CPUARMState
*env
, target_ulong address
,
10792 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
10793 hwaddr
*phys_ptr
, MemTxAttrs
*attrs
, int *prot
,
10794 target_ulong
*page_size
,
10795 ARMMMUFaultInfo
*fi
, ARMCacheAttrs
*cacheattrs
)
10797 if (mmu_idx
== ARMMMUIdx_E10_0
|| mmu_idx
== ARMMMUIdx_E10_1
) {
10798 /* Call ourselves recursively to do the stage 1 and then stage 2
10801 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
10805 ARMCacheAttrs cacheattrs2
= {};
10807 ret
= get_phys_addr(env
, address
, access_type
,
10808 stage_1_mmu_idx(mmu_idx
), &ipa
, attrs
,
10809 prot
, page_size
, fi
, cacheattrs
);
10811 /* If S1 fails or S2 is disabled, return early. */
10812 if (ret
|| regime_translation_disabled(env
, ARMMMUIdx_Stage2
)) {
10817 /* S1 is done. Now do S2 translation. */
10818 ret
= get_phys_addr_lpae(env
, ipa
, access_type
, ARMMMUIdx_Stage2
,
10819 phys_ptr
, attrs
, &s2_prot
,
10821 cacheattrs
!= NULL
? &cacheattrs2
: NULL
);
10823 /* Combine the S1 and S2 perms. */
10826 /* Combine the S1 and S2 cache attributes, if needed */
10827 if (!ret
&& cacheattrs
!= NULL
) {
10828 if (env
->cp15
.hcr_el2
& HCR_DC
) {
10830 * HCR.DC forces the first stage attributes to
10831 * Normal Non-Shareable,
10832 * Inner Write-Back Read-Allocate Write-Allocate,
10833 * Outer Write-Back Read-Allocate Write-Allocate.
10835 cacheattrs
->attrs
= 0xff;
10836 cacheattrs
->shareability
= 0;
10838 *cacheattrs
= combine_cacheattrs(*cacheattrs
, cacheattrs2
);
10844 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
10846 mmu_idx
= stage_1_mmu_idx(mmu_idx
);
10850 /* The page table entries may downgrade secure to non-secure, but
10851 * cannot upgrade an non-secure translation regime's attributes
10854 attrs
->secure
= regime_is_secure(env
, mmu_idx
);
10855 attrs
->user
= regime_is_user(env
, mmu_idx
);
10857 /* Fast Context Switch Extension. This doesn't exist at all in v8.
10858 * In v7 and earlier it affects all stage 1 translations.
10860 if (address
< 0x02000000 && mmu_idx
!= ARMMMUIdx_Stage2
10861 && !arm_feature(env
, ARM_FEATURE_V8
)) {
10862 if (regime_el(env
, mmu_idx
) == 3) {
10863 address
+= env
->cp15
.fcseidr_s
;
10865 address
+= env
->cp15
.fcseidr_ns
;
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }
    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */
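
/*
 * Illustrative aside (not used by the helpers below): the distinction the
 * comment above is drawing. Unsigned arithmetic wraps modulo 2^N by
 * definition, whereas signed overflow is undefined behaviour, which the
 * optimizer is entitled to assume never happens.
 */
#if 0
static uint32_t wrap_u32(uint32_t a, uint32_t b)
{
    return a + b;               /* well defined: wraps modulo 2^32 */
}

static int32_t wrap_s32(int32_t a, int32_t b)
{
    return a + b;               /* undefined behaviour if the sum overflows */
}
#endif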
/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}
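
/*
 * Worked example of the overflow test above: add16_sat(0x7fff, 1) gives
 * res = 0x8000, so (res ^ a) has bit 15 set (the sign changed) while
 * (a ^ b) does not (the operands agreed in sign), and the result is
 * clamped to 0x7fff. Conversely add16_sat(0x8000, 0xffff), i.e.
 * -32768 + -1, trips the same test and clamps to 0x8000.
 */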
/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}
/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
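
/*
 * Rough sketch, for orientation only, of what each inclusion of
 * op_addsub.h expands to: the header pastes PFX into the helper names
 * and applies the ADD16/SUB16/ADD8/SUB8 macros lane by lane, so with
 * PFX defined as q it emits the A32 QADD16 helper roughly along these
 * lines (RESULT() packs each lane back into res):
 */
#if 0
uint32_t HELPER(glue(PFX, add16))(uint32_t a, uint32_t b)
{
    uint32_t res = 0;

    ADD16(a, b, 0);             /* low halfword, lane 0 */
    ADD16(a >> 16, b >> 16, 1); /* high halfword, lane 1 */
    return res;
}
#endif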
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
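
/*
 * Worked example of the GE computation above: sadd16 on halfword lane 0
 * with a = 3, b = -5 produces sum = -2, so the "sum >= 0" test fails and
 * GE bits [1:0] stay clear; with b = 5 the sum is 8 and GE[1:0] are set.
 * The SEL instruction (HELPER(sel_flags) further down) later selects
 * bytes based on exactly these GE bits.
 */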
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
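
/*
 * Example of the halving forms above: the addition is done in 32-bit
 * arithmetic before the >> 1, so uhadd8 on 0xff and 0xff computes
 * (0xff + 0xff) >> 1 = 0x1fe >> 1 = 0xff with no risk of overflow;
 * that is why these expansions need no saturation and set no GE flags.
 */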
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}
/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
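
/*
 * Worked example: usad8(0x01020304, 0x04030201) accumulates, byte by
 * byte from the least significant end, |4-1| + |3-2| + |2-3| + |1-4|
 * = 3 + 1 + 1 + 3 = 8.
 */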
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
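
/*
 * Illustrative use of the two helpers above: for the word forms of the
 * CRC32/CRC32C instructions the translator passes bytes = 4, while for
 * the halfword and byte forms it masks val down and passes bytes = 2 or
 * 1, so only that prefix of the little-endian buf is fed to the
 * checksum routine.
 */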
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }
    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }
    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
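
/*
 * Example of the CPACR.FPEN handling above: with FPEN = 1 (trap only
 * EL0), an FP instruction at EL0 makes fp_exception_el() return 1
 * (trap to EL1), while at EL1 it falls through to the CPTR_EL2/EL3
 * checks; with FPEN = 3 nothing traps via CPACR at all.
 */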
/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_SE10_1:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        if (arm_is_secure_below_el3(env)) {
            return ARMMMUIdx_SE10_0;
        }
        if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
            && arm_el_is_aa64(env, 2)) {
            return ARMMMUIdx_E20_0;
        }
        return ARMMMUIdx_E10_0;
    case 1:
        if (arm_is_secure_below_el3(env)) {
            return ARMMMUIdx_SE10_1;
        }
        return ARMMMUIdx_E10_1;
    case 2:
        /* TODO: ARMv8.4-SecEL2 */
        /* Note that TGE does not apply at EL2.  */
        if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
            return ARMMMUIdx_E20_2;
        }
        return ARMMMUIdx_E2;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }
}
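
/*
 * Example of the mapping above: non-secure EL0 under an AArch64 EL2
 * with HCR_EL2.{E2H,TGE} = {1,1} runs in the EL2&0 regime and gets
 * ARMMMUIdx_E20_0; clear TGE and the same EL0 uses the EL1&0 regime,
 * ARMMMUIdx_E10_0.
 */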
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}
int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}
static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx, uint32_t flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}
static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }

    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
    uint64_t sctlr;
    int tbii, tbid;

    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    if (regime_has_2_ranges(mmu_idx)) {
        ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
        tbid = (p1.tbi << 1) | p0.tbi;
        tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
    } else {
        tbid = p0.tbi;
        tbii = tbid & !p0.tbid;
    }

    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
    }
    sctlr = regime_sctlr(env, stage1);

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;
    uint32_t pstate_for_ss;

    *cs_base = 0;
    assert_hflags_rebuild_correctly(env);

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
        pstate_for_ss = env->pstate;
    } else {
        *pc = env->regs[15];
        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
        pstate_for_ss = env->uncached_cpsr;
    }
    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (pstate_for_ss & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
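
/*
 * Worked example of the predicate masking above: each p[j] holds the
 * predicate bits for four vector quadwords (16 bits per quadword).
 * Narrowing to vq = 2 gives pmask = ~(-1ULL << 32) = 0x00000000ffffffff
 * at j = vq / 4 = 0, keeping the 32 bits belonging to the two remaining
 * quadwords; pmask is then zeroed so every later p[j] is cleared whole.
 */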
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}