/*
 * This code is licensed under the GNU GPL v2 or later.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

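/*
 * Worked example of the read-back handshake above, using a hypothetical
 * register whose low four bits are read-only zero:
 *
 *     write_raw_cp_reg(&cpu->env, ri, 0x1234);   incoming migration value
 *     read_raw_cp_reg(&cpu->env, ri);            yields 0x1230
 *
 * The mismatch makes write_list_to_cpustate() return false, so the caller
 * can fail the migration instead of silently dropping state; the matching
 * recheck in write_cpustate_to_list() (with kvm_sync true) keeps a KVM-level
 * sync from clobbering a value the previous inbound sync never managed to
 * write.
 */
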
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint64_t mask = 0;

        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

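/*
 * Worked example of the NSACR.CP10 case above, with hypothetical values:
 * if the stored CPACR has cp10/cp11 at 0b1111 (i.e. 0xf << 20) and a
 * Non-secure write supplies zeroes in those bits while NSACR.CP10 == 0:
 *
 *     value &= ~(0xf << 20);                        drop written cp10/cp11
 *     value |= env->cp15.cpacr_el1 & (0xf << 20);   restore old 0xf << 20
 *
 * The fields end up write-ignored, exactly as the architecture requires.
 */
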
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     * which is used to calculate the current count.
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    /* SW_INCR counters never overflow on their own, so report no timeout */
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

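/*
 * Worked example of the muldiv64() scaling above: with the fixed 1 GHz
 * ARM_CPU_FREQ, a virtual-clock reading of, say, 2500 ns gives
 *
 *     muldiv64(2500, 1000000000, NANOSECONDS_PER_SECOND) == 2500
 *
 * i.e. one emulated cycle per nanosecond. A hypothetical 500 MHz
 * ARM_CPU_FREQ would instead yield 1250 cycles for the same reading.
 */
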
#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

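/*
 * Worked example of the PMCEID packing above: event 0x011 (CPU_CYCLES) has
 * bit 5 clear, so it sets bit (0x011 & 0x1f) == 17 of pmceid0, while event
 * 0x023 (STALL_FRONTEND) has bit 5 set and therefore sets bit
 * (0x023 & 0x1f) == 3 of pmceid1.
 */
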
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

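/*
 * Worked example of the filter test above, for a hypothetical counter
 * programmed with PMXEVTYPER.U == 1 and NSU == 0 on an EL3-capable CPU:
 * at Non-secure EL0, filtered = (u != nsu) = true, so the counter does not
 * count; at Secure EL0, filtered = u = true as well. Clearing U re-enables
 * counting at EL0 in both security states.
 */
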
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

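/*
 * Worked example of the overflow test above, assuming PMCRLC is clear so
 * overflow_mask is bit 31: if the stale guest count was 0xfffffffe and
 * new_pmccntr has wrapped to 0x00000001, then
 *
 *     0xfffffffe & ~0x00000001 & 0x80000000 == 0x80000000
 *
 * which is non-zero, so bit 31 of PMOVSR is set and the PMU interrupt state
 * is re-evaluated. The expression fires only when the counter's overflow
 * bit goes from 1 to 0, i.e. exactly on a wrap.
 */
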
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

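/*
 * Worked example of the delta bookkeeping above, with hypothetical numbers:
 * at pmevcntr_op_start the underlying event count is snapshotted, say 1000,
 * while the guest-visible counter holds 100. This op_finish then stores
 * delta = 1000 - 100 = 900. If the underlying count has reached 1250 by the
 * next pmevcntr_op_start, the guest value becomes 1250 - 900 = 350, i.e.
 * the original 100 plus the 250 events that elapsed in between.
 */
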
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

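/*
 * Worked example of the deposit64() call above: if the 64-bit PMCCNTR
 * currently holds the hypothetical value 0x1122334455667788 and the guest
 * writes 0xaabbccdd through the 32-bit AArch32 view, then
 *
 *     deposit64(0x1122334455667788, 0, 32, 0xaabbccdd)
 *         == 0x11223344aabbccdd
 *
 * so only the low word changes and the high word is preserved.
 */
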
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

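/*
 * Worked example of the counter decode above: PMEVTYPER11_EL0 is encoded
 * with crm = 12 + (11 >> 3) = 13 and opc2 = 11 & 7 = 3, so
 *
 *     ((13 & 3) << 3) | (3 & 7) == (1 << 3) | 3 == 11
 *
 * recovers the counter index directly from the register encoding.
 */
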
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
            value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        }
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * access.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0x0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read
, .type
= ARM_CP_NO_RAW
},
2045 { .name
= "CSSELR", .state
= ARM_CP_STATE_BOTH
,
2046 .opc0
= 3, .crn
= 0, .crm
= 0, .opc1
= 2, .opc2
= 0,
2048 .accessfn
= access_aa64_tid2
,
2049 .writefn
= csselr_write
, .resetvalue
= 0,
2050 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.csselr_s
),
2051 offsetof(CPUARMState
, cp15
.csselr_ns
) } },
2052 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2053 * just RAZ for all cores:
2055 { .name
= "AIDR", .state
= ARM_CP_STATE_BOTH
,
2056 .opc0
= 3, .opc1
= 1, .crn
= 0, .crm
= 0, .opc2
= 7,
2057 .access
= PL1_R
, .type
= ARM_CP_CONST
,
2058 .accessfn
= access_aa64_tid1
,
2060 /* Auxiliary fault status registers: these also are IMPDEF, and we
2061 * choose to RAZ/WI for all cores.
2063 { .name
= "AFSR0_EL1", .state
= ARM_CP_STATE_BOTH
,
2064 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 0,
2065 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2066 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2067 { .name
= "AFSR1_EL1", .state
= ARM_CP_STATE_BOTH
,
2068 .opc0
= 3, .opc1
= 0, .crn
= 5, .crm
= 1, .opc2
= 1,
2069 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2070 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
2071 /* MAIR can just read-as-written because we don't implement caches
2072 * and so don't need to care about memory attributes.
2074 { .name
= "MAIR_EL1", .state
= ARM_CP_STATE_AA64
,
2075 .opc0
= 3, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
2076 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2077 .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[1]),
2079 { .name
= "MAIR_EL3", .state
= ARM_CP_STATE_AA64
,
2080 .opc0
= 3, .opc1
= 6, .crn
= 10, .crm
= 2, .opc2
= 0,
2081 .access
= PL3_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.mair_el
[3]),
2083 /* For non-long-descriptor page tables these are PRRR and NMRR;
2084 * regardless they still act as reads-as-written for QEMU.
2086 /* MAIR0/1 are defined separately from their 64-bit counterpart which
2087 * allows them to assign the correct fieldoffset based on the endianness
2088 * handled in the field definitions.
2090 { .name
= "MAIR0", .state
= ARM_CP_STATE_AA32
,
2091 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 0,
2092 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2093 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair0_s
),
2094 offsetof(CPUARMState
, cp15
.mair0_ns
) },
2095 .resetfn
= arm_cp_reset_ignore
},
2096 { .name
= "MAIR1", .state
= ARM_CP_STATE_AA32
,
2097 .cp
= 15, .opc1
= 0, .crn
= 10, .crm
= 2, .opc2
= 1,
2098 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
2099 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.mair1_s
),
2100 offsetof(CPUARMState
, cp15
.mair1_ns
) },
2101 .resetfn
= arm_cp_reset_ignore
},
2102 { .name
= "ISR_EL1", .state
= ARM_CP_STATE_BOTH
,
2103 .opc0
= 3, .opc1
= 0, .crn
= 12, .crm
= 1, .opc2
= 0,
2104 .type
= ARM_CP_NO_RAW
, .access
= PL1_R
, .readfn
= isr_read
},
2105 /* 32 bit ITLB invalidates */
2106 { .name
= "ITLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 0,
2107 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2108 .writefn
= tlbiall_write
},
2109 { .name
= "ITLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 1,
2110 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2111 .writefn
= tlbimva_write
},
2112 { .name
= "ITLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 5, .opc2
= 2,
2113 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2114 .writefn
= tlbiasid_write
},
2115 /* 32 bit DTLB invalidates */
2116 { .name
= "DTLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 0,
2117 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2118 .writefn
= tlbiall_write
},
2119 { .name
= "DTLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 1,
2120 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2121 .writefn
= tlbimva_write
},
2122 { .name
= "DTLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 6, .opc2
= 2,
2123 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2124 .writefn
= tlbiasid_write
},
2125 /* 32 bit TLB invalidates */
2126 { .name
= "TLBIALL", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
2127 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2128 .writefn
= tlbiall_write
},
2129 { .name
= "TLBIMVA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
2130 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2131 .writefn
= tlbimva_write
},
2132 { .name
= "TLBIASID", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
2133 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2134 .writefn
= tlbiasid_write
},
2135 { .name
= "TLBIMVAA", .cp
= 15, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
2136 .type
= ARM_CP_NO_RAW
, .access
= PL1_W
, .accessfn
= access_ttlb
,
2137 .writefn
= tlbimvaa_write
},
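/* A note on the pattern used throughout the list above (an explanatory
 * sketch, not new behaviour): the AArch32 views are ARM_CP_ALIAS entries
 * using offsetoflow32(), so they read and write only the low half of the
 * same CPUARMState field that the 64-bit _EL0/_EL1 definitions cover in
 * full via offsetof(). Only the non-alias definition is migrated and
 * reset, which is why most of the 32-bit aliases carry no .resetvalue.
 */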
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
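/* A sketch of the access layering above: an EL0 access to TEEHBR first
 * fails if bit 0 of TEECR is set (CP_ACCESS_TRAP to the usual target EL),
 * and only then is the HSTR.TTEE hypervisor trap considered via
 * teecr_access(). So with TEECR == 1 and HSTR_EL2.TTEE == 1, an EL0
 * access takes the EL1 trap, not the EL2 one, as implemented here.
 */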
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
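/* A worked example of the checks above: a CNTFRQ write from EL1 on a CPU
 * whose highest implemented EL is EL3 reaches the final
 * "!isread && el < arm_highest_el(env)" test and UNDEFs -- only the
 * highest EL may write it. EL0 reads are gated purely on the EL0PCTEN
 * and EL0VCTEN bits (bits [1:0] of CNTKCTL, or of CNTHCTL_EL2 when
 * HCR_EL2.<E2H,TGE> == '11').
 */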
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (has_el2 && timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
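/* An example of the EL0 logic above under HCR_EL2.<E2H,TGE> == '11'
 * ("host" EL0 with VHE): a CNTPCT read is gated only by
 * CNTHCTL_EL2 bit 0 (timeridx == GTIMER_PHYS == 0 indexes EL0PCTEN)
 * and traps to EL2 when that bit is clear; the CNTKCTL_EL1 checks are
 * skipped entirely in that regime.
 */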
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
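/* A numeric sketch of the ISTATUS arithmetic above: with
 * cntvoff_el2 = 0x100, count = 0x80 and cval = 0x200, (count - offset)
 * wraps to a value just below 2^64, so ISTATUS is 1 -- which is exactly
 * why the comparison must be done in unsigned 64-bit arithmetic.
 * The nexttick guard avoids overflowing QEMUTimer's signed-ns domain
 * when cval + offset is close to UINT64_MAX.
 */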
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
*env
, const ARMCPRegInfo
*ri
)
2600 gt_timer_reset(env
, ri
, GTIMER_PHYS
);
2603 static void gt_phys_cval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2606 gt_cval_write(env
, ri
, GTIMER_PHYS
, value
);
2609 static uint64_t gt_phys_tval_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
2611 return gt_tval_read(env
, ri
, GTIMER_PHYS
);
2614 static void gt_phys_tval_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2617 gt_tval_write(env
, ri
, GTIMER_PHYS
, value
);
2620 static void gt_phys_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2623 gt_ctl_write(env
, ri
, GTIMER_PHYS
, value
);
2626 static int gt_phys_redir_timeridx(CPUARMState
*env
)
2628 switch (arm_mmu_idx(env
)) {
2629 case ARMMMUIdx_E20_0
:
2630 case ARMMMUIdx_E20_2
:
2631 case ARMMMUIdx_E20_2_PAN
:
2632 case ARMMMUIdx_SE20_0
:
2633 case ARMMMUIdx_SE20_2
:
2634 case ARMMMUIdx_SE20_2_PAN
:
2641 static int gt_virt_redir_timeridx(CPUARMState
*env
)
2643 switch (arm_mmu_idx(env
)) {
2644 case ARMMMUIdx_E20_0
:
2645 case ARMMMUIdx_E20_2
:
2646 case ARMMMUIdx_E20_2_PAN
:
2647 case ARMMMUIdx_SE20_0
:
2648 case ARMMMUIdx_SE20_2
:
2649 case ARMMMUIdx_SE20_2_PAN
:
2650 return GTIMER_HYPVIRT
;
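/* The two helpers above implement the VHE register redirection: in an
 * E2&0 translation regime (HCR_EL2.E2H set, executing at EL2 or at EL0
 * with TGE) the CNTP_* and CNTV_* encodings are re-routed to the
 * GTIMER_HYP and GTIMER_HYPVIRT timers. The mmu_idx is used here as a
 * cheap proxy for "which regime are we executing in".
 */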
static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
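/* e2h_access() is a sketch's worth of gatekeeping: it guards encodings
 * that only make sense when the VHE E2H redirection is active, so with
 * HCR_EL2.E2H clear an access traps rather than silently reaching the
 * underlying EL0/EL1 state.
 */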
#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
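/* A note on the masks above: writes to the 32-bit PAR are sanitised to
 * the bits that exist at each architecture level -- v7 keeps bits
 * [31:12], [10:9] and [7:0] (0xfffff6ff), pre-v7 keeps bits [31:12],
 * [8] and [7:0] (0xfffff1ff), and with LPAE the value is stored raw
 * because the 64-bit PAR format defines the whole register.
 */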
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /*
         * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
                }
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
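/* Layout sketch of the successful 64-bit PAR assembled above:
 *   bit   0      F    = 0 (no fault)
 *   bits  8:7    SH   (cacheattrs.shareability)
 *   bit   9      NS
 *   bit   11     LPAE (always set in this format)
 *   bits  ..:12  PA   (phys_addr with the low 12 bits masked off)
 *   bits 63:56   ATTR (cacheattrs.attrs)
 * On failure, F = 1 and bits [6:1] carry the long-format FSC instead,
 * with S (bit 9) and PTW (bit 8) flagging stage-2/walk faults.
 */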
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 &&
        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
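/* A worked example for the two converters above: each region's 2
 * permission bits sit packed back-to-back in the "simple" view but in
 * the low half of a 4-bit field in the "extended" view. So
 * simple_mpu_ap_bits(0x31) == 0xd and extended_mpu_ap_bits(0xd) == 0x31:
 * over the bits the simple format keeps, the functions are inverses.
 */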
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
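/* Example of the cached masks computed above, for TTBCR.N == 2:
 *   mask      = ~(0xffffffff >> 2) = 0xc0000000
 *   base_mask = ~(0x3fff >> 2)     = 0xfffff000
 * i.e. VAs with either of the top two bits set are translated via
 * TTBR1, and the TTBR0 table base is aligned to 16KB >> N = 4KB.
 */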
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing.  Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        uint16_t mask = ARMMMUIdxBit_E20_2 |
                        ARMMMUIdxBit_E20_2_PAN |
                        ARMMMUIdxBit_E20_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(env_cpu(env), mask);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in VMID to the stage2 page table (Stage2) invalidates
     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
     */
    if (raw_read(env, ri) != value) {
        uint16_t mask = ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(cs, mask);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_tcr_el12_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
                             offsetof(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = {
        offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
        offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
    },
};
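
/*
 * Illustrative sketch of the TTBCR/TTBCR2 split (derived from the code
 * above, not part of the original comments): TTBCR2 aliases bits [63:32]
 * of raw_tcr via offsetofhigh32(), while vmsa_ttbcr_write() uses
 * deposit64(tcr->raw_tcr, 0, 32, value) so a 32-bit TTBCR write replaces
 * only bits [31:0].  E.g. with raw_tcr == 0x0000000280800000, writing
 * TTBCR = 0x5 yields 0x0000000200000005, and a subsequent TTBCR2 = 0x1
 * write yields 0x0000000100000005.
 */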

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
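
/*
 * Illustrative MPIDR encoding (derived from the code above): bit 31 is
 * the "M" format bit, set whenever the MP extensions are implemented,
 * and bit 30 is the "U" (uniprocessor) bit.  So a coherent SMP core with
 * mp_affinity == 2 (cluster 0, CPU 2) reads MPIDR as 0x80000002, while
 * the same affinity on a uniprocessor configuration such as Cortex-R5
 * would read 0xc0000002.
 */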

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_DIT;
}

static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
}

static const ARMCPRegInfo dit_reginfo = {
    .name = "DIT", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_dit_read, .writefn = aa64_dit_write
};

static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SSBS;
}

static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
}

static const ARMCPRegInfo ssbs_reginfo = {
    .name = "SSBS", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
};

static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence...  */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set.  */
        if (arm_hcr_el2_eff(env) & HCR_TPU) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint16_t mask;

    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mask = ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        mask = ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mask >>= ARM_MMU_IDX_A_NS;
    }

    return mask;
}

/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
                              uint64_t addr)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    int select = extract64(addr, 55, 1);

    return (tbi >> select) & 1 ? 56 : 64;
}
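
/*
 * Worked example (illustrative): tbi is a two-bit field, one bit per
 * half of the VA space, and bit 55 of the address selects the half.
 * With TCR_ELx.TBI1 set and an address whose bit 55 is 1 (a TTBR1
 * address), select == 1 and (tbi >> 1) & 1 == 1, so only the low 56
 * bits of the address are significant for TLB matching; with TBI clear
 * for that half, the full 64 bits participate.
 */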

static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    ARMMMUIdx mmu_idx;

    /* Only the regime of the mmu_idx below is significant. */
    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mmu_idx = ARMMMUIdx_E20_0;
    } else {
        mmu_idx = ARMMMUIdx_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mmu_idx &= ~ARM_MMU_IDX_A_NS;
    }

    return tlbbits_for_regime(env, mmu_idx, addr);
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}
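
/*
 * Note (illustrative, not from the original comments): tlb_force_broadcast()
 * reflects HCR_EL2.FB, which upgrades a non-shareable TLB maintenance
 * operation executed at EL1 into a broadcast one, so the non-IS write
 * function above may still flush all CPUs rather than only the local one.
 */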

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static int e2_tlbmask(CPUARMState *env)
{
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE20_0 |
               ARMMMUIdxBit_SE20_2 |
               ARMMMUIdxBit_SE20_2_PAN |
               ARMMMUIdxBit_SE2;
    } else {
        return ARMMMUIdxBit_E20_0 |
               ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E2;
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
    } else {
        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    bool secure = arm_is_secure_below_el3(env);
    int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
    int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
                                  pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                  ARMMMUIdxBit_SE3, bits);
}

#ifdef TARGET_AARCH64
typedef struct {
    uint64_t base;
    uint64_t length;
} TLBIRange;

static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
                                     uint64_t value)
{
    unsigned int page_size_granule, page_shift, num, scale, exponent;
    /* Extract one bit to represent the va selector in use. */
    uint64_t select = sextract64(value, 36, 1);
    ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
    TLBIRange ret = { };

    page_size_granule = extract64(value, 46, 2);

    /* The granule encoded in value must match the granule in use. */
    if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
                      page_size_granule);
        return ret;
    }

    page_shift = (page_size_granule - 1) * 2 + 12;
    num = extract64(value, 39, 5);
    scale = extract64(value, 44, 2);
    exponent = (5 * scale) + 1;

    ret.length = (num + 1) << (exponent + page_shift);

    if (param.select) {
        ret.base = sextract64(value, 0, 37);
    } else {
        ret.base = extract64(value, 0, 37);
    }
    if (param.ds) {
        /*
         * With DS=1, BaseADDR is always shifted 16 so that it is able
         * to address all 52 va bits.  The input address is perforce
         * aligned on a 64k boundary regardless of translation granule.
         */
        page_shift = 16;
    }
    ret.base <<= page_shift;

    return ret;
}
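
/*
 * Worked example of the range decode above (illustrative): for a 4K
 * granule, page_size_granule == 1 so page_shift == 12.  With NUM == 3
 * (bits [43:39]) and SCALE == 1 (bits [45:44]), exponent == 5 * 1 + 1
 * == 6 and the invalidated length is (3 + 1) << (6 + 12) == 1MiB,
 * starting at BaseADDR << 12.
 */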

static void do_rvae_write(CPUARMState *env, uint64_t value,
                          int idxmap, bool synced)
{
    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
    TLBIRange range;
    int bits;

    range = tlbi_aa64_get_range(env, one_idx, value);
    bits = tlbbits_for_regime(env, one_idx, range.base);

    if (synced) {
        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
                                                  range.base,
                                                  range.length,
                                                  idxmap,
                                                  bits);
    } else {
        tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
                                  range.length, idxmap, bits);
    }
}

static void tlbi_aa64_rvae1_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL1&0.
     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae1_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae1is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
     * shareable specific flushes.
     */

    do_rvae_write(env, value, vae1_tlbmask(env), true);
}

static int vae2_tlbmask(CPUARMState *env)
{
    return (arm_is_secure_below_el3(env)
            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
}

static void tlbi_aa64_rvae2_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL2.
     * Currently handles all of RVAE2 and RVALE2,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae2is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable, EL2.
     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer shareable specific flushes.
     */

    do_rvae_write(env, value, vae2_tlbmask(env), true);
}

static void tlbi_aa64_rvae3_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL3.
     * Currently handles all of RVAE3 and RVALE3,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae3is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, EL3, Inner/Outer Shareable.
     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer specific flushes.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
}
#endif

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
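
/*
 * DCZID_EL0 encoding sketch (derived from the code above, illustrative):
 * the low four bits (BS) hold cpu->dcz_blocksize, the log2 of the DC ZVA
 * block size in 4-byte words, so a dcz_blocksize of 4 advertises
 * 2^4 words == 64 bytes.  Bit 4 (DZP) is set whenever DC ZVA would be
 * trapped, telling software not to use the instruction.
 */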

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented.  */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
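
/*
 * Illustrative: the two halves are glued together with deposit64(), so an
 * AArch32 write of 0x80000000 to HCR while HCR2 currently holds 0x1
 * leaves env->cp15.hcr_el2 == 0x0000000180000000 (before the RES0
 * masking in do_hcr_write() is applied).  The valid_mask argument covers
 * the half being preserved so it survives that masking.
 */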

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (!arm_is_el2_enabled(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this is condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}
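
/*
 * Example: with an AArch32 EL3, SCR.NS = 1 and NSACR.CP10 = 0, a
 * non-secure write of 0 to HCPTR leaves bits [11:10] at their previous
 * value, and cptr_el2_read() forces those bits to 1 -- the "ignore
 * writes and read as 1" behaviour quoted in the comments above.
 */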

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
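
/*
 * Summary of the rules above: EL3 always succeeds; Secure EL1 traps to
 * EL2 when SCR_EL3.EEL2 is set and otherwise to EL3; non-secure EL1
 * and EL2 may read the register but their writes UNDEF.
 */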

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state.  */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}

static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}

static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register.  */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}
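
/*
 * Example: a read of SCTLR_EL1 executed at EL2 with HCR_EL2.E2H set
 * lands in el2_e2h_read(), which swaps ri for the saved SCTLR_EL2
 * reginfo (stashed in ri->opaque by
 * define_arm_vh_e2h_redirects_aliases() below) and so returns EL2
 * state; at any other EL the original accessor runs unchanged.
 */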

static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5,  1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5,  1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5,  2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5,  2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5,  2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5,  4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5,  4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5,  5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5,  5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5,  5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5,  6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5,  1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },

        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5,  5, 6, 0),
          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys.  */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will.  */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data. */
        {
            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
            bool ok;

            new_reg->name = a->new_name;
            new_reg->type |= ARM_CP_ALIAS;
            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
            new_reg->access &= PL2_RW | PL3_RW;

            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
            g_assert(ok);
        }

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
#endif
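
/*
 * Example: for the SCTLR row of the table above, accesses to "SCTLR"
 * now dispatch through el2_e2h_read/write and reach SCTLR_EL2 state
 * only when redirect_for_e2h() holds, while the newly registered
 * "SCTLR_EL12" alias (dup'd before redirection) gives EL2 and EL3 a
 * direct window onto the EL1 register.
 */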

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
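
/*
 * Example: an AArch32 write of the key value 0xC5ACCE55 to OSLAR sets
 * the OS lock and any other value clears it, while the AArch64 form
 * only consults bit 0 of the written value; either way the result is
 * visible to the guest as OSLSR_EL1.OSLK (bit 1).
 */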

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /*
     * MDCCSR_EL0[30:29] map to EDSCR[30:29].  Simply RAZ as the external
     * Debug Communication Channel is not implemented.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2].  Map all bits as
     * it is unlikely a guest will care.
     * We don't implement the configurable EL0 access.
     */
    { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);

    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        /* Check CPACR.ZEN.  */
        switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            /* route_to_el2 */
            return hcr_el2 & HCR_TGE ? 2 : 1;
        }

        /* Check CPACR.FPEN.  */
        switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 0;
        }
    }

    /*
     * CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE).
     */
    if (el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            /* Check CPTR_EL2.ZEN.  */
            switch (extract32(env->cp15.cptr_el[2], 16, 2)) {
            case 1:
                if (el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }

            /* Check CPTR_EL2.FPEN.  */
            switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
            case 1:
                if (el == 2 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 0;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (env->cp15.cptr_el[2] & CPTR_TZ) {
                return 2;
            }
            if (env->cp15.cptr_el[2] & CPTR_TFP) {
                return 0;
            }
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}

uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    start_len = MIN(start_len, ARM_MAX_VQ - 1);
    end_len = start_len;

    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}
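
/*
 * Example: if the supported vector quanta are {1, 2, 4} (bits 0, 1 and
 * 3 of sve_vq_map), then start_len == 2 (VQ 3) fails the test_bit()
 * check and find_last_bit() selects bit 1, i.e. VQ 2, the next smaller
 * supported length.
 */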

/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
}
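
/*
 * Example (assuming sve_max_vq is large enough not to clamp first):
 * with ZCR_EL1.LEN = 3 and ZCR_EL2.LEN = 1, the effective length for
 * EL1 is MIN-reduced to 1 (a 256-bit vector) before being rounded to a
 * supported quantum by aarch64_sve_zcr_get_valid_len().
 */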

static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
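
/*
 * Example: WCR.BAS = 0b00001100 on a doubleword-aligned WVR watches
 * bytes 2 and 3 only: basstart = ctz32(bas) = 2, len = cto32(bas >> 2)
 * = 2, and the watchpoint is inserted at wvr + 2.
 */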

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /*
     * Bits [1:0] are RES0.
     *
     * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
     * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
     * they contain the value written.  It is CONSTRAINED UNPREDICTABLE
     * whether the RESS bits are ignored when comparing an address.
     *
     * Therefore we are allowed to compare the entire register, which lets
     * us avoid considering whether or not FEAT_LVA is actually enabled.
     */
    value &= ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written.  It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
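
/*
 * Example: a guest write of BAS = 0b0001 reads back as 0b0011 and a
 * write of 0b0100 reads back as 0b1100, because the deposits above
 * mirror BAS[0] into BAS[1] and BAS[2] into BAS[3]; this is what
 * restricts hw_breakpoint_update() to the four BAS patterns listed in
 * its comment.
 */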

static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32.  Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}

static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement four counters in
     * addition to the cycle count register.
     */
    unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access, .writefn = pmcr_write,
        .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
                      PMCRLC,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };
    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}
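
/*
 * Example of the event counter encoding used above: PMEVCNTR5 gets
 * crm = 8 | (3 & (5 >> 3)) = 8 and opc2 = 5 & 7 = 5, while a
 * hypothetical counter 10 would land at crm = 9, opc2 = 2.  The four
 * counters implemented here (PMCR_NUM_COUNTERS) all fit in the first
 * crm of each of the 8/12 (counter/typer) groups.
 */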

/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif

/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env, ri, isread);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo tlbirange_reginfo[] = {
    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    REGINFO_SENTINEL
};
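
/*
 * FEAT_TLBIOS adds outer-shareable forms of the existing maintenance
 * ops.  As in the range table above, these are wired to the
 * inner-shareable write handlers; the IPAS2*OS forms stay ARM_CP_NOP,
 * matching the treatment of the non-range IPAS2 invalidates elsewhere
 * in this file.
 */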
static const ARMCPRegInfo tlbios_reginfo[] = {
    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    REGINFO_SENTINEL
};
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}
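
/*
 * A note on the flag encoding used above: QEMU stores the condition
 * flags in a lazy form.  N is the sign bit of env->NF, Z is set when
 * env->ZF == 0, C is bit 0 of env->CF and V is the sign bit of
 * env->VF.  So "NZCV = 0000" is NF = CF = VF = 0 with ZF nonzero, and
 * "NZCV = 0100" (Z set, the RNDR failure indication) is ZF = 0.
 */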
/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);
        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
    }
}
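
/*
 * For reference, the CTR_EL0.DminLine arithmetic above: DminLine is
 * log2 of the number of 4-byte words in the smallest data cache line,
 * so the line size in bytes is 4 << DminLine.  For example, with the
 * common DminLine = 4 this gives 4 << 4 = 64 bytes, and masking with
 * ~(64 - 1) aligns the input address down to the start of that line.
 */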
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};
#endif /*CONFIG_USER_ONLY*/
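
/*
 * HCR_EL2.TID5 covers ID group 5, which consists only of GMID_EL1;
 * the access function below implements that trap for the MTE
 * registers defined further down.
 */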
static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}
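
/*
 * PSTATE.TCO (Tag Check Override, bit 25) is kept directly in
 * env->pstate, so the accessors above only need to mask that one bit
 * in and out; no other PSTATE state is touched.
 */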
static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
      .access = PL1_R, .accessfn = access_aa64_tid5,
      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
    REGINFO_SENTINEL
};
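
/*
 * The DC *G* ops below clean or invalidate allocation tags to the
 * point of coherency.  QEMU does not model caches, so like the other
 * data cache maintenance ops they can all be ARM_CP_NOP; only
 * DC GVA / DC GZVA, which actually write tags (and, for GZVA, zero
 * the data), need the dedicated ARM_CP_DC_GVA / ARM_CP_DC_GZVA
 * handling in the translator.
 */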
static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    REGINFO_SENTINEL
};
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
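
/*
 * CFP/DVP/CPP RCTX are the FEAT_SPECRES "restriction by context"
 * prediction-invalidation ops.  SCTLR_EL1.EnRCTX gates their use from
 * EL0, hence the CP_ACCESS_TRAP above when the bit is clear.
 */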
static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
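
/*
 * The CNT*_EL02 entries in the table below are the FEAT_VHE aliases:
 * with HCR_EL2.E2H == 1 they give EL2 a view of the EL1 timer
 * registers, which is why they are ARM_CP_ALIAS over the same
 * c14_timer state and gated by e2h_access.
 */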
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO, .access = PL2_RW,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    REGINFO_SENTINEL
};
#endif

/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never for ARMv7, optionally in ARMv8
 * and mandatorily for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64zfr0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_pfr2 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL1_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL2_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .access = PL3_R,
              .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
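            /*
             * A worked example of the rearrangement above, with an
             * illustrative (not core-specific) reset_cbar of
             * 0x0000000a2c000000: bits [31:18] survive in place
             * (0x2c000000) and bits [43:32] (0x00a) land in [11:0],
             * giving cbar32 = 0x2c00000a.
             */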
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache.  Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }
    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }
#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
        define_arm_cp_regs(cpu, tlbirange_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbios, cpu)) {
        define_arm_cp_regs(cpu, tlbios_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}
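
/*
 * Illustrative sketch (not part of the build): registers for a further
 * optional extension would follow the same pattern as the blocks above,
 * e.g. a hypothetical FEAT_FOO with a reginfo list foo_reginfo:
 *
 *     if (cpu_isar_feature(aa64_foo, cpu)) {
 *         define_arm_cp_regs(cpu, foo_reginfo);
 *     }
 *
 * where aa64_foo would test the relevant ID register field, so the
 * registers only exist on CPU models that advertise the feature.
 */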
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}
void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
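
/*
 * Illustrative example (abridged, exact field set depends on the QEMU
 * version): over QMP this is exposed as query-cpu-definitions, and each
 * list entry pairs the short CPU name with its QOM typename, e.g.
 *
 *     { "name": "cortex-a57", "typename": "cortex-a57-arm-cpu" }
 */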
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
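
/*
 * Illustrative sketch (not part of the build): for an ARM_CP_STATE_BOTH
 * register, define_one_arm_cp_reg_with_opaque() below ends up calling this
 * function once per execution state, roughly:
 *
 *     add_cpreg_to_hashtable(cpu, r, opaque, ARM_CP_STATE_AA32,
 *                            ARM_CP_SECSTATE_NS, crm, opc1, opc2, r->name);
 *     add_cpreg_to_hashtable(cpu, r, opaque, ARM_CP_STATE_AA64,
 *                            ARM_CP_SECSTATE_NS, crm, opc1, opc2, r->name);
 *
 * so the AArch32 and AArch64 views get distinct hash keys (ENCODE_CP_REG
 * vs ENCODE_AA64_CP_REG) while sharing the same underlying state field.
 */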
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /*
     * This API is only for Arm's system coprocessors (14 and 15) or
     * (M-profile or v7A-and-earlier only) for implementation defined
     * coprocessors in the range 0..7.  Our decode assumes this, since
     * 8..13 can be used for other insns including VFP and Neon.  See
     * valid_cp() in translate.c.  Assert here that we haven't tried
     * to use an invalid coprocessor number.
     */
    switch (r->state) {
    case ARM_CP_STATE_BOTH:
        /* 0 has a special meaning, but otherwise the same rules as AA32. */
        if (r->cp == 0) {
            break;
        }
        /* fall through */
    case ARM_CP_STATE_AA32:
        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
            assert(r->cp >= 14 && r->cp <= 15);
        } else {
            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
        }
        break;
    case ARM_CP_STATE_AA64:
        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
        break;
    default:
        g_assert_not_reached();
    }
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            /* Register is declared as both secure and
                             * non-secure; define both instances.
                             */
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
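
/*
 * Illustrative sketch (not part of the build): a wildcarded definition
 * such as this hypothetical write-ignored register
 *
 *     static const ARMCPRegInfo example_wi = {
 *         .name = "EXAMPLE_WI", .cp = 15, .crn = 7, .crm = CP_ANY,
 *         .opc1 = CP_ANY, .opc2 = CP_ANY,
 *         .access = PL1_W, .type = ARM_CP_NOP,
 *     };
 *     define_one_arm_cp_reg(cpu, &example_wi);
 *
 * expands via the nested loops above into one hashtable entry per concrete
 * crm/opc1/opc2 combination; all but the crm=0/opc1=0/opc2=0 entry are
 * marked ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable().
 */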
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
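
/*
 * Illustrative sketch (not part of the build): a mods table may mix glob
 * patterns and exact names, along the lines of the ID-register handling
 * earlier in this file:
 *
 *     static ARMCPRegUserSpaceInfo example_user_idregs[] = {
 *         { .name = "ID_AA64*", .is_glob = true },
 *         { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffff },
 *         REGUSERINFO_SENTINEL
 *     };
 *     modify_arm_cp_regs(example_idregs, example_user_idregs);
 *
 * A glob match turns every matching register into a RAZ constant, while an
 * exact match can keep selected fields via exported_bits/fixed_bits.
 */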
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
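
/*
 * Worked example (illustrative): with NF = 0x80000000 (N set), ZF = 0
 * (Z set, since ZF is "zero means Z"), CF = 1, thumb = 1 and all other
 * cached fields zero, the value reconstructed above is
 *
 *     0x80000000 | (1 << 30) | (1 << 29) | (1 << 5) = 0xe0000020
 *
 * i.e. N, Z and C in bits 31..29 and the T bit in bit 5, matching the
 * architectural CPSR layout.
 */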
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;
    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
        (mask & (CPSR_M | CPSR_E | CPSR_IL));

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
    if (rebuild_hflags) {
        arm_rebuild_hflags(env);
    }
}
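
/*
 * Illustrative example: a guest MSR write that attempts to enter an
 * invalid mode, e.g.
 *
 *     cpsr_write(env, 0x1a, CPSR_M, CPSRWriteByInstr);  // 0x1a == Hyp
 *
 * leaves CPSR.M unchanged and, on v8, sets PSTATE.IL, per the
 * bad_mode_switch() handling above.
 */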
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
{
    /*
     * Take a division-by-zero exception if necessary; otherwise return
     * to get the usual non-trapping division behaviour (result of 0)
     */
    if (arm_feature(env, ARM_FEATURE_M)
        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
    }
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
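
/*
 * Illustrative examples of the edge cases handled above:
 *
 *     HELPER(sdiv)(env, INT_MIN, -1)  -> INT_MIN  (overflow wraps, no trap)
 *     HELPER(sdiv)(env, x, 0)         -> 0, or a UsageFault on M-profile
 *                                        when CCR.DIV_0_TRP is set
 *     HELPER(udiv)(env, x, 0)         -> 0, or the same M-profile trap
 *
 * matching the Arm ARM rules for SDIV/UDIV.
 */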
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contain a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT  IRQ    IMO      Non-secure         Secure
 *        EL3  FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};
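
/*
 * Worked example (illustrative): a non-secure IRQ taken at EL0 with a
 * 64-bit EL3, SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.{IMO,TGE} = 0
 * indexes the table as
 *
 *     target_el_table[1][0][1][0][0][0] == 1
 *
 * i.e. the interrupt is taken to EL1, as expected when no routing
 * override applies.
 */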
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    };

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
void arm_log_exception(CPUState *cs)
{
    int idx = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
                      idx, exc, cs->cpu_index);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
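
/*
 * Illustrative summary of the fixed r13/r14 -> x13..x23 mapping used above
 * (and inverted by aarch64_sync_64_to_32() below):
 *
 *     x13/x14: USR/SYS sp, lr      x18/x19: SVC lr, sp
 *     x15:     HYP sp              x20/x21: ABT lr, sp
 *     x16/x17: IRQ lr, sp          x22/x23: UND lr, sp
 */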
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * of the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x08;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    case EXCP_SMC:
        addr = 0x08;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
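
/*
 * Worked example (illustrative): if the exception was taken from AArch32
 * SVC mode, aarch64_regnum(env, 13) returns 19 and aarch64_regnum(env, 14)
 * returns 18, matching the xregs[] slots that aarch64_sync_32_to_64()
 * above uses for the SVC banked SP and LR.
 */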
static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }
    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG only as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
             ? 2 : 1;
    }
    return env->cp15.sctlr_el[el];
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
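
/*
 * Illustrative example: with HCR_EL2.DC = 1, a stage 1 regime such as
 * ARMMMUIdx_Stage1_E1 reports "translation disabled" above regardless of
 * SCTLR_EL1.M, while the Stage2 regimes report "enabled" because DC makes
 * HCR.VM behave as 1.
 */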
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
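/*
 * Summary of the AP[2:0] decode above (short-descriptor format): 0 is no
 * access on v7 (S/R-dependent on earlier cores); 1 is privileged RW;
 * 2 is privileged RW, user RO; 3 is RW for all; 4 is reserved; 5 is
 * privileged RO; 6 is RO for all; 7 is RO for all on v6K and later.
 */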
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
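/*
 * Example S2AP decode from the function above: S2AP = 0b01 gives read-only
 * (PAGE_READ), S2AP = 0b11 gives read/write. With FEAT_TTS2UXN the two XN
 * bits then select which translation half may execute: e.g. XN = 0b01 makes
 * the page executable only for the EL0 part of an S1+2 walk
 * (s1_is_el0 == true).
 */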
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            g_assert_not_reached();
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
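/*
 * Example of the TTBR0/TTBR1 split above, assuming TTBCR.N = 2: tcr->mask
 * covers the top two VA bits, so 0x00000000..0x3fffffff walks from TTBR0
 * (aligned via tcr->base_mask) and 0x40000000..0xffffffff walks from TTBR1,
 * unless the corresponding PD0/PD1 bit disables that walk.
 */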
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                          : ARMMMUIdx_Stage2;
        ARMCacheAttrs cacheattrs = {};
        MemTxAttrs txattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            (cacheattrs.attrs & 0xf0) == 0) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2pa;
    }
    return addr;
}
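/*
 * Note on the flow above: every descriptor fetch made during a stage 1 walk
 * is itself translated through stage 2 here, so a single guest access can
 * trigger several nested get_phys_addr_lpae() calls. A failure is reported
 * back with fi->s1ptw set so the caller can distinguish "the walk itself
 * faulted at S2" from an ordinary S1 fault.
 */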
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/**
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1. Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
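/*
 * Worked example of the startsizecheck computation above: for a 4KB granule
 * (stride = 9, grainsize = 12), inputsize = 40 and a suggested starting
 * level of 1, startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which lies
 * within [1, stride + 4] = [1, 13], so the SL0 choice is accepted.
 */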
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
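/*
 * Example conversion: s2attrs = 0xf (Normal, Write-Back inner and outer)
 * gives hiattr = loattr = 3 and, with caches enabled, hihint = lohint = 3,
 * producing the stage 1 MAIR encoding 0xff (Write-Back, RW-allocate).
 */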
#endif /* !CONFIG_USER_ONLY */

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};

/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /*
     * id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.
     */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
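/*
 * Example: a CPU reporting ID_AA64MMFR0.PARANGE = 5 maps to a 48-bit
 * physical address size, so arm_pamax() returns 48; PARANGE = 0 gives the
 * architectural minimum of 32 bits.
 */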
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits.  */
        return extract32(tcr, 20, 1) * 3;
    }
}

static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits.  */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits.  */
        return extract32(tcr, 30, 1) * 3;
    }
}
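/*
 * The "* 3" in the single-range cases above is a replication trick:
 * multiplying the one-bit field by 3 maps 0 -> 0b00 and 1 -> 0b11, so
 * callers can always treat the result as a two-bit {TTBR1, TTBR0} pair and
 * select one half with (value >> select) & 1.
 */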
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool epd, hpd, using16k, using64k, tsz_oob, ds;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMCPU *cpu = env_archcpu(env);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
        sh = extract32(tcr, 12, 2);
        ps = extract32(tcr, 16, 3);
        ds = extract64(tcr, 32, 1);
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
        }
        ps = extract64(tcr, 32, 3);
        ds = extract64(tcr, 59, 1);
    }

    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - using64k;
    } else {
        max_tsz = 39;
    }

    /*
     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
     * adjust the effective value of DS, as documented.
     */
    min_tsz = 16;
    if (using64k) {
        if (cpu_isar_feature(aa64_lva, cpu)) {
            min_tsz = 12;
        }
        ds = false;
    } else if (ds) {
        switch (mmu_idx) {
        case ARMMMUIdx_Stage2:
        case ARMMMUIdx_Stage2_S:
            if (using16k) {
                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
            }
            break;
        default:
            if (using16k) {
                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
            }
            break;
        }
        if (ds) {
            min_tsz = 12;
        }
    }

    if (tsz > max_tsz) {
        tsz = max_tsz;
        tsz_oob = true;
    } else if (tsz < min_tsz) {
        tsz = min_tsz;
        tsz_oob = true;
    } else {
        tsz_oob = false;
    }

    /* Present TBI as a composite with TBID.  */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .ps = ps,
        .sh = sh,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
        .tsz_oob = tsz_oob,
        .ds = ds,
    };
}
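/*
 * Illustrative parameter extraction, assuming a typical Linux EL1 setup
 * with T0SZ = T1SZ = 16 and 4KB granules: a user VA such as
 * 0x0000_7fff_ffff_f000 has bit 55 clear, so select = 0 and the TTBR0
 * fields (TCR bits [5:0] etc.) are used; a kernel VA such as
 * 0xffff_8000_0000_0000 has bit 55 set, selecting the TTBR1 fields.
 * Either way the usable VA space is 64 - 16 = 48 bits.
 */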
#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a long-format
 * DFSR/IFSR fault register, with the following caveats:
 *  * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
 *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
 *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size_ptr: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
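        /*
         * Worked instance of the formula above: inputsize = 48 with a 4KB
         * granule (stride = 9) gives level = 4 - 44 / 9 = 0, i.e. a
         * four-level walk; inputsize = 39 gives level = 4 - 35 / 9 = 1.
         */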
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is RES0 unless DS=1 & 4kb granule. */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                fault_type = ARMFault_Translation;
                goto do_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fault_type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }

        descaddr = descriptor & descaddrmask;

        /*
         * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
         * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
         * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
         * raise AddressSizeFault.
         */
        if (outputsize > 48) {
            if (param.ds) {
                descaddr |= extract64(descriptor, 8, 2) << 50;
            } else {
                descaddr |= extract64(descriptor, 12, 4) << 48;
            }
        } else if (descaddr >> outputsize) {
            fault_type = ARMFault_AddressSize;
            goto do_fault;
        }

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies. Note that although
         * descaddrmask masks enough of the low bits of the descriptor
         * to give a correct page or table address, the address field
         * in a block descriptor is smaller; so we need to explicitly
         * clear the lower bits here before ORing in the low vaddr bits.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr &= ~(page_size - 1);
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        *prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(txattrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        cacheattrs->shareability = param.sh;
    } else {
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}
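/*
 * Page/block size recap for the walk above: page_size is
 * 1 << (stride * (4 - level) + 3), so with 4KB granules (stride = 9) a
 * level 3 page entry maps 4KB, a level 2 block maps 2MB and a level 1
 * block maps 1GB, matching the architectural block sizes.
 */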
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
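/*
 * Subregion example for the SRD handling above: a 256-byte region has
 * rsize = 8, so each of its 8 subregions spans 32 bytes (rsize - 3 = 5
 * bits). If DRSR.SRD disables the subregion containing the address, the
 * region is treated as not matching and the search continues with
 * lower-numbered regions.
 */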
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 0;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
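/*
 * Example combination: s1 = 0xf (Write-Back, RW-allocate) with s2 = 0xa
 * (Write-Through) yields (2 << 2) | 3 = 0xb, i.e. Write-Through with the
 * stage 1 allocation hints preserved; if either nibble were 4
 * (Non-cacheable), the result would be 4.
 */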
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi;
    ARMCacheAttrs ret;
    bool tagged = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2.attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2.attrs, 4, 4);

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            bool ipa_secure;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            ipa_secure = attrs->secure;
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
                } else {
                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
                }
            } else {
                assert(!ipa_secure);
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;

            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* If S2 fails, return early.  */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cache attributes. */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);

            /* Check if IPA translates to secure or non-secure PA space. */
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
                          || (env->cp15.vstcr_el2.raw_tcr
                              & (VSTCR_SA | VSTCR_SW)));
                }
            }
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled.  S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI.  But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff;  /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00;      /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMCacheAttrs cacheattrs = {};

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif
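/*
 * Illustrative use of the debug accessor above (a sketch, not code from
 * this file): a debug agent wanting to read guest memory by virtual
 * address would do something like
 *
 *     MemTxAttrs attrs;
 *     hwaddr phys = arm_cpu_get_phys_page_attrs_debug(cs, vaddr, &attrs);
 *     if (phys == -1) {
 *         // no valid translation; report an error to the debugger
 *     }
 *
 * i.e. -1 is the only failure indication; the fault details collected
 * in the ARMMMUFaultInfo are deliberately discarded for debug accesses.
 */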
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
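/*
 * Worked examples (for illustration): add16_sat(0x7fff, 1) wraps to
 * 0x8000; the sign of the result differs from 'a' while 'a' and 'b'
 * agree in sign, so it saturates to 0x7fff.  Likewise
 * sub8_sat(0x80, 0x01) wraps to 0x7f and saturates back to 0x80.
 */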
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
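/*
 * Each inclusion of "op_addsub.h" with a fresh set of ADD16/SUB16/ADD8/
 * SUB8/PFX definitions stamps out one family of parallel add/sub helpers
 * (the header undefines the macros again when it is done).  Roughly
 * sketched, the inclusion above expands to helpers along the lines of:
 *
 *     uint32_t HELPER(qadd16)(uint32_t a, uint32_t b)
 *     {
 *         uint32_t res = 0;
 *         ADD16(a, b, 0);
 *         ADD16(a >> 16, b >> 16, 1);
 *         return res;
 *     }
 *
 * with RESULT() packing each lane into 'res'; the exact shape is defined
 * by op_addsub.h.
 */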
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
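/*
 * For the modulo flavours the interesting output is the GE bits, later
 * consumed by the SEL instruction.  Worked example (illustrative): a
 * uadd16 of halfwords 0xffff and 0x0001 sums to 0x10000, so
 * (sum >> 16) == 1 and both GE bits for that lane are set, while the
 * lane result itself wraps to 0.  For the subtractions the GE bits are
 * set when there is no borrow, i.e. (sum >> 16) == 0.
 */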
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
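/*
 * Worked example: usad8(0x01020304, 0x04030201) sums |4-1| + |3-2| +
 * |2-3| + |1-4| = 3 + 1 + 1 + 3 = 8, taking one byte per lane from
 * least to most significant.
 */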
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
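/*
 * Worked example (illustrative values): with flags == 0x5 (GE bits 0
 * and 2 set) the mask becomes 0x00ff00ff, so bytes 0 and 2 of the
 * result come from 'a' and bytes 1 and 3 from 'b':
 * sel_flags(0x5, 0x11223344, 0xaabbccdd) == 0xaa22cc44.
 */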
/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
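/*
 * These back the A32/A64 CRC32/CRC32C instructions: the translator
 * passes the accumulator, the (zero-extended) source value and the byte
 * count, so a CRC32B arrives here with bytes == 1 and a CRC32W with
 * bytes == 4.  The XORs reconcile the guest instructions, which use no
 * pre/post inversion, with the zlib and Linux conventions noted above.
 */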
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /*
     * CPTR_EL2 is present in v7VE or v8, and changes format
     * with HCR_EL2.E2H (regardless of TGE).
     */
    if (cur_el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            /* Check CPTR_EL2.FPEN.  */
            switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
            case 1:
                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (env->cp15.cptr_el[2] & CPTR_TFP) {
                return 2;
            }
        }
    }

    /* CPTR_EL3 : present in v8 */
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
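/*
 * Worked example: for an FP access from EL0 with CPACR_EL1.FPEN == 0,
 * the CPACR switch above returns 1 (trap to EL1), or 3 when we are
 * secure and EL3 is AArch32, since the trap is then taken to Secure
 * PL1.  With FPEN == 3 the CPACR imposes no trap and the CPTR_EL2 and
 * CPTR_EL3 checks decide the result.
 */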
/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE20_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 1:
        if (env->pstate & PSTATE_PAN) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2.  */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (env->pstate & PSTATE_PAN) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }

    if (arm_is_secure_below_el3(env)) {
        idx &= ~ARM_MMU_IDX_A_NS;
    }

    return idx;
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }
    return flags;
}
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1.  */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
{
    CPUARMTBFlags flags = {};

    DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
    return flags;
}
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
        DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
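/*
 * Worked TBI/TBID example (illustrative): with TCR_EL1.TBI0 == 1 and
 * TCR_EL1.TBID0 == 1, aa64_va_parameter_tbi() reports bit 0 set and so
 * does aa64_va_parameter_tbid(), giving tbid == 1 and tbii == 0 above:
 * data accesses in the low half of the address space ignore the top
 * byte, while instruction fetches do not.
 */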
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}
static bool mve_no_pred(CPUARMState *env)
{
    /*
     * Return true if there is definitely no predication of MVE
     * instructions by VPR or LTPSIZE. (Returning false even if there
     * isn't any predication is OK; generated code will just be
     * a little worse.)
     * If the CPU does not implement MVE then this TB flag is always 0.
     *
     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
     * logic in gen_update_fp_context() needs to be updated to match.
     *
     * We do not include the effect of the ECI bits here -- they are
     * tracked in other TB flags.  This simplifies the logic for
     * "when did we emit code that changes the MVE_NO_PRED TB flag
     * and thus need to end the TB?".
     */
    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
        return false;
    }
    if (env->v7m.vpr) {
        return false;
    }
    if (env->v7m.ltpsize < 4) {
        return false;
    }
    return true;
}
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUARMTBFlags flags;

    assert_hflags_rebuild_correctly(env);
    flags = env->hflags;

    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            DP_TBFLAG_A64(flags, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                DP_TBFLAG_M32(flags, LSPACT, 1);
            }

            if (mve_no_pred(env)) {
                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                DP_TBFLAG_A32(flags, VFPEN, 1);
            }
        }

        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
     */
    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
    }

    *pflags = flags.flags;
    *cs_base = flags.flags2;
}
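/*
 * Example of the single-step machinery above: on exception return with
 * MDSCR_EL1.SS enabled and SPSR.SS == 1, SS_ACTIVE and PSTATE__SS are
 * both set (Active-not-pending), one instruction executes and clears
 * PSTATE.SS, and the following TB is generated in the Active-pending
 * state, where the step exception is taken.
 */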
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
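/*
 * Worked example (with ARM_MAX_VQ == 16): narrowing to vq == 3 zeroes
 * zregs[i].d[6] upwards, i.e. the bytes beyond the new 48-byte vectors.
 * For the predicates, each 64-bit p[j] word holds 16 bits per quadword
 * of vector, so j starts at 3 / 4 == 0 with pmask keeping only the low
 * 16 * 3 == 48 bits; every later word is cleared outright.
 */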
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}