/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "cpregs.h"
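
/*
 * This file rebuilds env->hflags, the cached set of translation-block
 * flags: everything the translator needs to know about the current CPU
 * state other than the PC. Whenever guest state that feeds into these
 * flags changes, arm_rebuild_hflags() (or one of the HELPER variants
 * below) must be called to keep the cache in sync.
 */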
static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

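/*
 * Flags cached for every translation regime: the FP exception EL, the
 * core index of the current MMU translation regime, and whether
 * architectural single-step is active (in which case the translator
 * ends the TB after a single instruction).
 */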
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

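/*
 * M-profile flags: alignment checking, Handler mode, stack limit
 * checking, and the current security state.
 */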
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

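/*
 * FEAT_SME_FA64 indicates that the full A64 instruction set is available
 * while in Streaming SVE mode; without it, many A64 insns must trap
 * while PSTATE.SM is set.
 */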
/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

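/*
 * A-profile AArch32 flags: alignment checking, VFP enable, HSTR_EL2 and
 * fine-grained traps, an illegal-execution state, and the SME
 * non-streaming trap check.
 */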
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

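/*
 * A-profile AArch64 flags. Everything the A64 translator conditions on
 * beyond the PC is gathered here: TBI/TBID, SVE/SME vector lengths and
 * trap ELs, pauth/BTI/MTE/LSE2 enables, endianness, alignment checking,
 * fine-grained traps, and the LDTR/STTR unprivileged-access regime.
 */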
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses. */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE are disabled, translator does not need len.
         * If SVE EL > FP EL, FP exception has precedence, and translator
         * does not need SVE EL. Save potential re-translations by forcing
         * the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, FGT_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (e.g. EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

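/* Dispatch on the current execution state: A64, M-profile, or A32. */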
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

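/*
 * Recompute the cached hflags from scratch. This is the entry point for
 * C code (e.g. exception entry/return or system register writes) after
 * it changes any CPU state that the flags depend on.
 */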
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

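/*
 * The HELPER() functions below are the entry points called from
 * TCG-generated code; the *_newel variants recompute the current EL
 * themselves rather than taking it as an argument.
 */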
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

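/*
 * Debug-build consistency check: verify that the incrementally
 * maintained env->hflags matches what a from-scratch rebuild produces.
 */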
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}