/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "cpregs.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

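/*
 * Fill in the TB flags that are common to all execution modes:
 * the FP exception EL, the core MMU index, and single-step state.
 */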
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

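/*
 * Fill in the TB flags common to the 32-bit modes: SCTLR.B,
 * data endianness, and the NS (non-secure) bit.
 */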
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

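/* Rebuild the hflags for an M-profile CPU. */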
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

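/* Rebuild the hflags for an A-profile CPU running in AArch32. */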
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

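/* Rebuild the hflags for an A-profile CPU running in AArch64. */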
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses. */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE is disabled, the translator does not need
         * the vector length. If the SVE EL is greater than the FP EL,
         * the FP exception has precedence and the translator does not
         * need the SVE EL. Save potential re-translations by forcing
         * the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, FGT_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (e.g. EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

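/*
 * Dispatch to the flag-rebuild routine matching the current
 * execution state: AArch64, M-profile, or AArch32.
 */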
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

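/* Recompute the cached env->hflags from the current CPU state. */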
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, so we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

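/*
 * Variant where the caller passes the EL explicitly instead of
 * recomputing it; the a32 and a64 helpers below follow suit.
 */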
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, so we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

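/*
 * Debug-build check that the cached env->hflags match what a full
 * rebuild from CPU state produces; abort on any mismatch.
 */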
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}