/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "cpregs.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

/* Return true if memory alignment should be enforced. */
static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
{
#ifdef CONFIG_USER_ONLY
    return false;
#else
    /* Check the alignment enable bit. */
    if (sctlr & SCTLR_A) {
        return true;
    }

    /*
     * With PMSA, when the MPU is disabled, all memory types in the
     * default map are Normal, so alignment need not be enforced.
     */
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        return false;
    }

    /*
     * With VMSA, if translation is disabled, then the default memory type
     * is Device(-nGnRnE) instead of Normal, which requires that alignment
     * be enforced. Since this affects all RAM, it is most efficient
     * to handle this during translation.
     */
    if (sctlr & SCTLR_M) {
        /* Translation enabled: memory type in PTE via MAIR_ELx. */
        return false;
    }
    if (el < 2 && (arm_hcr_el2_eff(env) & (HCR_DC | HCR_VM))) {
        /* Stage 2 translation enabled: memory type in PTE. */
        return false;
    }
    return true;
#endif
}

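/*
 * Fill in the TB flag bits that are common to all execution states:
 * the FP exception EL, the MMU index, and whether single-stepping
 * is active.
 */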
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

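/* Build the TB flags for an M-profile CPU. */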
static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

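/* Build the TB flags for AArch32 (non-M-profile) state. */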
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);
    uint64_t sctlr = arm_sctlr(env, el);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

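/* Build the TB flags for AArch64 state. */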
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses. */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE are disabled, translator does not need len.
         * If SVE EL > FP EL, FP exception has precedence, and translator
         * does not need SVE EL. Save potential re-translations by forcing
         * the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (aprofile_require_alignment(env, el, sctlr)) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */
            if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    /*
     * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care
     * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present.
     */
    if (el == 1 && (hcr & HCR_NV)) {
        DP_TBFLAG_A64(flags, TRAP_ERET, 1);
        DP_TBFLAG_A64(flags, NV, 1);
        if (hcr & HCR_NV1) {
            DP_TBFLAG_A64(flags, NV1, 1);
        }
        if (hcr & HCR_NV2) {
            DP_TBFLAG_A64(flags, NV2, 1);
            if (hcr & HCR_E2H) {
                DP_TBFLAG_A64(flags, NV2_MEM_E20, 1);
            }
            if (env->cp15.sctlr_el[2] & SCTLR_EE) {
                DP_TBFLAG_A64(flags, NV2_MEM_BE, 1);
            }
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (eg EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

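/* Dispatch on the current execution state to the right rebuild routine. */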
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

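/* Recompute the cached TB flags from the current CPU state. */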
void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

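/*
 * Debug-only consistency check: the cached env->hflags must always
 * match a full rebuild from CPU state; abort on any mismatch.
 */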
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}