/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}
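/*
 * For instance, a lookup made with ARMMMUIdx_E10_0 is first folded to its
 * stage-1 equivalent (ARMMMUIdx_Stage1_E10_0) by stage_1_mmu_idx() before
 * the descriptor-format check; stage_1_mmu_idx() leaves indexes that are
 * already stage-1 untouched, so callers may pass either form.
 */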
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2
        || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
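/*
 * Worked example of the split above: for an ISV-capable stage-2 data abort
 * routed to EL2, the translation-time template supplies ISV, SAS, SSE, SRT,
 * SF and AR, while this function picks the exception class via same_el and
 * fills in EA, S1PTW, WnR and the FSC computed from the fault walk.
 */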
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    /*
     * For M-profile there is no guest-facing FSR. We compute a
     * short-form value for env->exception.fsr which we will then
     * examine in arm_v7m_cpu_do_interrupt(). In theory we could
     * use the LPAE format instead as long as both bits of code agree
     * (and arm_fi_to_lfsc() handled the M-profile specific
     * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases).
     */
    if (!arm_feature(env, ARM_FEATURE_M) &&
        (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
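/*
 * As a concrete illustration (not exhaustive): in the LPAE/long-descriptor
 * format a level-2 translation fault is reported with FSC 0b000110, so the
 * bottom six bits drop straight into the syndrome's FSC field. The
 * short-descriptor format encodes the same fault differently (e.g. a page
 * translation fault is 0b00111, with the domain in DFSR bits [7:4]), which
 * is why the reserved FSC 0x3f is substituted above rather than a
 * translated value.
 */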
static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
                                    ARMMMUFaultInfo *fi)
{
    bool ret;

    switch (fi->gpcf) {
    case GPCF_None:
        return false;
    case GPCF_AddressSize:
    case GPCF_Walk:
    case GPCF_EABT:
        /* R_PYTGX: GPT faults are reported as GPC. */
        ret = true;
        break;
    case GPCF_Fail:
        /*
         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
         */
        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
        break;
    default:
        g_assert_not_reached();
    }

    assert(cpu_isar_feature(aa64_rme, cpu));
    assert(fi->type == ARMFault_GPCFOnWalk ||
           fi->type == ARMFault_GPCFOnOutput);
    if (fi->gpcf == GPCF_AddressSize) {
        assert(fi->level == 0);
    } else {
        assert(fi->level >= 0 && fi->level <= 1);
    }

    return ret;
}
static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
{
    static uint8_t const gpcsc[] = {
        [GPCF_AddressSize] = 0b000000,
        [GPCF_Walk]        = 0b000100,
        [GPCF_Fail]        = 0b001100,
        [GPCF_EABT]        = 0b010100,
    };

    /* Note that we've validated fi->gpcf and fi->level above. */
    return gpcsc[fi->gpcf] | fi->level;
}
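/*
 * Reading of the table above: the low two bits of the returned value carry
 * the GPT walk level (0 or 1, as asserted in report_as_gpc_exception()),
 * while the upper bits select the fault class, matching the GPCSC syndrome
 * field consumed by syn_gpc().
 */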
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el = exception_target_el(env);
    int current_el = arm_current_el(env);
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    if (report_as_gpc_exception(cpu, current_el, fi)) {
        target_el = 3;

        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
                      access_type == MMU_INST_FETCH,
                      encode_gpcsc(fi), 0, fi->s1ptw,
                      access_type == MMU_DATA_STORE, fsc);

        env->cp15.mfar_el3 = fi->paddr;
        switch (fi->paddr_space) {
        case ARMSS_Secure:
            break;
        case ARMSS_NonSecure:
            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
            break;
        case ARMSS_Root:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
            break;
        case ARMSS_Realm:
            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
            break;
        default:
            g_assert_not_reached();
        }

        exc = EXCP_GPC;
        goto do_raise;
    }

    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
    if (fi->gpcf == GPCF_Fail && target_el < 2) {
        if (arm_hcr_el2_eff(env) & HCR_GPF) {
            target_el = 2;
        }
    }

    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = current_el == target_el;
    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            /* Set the WnR (write-not-read) bit of the short-format FSR. */
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

 do_raise:
    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
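/*
 * Routing recap for arm_deliver_fault(), in priority order: faults that
 * report_as_gpc_exception() accepts are raised as GPC exceptions at EL3;
 * a GPCF_Fail may instead be pulled up to EL2 when HCR_EL2.GPF is in
 * effect; stage-2 faults always target EL2 (recording the IPA in
 * HPFAR_EL2); everything else goes to exception_target_el(env).
 */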
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since the syndrome is pcalignment, not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}
#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
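/*
 * Aside: fi.ea above is the IMPDEF "external abort type" classification.
 * As implemented by arm_extabort_type() in internals.h, a bus decode error
 * reports EA == 0 and other transaction failures (e.g. slave error) report
 * EA == 1, following common AXI practice.
 */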
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
        res.f.extra.arm.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
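/*
 * Note on the tlb_fi handshake above: a stage-1 page table walk may itself
 * trigger a stage-2 TLB fill, re-entering arm_cpu_tlb_fill(). When
 * S1_ptw_translate() has stashed a fault-info pointer in env->tlb_fi, the
 * recursive fill reports its fault through that structure instead of a
 * local one, so the outer walk can observe and propagate the fault.
 */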
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */