/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}
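
/*
 * Merge a data abort syndrome "template" created at translation time
 * with the fault information that is only known at runtime.
 */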
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            ARMMMUFaultInfo *fi,
                                            unsigned int target_el,
                                            bool same_el, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for stage-2 data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2
     * or for stage-1 faults.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     *
     * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation,
     * Access Flag, and Permission faults caused by LD64B, ST64B,
     * ST64BV, or ST64BV0 insns report syndrome info even for stage-1
     * faults and regardless of the target EL.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2
        || fi->s1ptw || !fi->stage2) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    fi->ea, 0, fi->s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      fi->ea, 0, fi->s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
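
/*
 * Compute the FSR to report for a fault, in either the LPAE or the
 * short-descriptor format as appropriate for the regime that will take
 * the fault; the 6-bit syndrome status code is returned via *ret_fsc.
 */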
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
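
/*
 * Deliver a fault to the guest: build the syndrome and FSR values and
 * raise a prefetch abort (for instruction fetches) or a data abort
 * (for loads and stores) at the appropriate target EL.
 */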
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    target_el = exception_target_el(env);
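    /*
     * Faults from a stage 2 translation are always taken to EL2, and the
     * faulting intermediate physical address is reported in HPFAR_EL2.
     */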
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
                                   same_el, access_type == MMU_DATA_STORE,
                                   fsc);
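        /*
         * DFSR bit 11 is the WnR (write-not-read) bit, present from
         * ARMv6 onwards; set it for stores.
         */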
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
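
/*
 * TCG helper: raise a PC alignment fault for the given pc, reporting
 * it with a pcalignment syndrome as a prefetch abort.
 */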
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
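
/*
 * Look up the guest virtual address in the page tables and, if the
 * mapping exists, install it into QEMU's TLB; otherwise either report
 * failure to a probing caller or deliver the fault to the guest.
 */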
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
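
        /*
         * Record the memory attributes and shareability from the walk
         * in the TLB entry, so later code (e.g. the MTE helpers) can
         * check the attributes of the translated page.
         */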
        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
#else
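/*
 * User-mode emulation only: record the fault information for a SIGSEGV,
 * distinguishing unmapped addresses (translation faults) from protection
 * failures (permission faults).
 */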
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}
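
/*
 * User-mode emulation only: an unaligned access raises SIGBUS; report
 * it to the guest as an alignment fault.
 */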
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */