/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

e21b551c PMD |
13 | static inline uint32_t merge_syn_data_abort(uint32_t template_syn, |
14 | unsigned int target_el, | |
15 | bool same_el, bool ea, | |
16 | bool s1ptw, bool is_write, | |
17 | int fsc) | |
18 | { | |
19 | uint32_t syn; | |
20 | ||
21 | /* | |
22 | * ISV is only set for data aborts routed to EL2 and | |
23 | * never for stage-1 page table walks faulting on stage 2. | |
24 | * | |
25 | * Furthermore, ISV is only set for certain kinds of load/stores. | |
26 | * If the template syndrome does not have ISV set, we should leave | |
27 | * it cleared. | |
28 | * | |
29 | * See ARMv8 specs, D7-1974: | |
30 | * ISS encoding for an exception from a Data Abort, the | |
31 | * ISV field. | |
32 | */ | |
33 | if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) { | |
e24fd076 | 34 | syn = syn_data_abort_no_iss(same_el, 0, |
e21b551c PMD |
35 | ea, 0, s1ptw, is_write, fsc); |
36 | } else { | |
37 | /* | |
38 | * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template | |
39 | * syndrome created at translation time. | |
40 | * Now we create the runtime syndrome with the remaining fields. | |
41 | */ | |
42 | syn = syn_data_abort_with_iss(same_el, | |
43 | 0, 0, 0, 0, 0, | |
44 | ea, 0, s1ptw, is_write, fsc, | |
30d54483 | 45 | true); |
e21b551c PMD |
46 | /* Merge the runtime syndrome with the template syndrome. */ |
47 | syn |= template_syn; | |
48 | } | |
49 | return syn; | |
50 | } | |
51 | ||
52 | static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr, | |
53 | MMUAccessType access_type, | |
54 | int mmu_idx, ARMMMUFaultInfo *fi) | |
55 | { | |
56 | CPUARMState *env = &cpu->env; | |
57 | int target_el; | |
58 | bool same_el; | |
59 | uint32_t syn, exc, fsr, fsc; | |
60 | ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx); | |
61 | ||
62 | target_el = exception_target_el(env); | |
63 | if (fi->stage2) { | |
64 | target_el = 2; | |
65 | env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4; | |
66 | } | |
67 | same_el = (arm_current_el(env) == target_el); | |
68 | ||
69 | if (target_el == 2 || arm_el_is_aa64(env, target_el) || | |
70 | arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) { | |
71 | /* | |
72 | * LPAE format fault status register : bottom 6 bits are | |
73 | * status code in the same form as needed for syndrome | |
74 | */ | |
75 | fsr = arm_fi_to_lfsc(fi); | |
76 | fsc = extract32(fsr, 0, 6); | |
77 | } else { | |
78 | fsr = arm_fi_to_sfsc(fi); | |
79 | /* | |
80 | * Short format FSR : this fault will never actually be reported | |
81 | * to an EL that uses a syndrome register. Use a (currently) | |
82 | * reserved FSR code in case the constructed syndrome does leak | |
83 | * into the guest somehow. | |
84 | */ | |
85 | fsc = 0x3f; | |
86 | } | |
87 | ||
88 | if (access_type == MMU_INST_FETCH) { | |
89 | syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc); | |
90 | exc = EXCP_PREFETCH_ABORT; | |
91 | } else { | |
92 | syn = merge_syn_data_abort(env->exception.syndrome, target_el, | |
93 | same_el, fi->ea, fi->s1ptw, | |
94 | access_type == MMU_DATA_STORE, | |
95 | fsc); | |
96 | if (access_type == MMU_DATA_STORE | |
97 | && arm_feature(env, ARM_FEATURE_V6)) { | |
98 | fsr |= (1 << 11); | |
99 | } | |
100 | exc = EXCP_DATA_ABORT; | |
101 | } | |
102 | ||
103 | env->exception.vaddress = addr; | |
104 | env->exception.fsr = fsr; | |
105 | raise_exception(env, exc, syn, target_el); | |
106 | } | |
107 | ||
108 | /* Raise a data fault alignment exception for the specified virtual address */ | |
109 | void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, | |
110 | MMUAccessType access_type, | |
111 | int mmu_idx, uintptr_t retaddr) | |
112 | { | |
113 | ARMCPU *cpu = ARM_CPU(cs); | |
114 | ARMMMUFaultInfo fi = {}; | |
115 | ||
116 | /* now we have a real cpu fault */ | |
117 | cpu_restore_state(cs, retaddr, true); | |
118 | ||
119 | fi.type = ARMFault_Alignment; | |
120 | arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi); | |
121 | } | |
122 | ||
0d1762e9 RH |
123 | #if !defined(CONFIG_USER_ONLY) |
124 | ||
e21b551c PMD |
125 | /* |
126 | * arm_cpu_do_transaction_failed: handle a memory system error response | |
127 | * (eg "no device/memory present at address") by raising an external abort | |
128 | * exception | |
129 | */ | |
130 | void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, | |
131 | vaddr addr, unsigned size, | |
132 | MMUAccessType access_type, | |
133 | int mmu_idx, MemTxAttrs attrs, | |
134 | MemTxResult response, uintptr_t retaddr) | |
135 | { | |
136 | ARMCPU *cpu = ARM_CPU(cs); | |
137 | ARMMMUFaultInfo fi = {}; | |
138 | ||
139 | /* now we have a real cpu fault */ | |
140 | cpu_restore_state(cs, retaddr, true); | |
141 | ||
142 | fi.ea = arm_extabort_type(response); | |
143 | fi.type = ARMFault_SyncExternal; | |
144 | arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi); | |
145 | } | |
146 | ||
147 | #endif /* !defined(CONFIG_USER_ONLY) */ | |
148 | ||
149 | bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | |
150 | MMUAccessType access_type, int mmu_idx, | |
151 | bool probe, uintptr_t retaddr) | |
152 | { | |
153 | ARMCPU *cpu = ARM_CPU(cs); | |
154 | ||
155 | #ifdef CONFIG_USER_ONLY | |
156 | cpu->env.exception.vaddress = address; | |
157 | if (access_type == MMU_INST_FETCH) { | |
158 | cs->exception_index = EXCP_PREFETCH_ABORT; | |
159 | } else { | |
160 | cs->exception_index = EXCP_DATA_ABORT; | |
161 | } | |
162 | cpu_loop_exit_restore(cs, retaddr); | |
163 | #else | |
164 | hwaddr phys_addr; | |
165 | target_ulong page_size; | |
166 | int prot, ret; | |
167 | MemTxAttrs attrs = {}; | |
168 | ARMMMUFaultInfo fi = {}; | |
7e98e21c | 169 | ARMCacheAttrs cacheattrs = {}; |
e21b551c PMD |
170 | |
171 | /* | |
172 | * Walk the page table and (if the mapping exists) add the page | |
173 | * to the TLB. On success, return true. Otherwise, if probing, | |
174 | * return false. Otherwise populate fsr with ARM DFSR/IFSR fault | |
175 | * register format, and signal the fault. | |
176 | */ | |
177 | ret = get_phys_addr(&cpu->env, address, access_type, | |
178 | core_to_arm_mmu_idx(&cpu->env, mmu_idx), | |
7e98e21c RH |
179 | &phys_addr, &attrs, &prot, &page_size, |
180 | &fi, &cacheattrs); | |
e21b551c PMD |
181 | if (likely(!ret)) { |
182 | /* | |
183 | * Map a single [sub]page. Regions smaller than our declared | |
184 | * target page size are handled specially, so for those we | |
185 | * pass in the exact addresses. | |
186 | */ | |
187 | if (page_size >= TARGET_PAGE_SIZE) { | |
188 | phys_addr &= TARGET_PAGE_MASK; | |
189 | address &= TARGET_PAGE_MASK; | |
190 | } | |
337a03f0 RH |
191 | /* Notice and record tagged memory. */ |
192 | if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) { | |
193 | arm_tlb_mte_tagged(&attrs) = true; | |
194 | } | |
195 | ||
e21b551c PMD |
196 | tlb_set_page_with_attrs(cs, address, phys_addr, attrs, |
197 | prot, mmu_idx, page_size); | |
198 | return true; | |
199 | } else if (probe) { | |
200 | return false; | |
201 | } else { | |
202 | /* now we have a real cpu fault */ | |
203 | cpu_restore_state(cs, retaddr, true); | |
204 | arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi); | |
205 | } | |
206 | #endif | |
207 | } |