/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements.  The alignment will be enforced.
 */
#ifndef CPU_LDST_H
#define CPU_LDST_H

#include "exec/memopidx.h"
#include "qemu/int128.h"

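/*
 * Usage sketch (illustrative only; "do_increment" and its arguments are
 * hypothetical): a target op helper that loads a 32-bit little-endian
 * word and stores back an incremented value, passing its own return
 * address so a fault unwinds correctly to the guest instruction.
 *
 *     static void do_increment(CPUArchState *env, abi_ptr ptr)
 *     {
 *         uint32_t val = cpu_ldl_le_data_ra(env, ptr, GETPC());
 *         cpu_stl_le_data_ra(env, ptr, val + 1, GETPC());
 *     }
 */
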
#if defined(CONFIG_USER_ONLY)
/*
 * sparc32plus has a 64-bit long but a 32-bit address space;
 * this can produce bad results with g2h() and h2g().
 */
#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
typedef uint32_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%x"
#else
typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif

#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
    return x;
}
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}

static inline bool guest_addr_valid_untagged(abi_ulong x)
{
    return x <= GUEST_ADDR_MAX;
}

static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_valid(x) \
    (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
     (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)

#define h2g_nocheck(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
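
/*
 * Illustrative sketch (hypothetical "cs" and "guest_addr"): converting
 * between guest and host addresses in user-only mode.  h2g() asserts
 * that the host pointer maps back into the guest address space.
 *
 *     void *host = g2h(cs, guest_addr);   // guest -> host pointer
 *     abi_ptr back = h2g(host);           // host -> guest, checked
 *     assert(back == cpu_untagged_addr(cs, guest_addr));
 */
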
#else
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
#endif

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                       int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                       int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);

void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra);

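/*
 * Illustrative sketch: the "mmu" accessors take a full MemOpIdx, built
 * with make_memop_idx() from "exec/memopidx.h".  Here a 32-bit
 * little-endian load is forced to be aligned; "mmu_idx" and "ptr" are
 * hypothetical.
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);
 *     uint32_t val = cpu_ldl_le_mmu(env, ptr, oi, GETPC());
 */
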
uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)            \
    TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu           \
    (CPUArchState *env, target_ulong addr, TYPE val,     \
     MemOpIdx oi, uintptr_t retaddr);
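
/*
 * For example, GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) expands to:
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu
 *     (CPUArchState *env, target_ulong addr, uint32_t val,
 *      MemOpIdx oi, uintptr_t retaddr);
 */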

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                           MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                           MemOpIdx oi, uintptr_t retaddr);

#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
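
/*
 * Typical pattern (a sketch; the helper body is hypothetical): bracket
 * direct guest-memory accesses so the SIGSEGV handler can attribute a
 * fault to this helper's return address.
 *
 *     set_helper_retaddr(GETPC());
 *     ... direct loads/stores on guest memory ...
 *     clear_helper_retaddr();
 */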

#else

/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"

static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
#if TCG_OVERSIZED_GUEST
    return entry->addr_write;
#else
    return qatomic_read(&entry->addr_write);
#endif
}

/* Find the TLB index corresponding to the mmu_idx + address pair.  */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  target_ulong addr)
{
    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair.  */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
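
/*
 * Illustrative sketch (simplified; "mmu_idx" and "addr" are
 * hypothetical): a fast-path write probe.  Real lookups also check the
 * TLB flag bits kept in the low bits of the comparator, which are
 * omitted here.
 *
 *     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
 *     target_ulong tlb_addr = tlb_addr_write(entry);
 *     if (tlb_addr == (addr & TARGET_PAGE_MASK)) {
 *         void *host = (void *)((uintptr_t)addr + entry->addend);
 *         ... TLB hit: access RAM directly via host ...
 *     } else {
 *         ... TLB miss: take the slow path to fill the TLB ...
 *     }
 */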

#endif /* defined(CONFIG_USER_ONLY) */

#ifdef TARGET_WORDS_BIGENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_ldw_mmu          cpu_ldw_be_mmu
# define cpu_ldl_mmu          cpu_ldl_be_mmu
# define cpu_ldq_mmu          cpu_ldq_be_mmu
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
# define cpu_stw_mmu          cpu_stw_be_mmu
# define cpu_stl_mmu          cpu_stl_be_mmu
# define cpu_stq_mmu          cpu_stq_be_mmu
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_ldw_mmu          cpu_ldw_le_mmu
# define cpu_ldl_mmu          cpu_ldl_le_mmu
# define cpu_ldq_mmu          cpu_ldq_le_mmu
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
# define cpu_stw_mmu          cpu_stw_le_mmu
# define cpu_stl_mmu          cpu_stl_le_mmu
# define cpu_stq_mmu          cpu_stq_le_mmu
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
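
/*
 * Illustrative sketch (hypothetical "addr", "mmu_idx", "val"): try a
 * direct host pointer first, and fall back to the full access functions
 * when the page is not directly addressable (I/O, unfilled TLB, ...).
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (host) {
 *         val = ldl_p(host);    // direct RAM access
 *     } else {
 *         val = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, GETPC());
 *     }
 */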

#endif /* CPU_LDST_H */