/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32- and 64-bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 * b: 8 bits
 * w: 16 bits
 * l: 32 bits
 * q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8-bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements. The alignment will be enforced.
 */
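
/*
 * Example (a sketch, not part of the API description above): a target op
 * helper might combine these accessors like so; "addr" and "mmu_idx" are
 * hypothetical locals, and GETPC() supplies the host return address.
 *
 *     uint32_t w = cpu_ldl_le_data_ra(env, addr, GETPC());
 *     cpu_stw_be_mmuidx_ra(env, addr + 4, w, mmu_idx, GETPC());
 */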
#ifndef CPU_LDST_H
#define CPU_LDST_H

#include "exec/memopidx.h"
#include "qemu/int128.h"
#include "cpu.h"

#if defined(CONFIG_USER_ONLY)
/*
 * sparc32plus has a 64-bit long but a 32-bit address space,
 * which can produce incorrect results with g2h() and h2g().
 */
#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
typedef uint32_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%x"
#else
typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif
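
/*
 * Example (a sketch, assuming qemu_log() from "qemu/log.h"):
 * TARGET_ABI_FMT_ptr is the printf-style format matching abi_ptr, e.g.
 *
 *     qemu_log("guest ptr = 0x" TARGET_ABI_FMT_ptr "\n", ptr);
 */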

#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
    return x;
}
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}

static inline bool guest_addr_valid_untagged(abi_ulong x)
{
    return x <= GUEST_ADDR_MAX;
}

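/*
 * Check that [start, start + len) lies within the guest address space.
 * Written to avoid wrapping: "len - 1 <= GUEST_ADDR_MAX" bounds the length
 * itself, and "start <= GUEST_ADDR_MAX - len + 1" checks the last byte,
 * start + len - 1, without ever computing start + len.
 */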
static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_valid(x) \
    (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
     (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)

#define h2g_nocheck(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
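
/*
 * Example (a sketch): round-tripping a guest pointer through host memory;
 * "guest_addr" is a hypothetical abi_ptr local.
 *
 *     void *host = g2h(env_cpu(env), guest_addr);
 *     abi_ptr back = h2g(host);   (asserts the result fits the guest space)
 */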
#else
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
#endif

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                       int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                       int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
                        MemOpIdx oi, uintptr_t ra);

Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra);
Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra);

void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra);
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra);

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra);
void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra);

uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
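
/*
 * Example (a sketch): the MemOpIdx argument packs the MemOp (size,
 * endianness, alignment) together with the mmu_idx, e.g.:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, mmu_idx);
 *     old = cpu_atomic_cmpxchgl_be_mmu(env, addr, cmpv, newv, oi, GETPC());
 */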

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
    TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
        (CPUArchState *env, target_ulong addr, TYPE val, \
         MemOpIdx oi, uintptr_t retaddr);
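
/*
 * For example, GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) declares:
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                           target_ulong addr, uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 */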

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                           MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                           MemOpIdx oi, uintptr_t retaddr);

#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
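
/*
 * Typical usage in a user-only helper (a sketch):
 *
 *     set_helper_retaddr(GETPC());
 *     ... direct host memory accesses that may fault ...
 *     clear_helper_retaddr();
 */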

#else

/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"

static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
#if TCG_OVERSIZED_GUEST
    return entry->addr_write;
#else
    return qatomic_read(&entry->addr_write);
#endif
}

/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  target_ulong addr)
{
    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
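
/*
 * Example (a sketch, assuming tlb_hit() from "exec/cpu-all.h"): probing
 * the softmmu TLB for a write without taking a fault:
 *
 *     CPUTLBEntry *e = tlb_entry(env, mmu_idx, addr);
 *     if (tlb_hit(tlb_addr_write(e), addr)) {
 *         ... fast path: the entry maps this page for writing ...
 *     }
 */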

#endif /* defined(CONFIG_USER_ONLY) */

#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_ldw_mmu          cpu_ldw_be_mmu
# define cpu_ldl_mmu          cpu_ldl_be_mmu
# define cpu_ldq_mmu          cpu_ldq_be_mmu
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
# define cpu_stw_mmu          cpu_stw_be_mmu
# define cpu_stl_mmu          cpu_stl_be_mmu
# define cpu_stq_mmu          cpu_stq_be_mmu
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_ldw_mmu          cpu_ldw_le_mmu
# define cpu_ldl_mmu          cpu_ldl_le_mmu
# define cpu_ldq_mmu          cpu_ldq_le_mmu
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
# define cpu_stw_mmu          cpu_stw_le_mmu
# define cpu_stl_mmu          cpu_stl_le_mmu
# define cpu_stq_mmu          cpu_stq_le_mmu
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read (MMU_DATA_LOAD), 1 for write (MMU_DATA_STORE),
 *               2 for execute (MMU_INST_FETCH)
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
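
/*
 * Example (a sketch): a target helper zeroing guest memory through a direct
 * host pointer when possible, falling back to byte stores otherwise;
 * "vaddr", "len" and "mmu_idx" are hypothetical locals.
 *
 *     void *p = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
 *     if (p) {
 *         memset(p, 0, len);
 *     } else {
 *         for (int i = 0; i < len; i++) {
 *             cpu_stb_mmuidx_ra(env, vaddr + i, 0, mmu_idx, GETPC());
 *         }
 *     }
 */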

#endif /* CPU_LDST_H */