/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements.  The alignment will be enforced.
 */
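/*
 * For example (an illustrative reading of the scheme above, not an
 * exhaustive list): cpu_ldsw_be_data_ra(env, ptr, retaddr) loads a
 * signed 16-bit value with forced big-endian byte order, using the
 * mmu index from cpu_mmu_index(), and attributes any fault to the
 * host return address "retaddr" for unwinding.
 */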
#ifndef CPU_LDST_H
#define CPU_LDST_H

#include "exec/memopidx.h"
#include "qemu/int128.h"
#include "cpu.h"

#if defined(CONFIG_USER_ONLY)
/* sparc32plus has a 64-bit long but a 32-bit address space;
 * this can produce bad results with g2h() and h2g().
 */
#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
typedef uint32_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%x"
#else
typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif

#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
    return x;
}
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}

static inline bool guest_addr_valid_untagged(abi_ulong x)
{
    return x <= GUEST_ADDR_MAX;
}

/* Check that [start, start + len - 1] lies within the guest address
 * space; written to avoid overflow in "start + len". */
static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_valid(x) \
    (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
     (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)

#define h2g_nocheck(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
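
/*
 * Illustrative sketch: round-tripping between guest and host addresses
 * in user-only mode ("guest_addr", "buf" and "len" are hypothetical):
 *
 *     void *host = g2h(env_cpu(env), guest_addr);
 *     memcpy(host, buf, len);
 *     abi_ptr back = h2g(host);    // asserts h2g_valid(host)
 */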
#else
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
#endif

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                       int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                       int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);

void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                  MemOpIdx oi, uintptr_t ra);

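/*
 * Illustrative sketch: the MemOpIdx for these accessors is normally
 * built with make_memop_idx() from "exec/memopidx.h", packing a MemOp
 * (size, endianness, alignment) together with an mmu index:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx);
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());
 */
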
uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
    (CPUArchState *env, abi_ptr addr, TYPE val, \
     MemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

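/*
 * For example, GEN_ATOMIC_HELPER_ALL(fetch_add) declares, among others,
 * the 32-bit little-endian helper:
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                           abi_ptr addr, uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 */
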
Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}

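/*
 * Illustrative usage sketch: a user-only helper brackets its direct
 * guest memory accesses so that the SIGSEGV handler can unwind a
 * fault to the translation block that called the helper:
 *
 *     set_helper_retaddr(GETPC());
 *     ... direct accesses through g2h() ...
 *     clear_helper_retaddr();
 */
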
#else

#include "tcg/oversized-guest.h"

static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
                                    MMUAccessType access_type)
{
    /* Do not rearrange the CPUTLBEntry structure members. */
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
                      MMU_DATA_LOAD * sizeof(uint64_t));
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
                      MMU_DATA_STORE * sizeof(uint64_t));
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
                      MMU_INST_FETCH * sizeof(uint64_t));

#if TARGET_LONG_BITS == 32
    /* Use qatomic_read, in case of addr_write; only care about low bits. */
    const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
    ptr += HOST_BIG_ENDIAN;
    return qatomic_read(ptr);
#else
    const uint64_t *ptr = &entry->addr_idx[access_type];
# if TCG_OVERSIZED_GUEST
    return *ptr;
# else
    /* ptr might point at .addr_write, so use qatomic_read. */
    return qatomic_read(ptr);
# endif
#endif
}

static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
{
    return tlb_read_idx(entry, MMU_DATA_STORE);
}

/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  vaddr addr)
{
    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     vaddr addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}

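/*
 * Illustrative sketch: a softmmu fast path pairs tlb_entry() with
 * tlb_read_idx() to test for a TLB hit before taking the slow path
 * (tlb_hit() is assumed from "exec/cpu-all.h"):
 *
 *     CPUTLBEntry *e = tlb_entry(env, mmu_idx, addr);
 *     if (tlb_hit(tlb_read_idx(e, MMU_DATA_LOAD), addr)) {
 *         ... access RAM directly via addr + e->addend ...
 *     } else {
 *         ... refill the TLB and retry ...
 *     }
 */
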
#endif /* defined(CONFIG_USER_ONLY) */

#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: access type (MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH)
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc.) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
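
/*
 * Illustrative sketch: probe for a direct host pointer first and fall
 * back to a full softmmu access when the page is not directly
 * addressable ("val" and "mmu_idx" are hypothetical):
 *
 *     void *p = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
 *     if (p) {
 *         stl_p(p, val);
 *     } else {
 *         cpu_stl_mmuidx_ra(env, addr, val, mmu_idx, GETPC());
 *     }
 */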

#endif /* CPU_LDST_H */