/*
 * Software MMU support (per-target)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements. The alignment will be enforced.
 */
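
/*
 * Illustrative (hypothetical) example of the naming scheme above: a target
 * op helper loading a signed, big-endian 16-bit value with an explicit MMU
 * index, passing its own return address (typically GETPC()) for unwinding:
 *
 *     int val = cpu_ldsw_be_mmuidx_ra(env, ptr, mmu_idx, GETPC());
 *
 * and the matching store:
 *
 *     cpu_stw_be_mmuidx_ra(env, ptr, val, mmu_idx, GETPC());
 */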
#ifndef CPU_LDST_H
#define CPU_LDST_H

#ifndef CONFIG_TCG
#error Can only include this header with TCG
#endif

#include "exec/memopidx.h"
#include "exec/abi_ptr.h"
#include "exec/mmu-access-type.h"
#include "qemu/int128.h"

#if defined(CONFIG_USER_ONLY)

#include "user/guest-base.h"

#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
    return x;
}
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}

static inline bool guest_addr_valid_untagged(abi_ulong x)
{
    return x <= GUEST_ADDR_MAX;
}

static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_valid(x) \
    (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
     (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)

#define h2g_nocheck(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
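
/*
 * Illustrative (hypothetical) round trip: g2h_untagged() maps a guest
 * address to a host pointer, and h2g() maps a host pointer back, asserting
 * that it fits the guest address space:
 *
 *     void *host = g2h_untagged(guest_addr);
 *     abi_ptr back = h2g(host);
 *     assert(back == guest_addr);
 */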

#endif /* CONFIG_USER_ONLY */

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                       int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                       int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);

void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                  MemOpIdx oi, uintptr_t ra);
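
/*
 * Illustrative (hypothetical) use of the full-MemOpIdx interface: an
 * aligned, target-endian 64-bit store issued from a target helper, where
 * addr, val and mmu_idx are supplied by the caller:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mmu_idx);
 *     cpu_stq_mmu(env, addr, val, oi, GETPC());
 *
 * The MemOp half of the MemOpIdx carries both the endianness and the
 * alignment requirement, which cpu_stq_mmu enforces.
 */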

uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)    \
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu       \
    (CPUArchState *env, abi_ptr addr, TYPE val,  \
     MemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
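
/*
 * For reference, GEN_ATOMIC_HELPER_ALL(fetch_add) above declares, among
 * others, a helper equivalent to:
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env, abi_ptr addr,
 *                                           uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 */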

Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
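
/*
 * Illustrative (hypothetical) fast path in a target helper: try to obtain a
 * direct host pointer, and fall back to the softmmu accessors when the page
 * is not directly addressable (I/O access, TLB fill required, etc):
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     uint32_t val;
 *     if (host) {
 *         val = ldl_p(host);
 *     } else {
 *         val = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, GETPC());
 *     }
 */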

#endif /* CPU_LDST_H */