/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/plugin.h"
25 # define DATA_TYPE Int128
26 # define BSWAP bswap128
30 # define DATA_TYPE aligned_uint64_t
31 # define SDATA_TYPE aligned_int64_t
32 # define BSWAP bswap64
36 # define DATA_TYPE uint32_t
37 # define SDATA_TYPE int32_t
38 # define BSWAP bswap32
42 # define DATA_TYPE uint16_t
43 # define SDATA_TYPE int16_t
44 # define BSWAP bswap16
48 # define DATA_TYPE uint8_t
49 # define SDATA_TYPE int8_t
53 # error unsupported data size
57 # define ABI_TYPE DATA_TYPE
59 # define ABI_TYPE uint32_t
62 /* Define host-endian atomic operations. Note that END is used within
63 the ATOMIC_NAME macro, and redefined below. */
72 ABI_TYPE
ATOMIC_NAME(cmpxchg
)(CPUArchState
*env
, target_ulong addr
,
73 ABI_TYPE cmpv
, ABI_TYPE newv
,
74 MemOpIdx oi
, uintptr_t retaddr
)
76 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
77 PAGE_READ
| PAGE_WRITE
, retaddr
);
81 ret
= atomic16_cmpxchg(haddr
, cmpv
, newv
);
83 ret
= qatomic_cmpxchg__nocheck(haddr
, cmpv
, newv
);
86 atomic_trace_rmw_post(env
, addr
, oi
);
92 ABI_TYPE
ATOMIC_NAME(ld
)(CPUArchState
*env
, target_ulong addr
,
93 MemOpIdx oi
, uintptr_t retaddr
)
95 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
99 val
= atomic16_read(haddr
);
101 atomic_trace_ld_post(env
, addr
, oi
);
105 void ATOMIC_NAME(st
)(CPUArchState
*env
, target_ulong addr
, ABI_TYPE val
,
106 MemOpIdx oi
, uintptr_t retaddr
)
108 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
109 PAGE_WRITE
, retaddr
);
111 atomic16_set(haddr
, val
);
113 atomic_trace_st_post(env
, addr
, oi
);
117 ABI_TYPE
ATOMIC_NAME(xchg
)(CPUArchState
*env
, target_ulong addr
, ABI_TYPE val
,
118 MemOpIdx oi
, uintptr_t retaddr
)
120 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
121 PAGE_READ
| PAGE_WRITE
, retaddr
);
124 ret
= qatomic_xchg__nocheck(haddr
, val
);
126 atomic_trace_rmw_post(env
, addr
, oi
);
130 #define GEN_ATOMIC_HELPER(X) \
131 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
132 ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
134 DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
135 PAGE_READ | PAGE_WRITE, retaddr); \
137 ret = qatomic_##X(haddr, val); \
138 ATOMIC_MMU_CLEANUP; \
139 atomic_trace_rmw_post(env, addr, oi); \
143 GEN_ATOMIC_HELPER(fetch_add
)
144 GEN_ATOMIC_HELPER(fetch_and
)
145 GEN_ATOMIC_HELPER(fetch_or
)
146 GEN_ATOMIC_HELPER(fetch_xor
)
147 GEN_ATOMIC_HELPER(add_fetch
)
148 GEN_ATOMIC_HELPER(and_fetch
)
149 GEN_ATOMIC_HELPER(or_fetch
)
150 GEN_ATOMIC_HELPER(xor_fetch
)
152 #undef GEN_ATOMIC_HELPER
155 * These helpers are, as a whole, full barriers. Within the helper,
156 * the leading barrier is explicit and the trailing barrier is within
159 * Trace this load + RMW loop as a single RMW op. This way, regardless
160 * of CF_PARALLEL's value, we'll trace just a read and a write.
162 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
163 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
164 ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
166 XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
167 PAGE_READ | PAGE_WRITE, retaddr); \
168 XDATA_TYPE cmp, old, new, val = xval; \
170 cmp = qatomic_read__nocheck(haddr); \
172 old = cmp; new = FN(old, val); \
173 cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
174 } while (cmp != old); \
175 ATOMIC_MMU_CLEANUP; \
176 atomic_trace_rmw_post(env, addr, oi); \
180 GEN_ATOMIC_HELPER_FN(fetch_smin
, MIN
, SDATA_TYPE
, old
)
181 GEN_ATOMIC_HELPER_FN(fetch_umin
, MIN
, DATA_TYPE
, old
)
182 GEN_ATOMIC_HELPER_FN(fetch_smax
, MAX
, SDATA_TYPE
, old
)
183 GEN_ATOMIC_HELPER_FN(fetch_umax
, MAX
, DATA_TYPE
, old
)
185 GEN_ATOMIC_HELPER_FN(smin_fetch
, MIN
, SDATA_TYPE
, new)
186 GEN_ATOMIC_HELPER_FN(umin_fetch
, MIN
, DATA_TYPE
, new)
187 GEN_ATOMIC_HELPER_FN(smax_fetch
, MAX
, SDATA_TYPE
, new)
188 GEN_ATOMIC_HELPER_FN(umax_fetch
, MAX
, DATA_TYPE
, new)
190 #undef GEN_ATOMIC_HELPER_FN
191 #endif /* DATA SIZE >= 16 */
197 /* Define reverse-host-endian atomic operations. Note that END is used
198 within the ATOMIC_NAME macro. */
205 ABI_TYPE
ATOMIC_NAME(cmpxchg
)(CPUArchState
*env
, target_ulong addr
,
206 ABI_TYPE cmpv
, ABI_TYPE newv
,
207 MemOpIdx oi
, uintptr_t retaddr
)
209 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
210 PAGE_READ
| PAGE_WRITE
, retaddr
);
214 ret
= atomic16_cmpxchg(haddr
, BSWAP(cmpv
), BSWAP(newv
));
216 ret
= qatomic_cmpxchg__nocheck(haddr
, BSWAP(cmpv
), BSWAP(newv
));
219 atomic_trace_rmw_post(env
, addr
, oi
);
225 ABI_TYPE
ATOMIC_NAME(ld
)(CPUArchState
*env
, target_ulong addr
,
226 MemOpIdx oi
, uintptr_t retaddr
)
228 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
232 val
= atomic16_read(haddr
);
234 atomic_trace_ld_post(env
, addr
, oi
);
238 void ATOMIC_NAME(st
)(CPUArchState
*env
, target_ulong addr
, ABI_TYPE val
,
239 MemOpIdx oi
, uintptr_t retaddr
)
241 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
242 PAGE_WRITE
, retaddr
);
245 atomic16_set(haddr
, val
);
247 atomic_trace_st_post(env
, addr
, oi
);
251 ABI_TYPE
ATOMIC_NAME(xchg
)(CPUArchState
*env
, target_ulong addr
, ABI_TYPE val
,
252 MemOpIdx oi
, uintptr_t retaddr
)
254 DATA_TYPE
*haddr
= atomic_mmu_lookup(env
, addr
, oi
, DATA_SIZE
,
255 PAGE_READ
| PAGE_WRITE
, retaddr
);
258 ret
= qatomic_xchg__nocheck(haddr
, BSWAP(val
));
260 atomic_trace_rmw_post(env
, addr
, oi
);
264 #define GEN_ATOMIC_HELPER(X) \
265 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
266 ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
268 DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
269 PAGE_READ | PAGE_WRITE, retaddr); \
271 ret = qatomic_##X(haddr, BSWAP(val)); \
272 ATOMIC_MMU_CLEANUP; \
273 atomic_trace_rmw_post(env, addr, oi); \
277 GEN_ATOMIC_HELPER(fetch_and
)
278 GEN_ATOMIC_HELPER(fetch_or
)
279 GEN_ATOMIC_HELPER(fetch_xor
)
280 GEN_ATOMIC_HELPER(and_fetch
)
281 GEN_ATOMIC_HELPER(or_fetch
)
282 GEN_ATOMIC_HELPER(xor_fetch
)
284 #undef GEN_ATOMIC_HELPER
286 /* These helpers are, as a whole, full barriers. Within the helper,
287 * the leading barrier is explicit and the trailing barrier is within
290 * Trace this load + RMW loop as a single RMW op. This way, regardless
291 * of CF_PARALLEL's value, we'll trace just a read and a write.
293 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
294 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
295 ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
297 XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
298 PAGE_READ | PAGE_WRITE, retaddr); \
299 XDATA_TYPE ldo, ldn, old, new, val = xval; \
301 ldn = qatomic_read__nocheck(haddr); \
303 ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
304 ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
305 } while (ldo != ldn); \
306 ATOMIC_MMU_CLEANUP; \
307 atomic_trace_rmw_post(env, addr, oi); \
311 GEN_ATOMIC_HELPER_FN(fetch_smin
, MIN
, SDATA_TYPE
, old
)
312 GEN_ATOMIC_HELPER_FN(fetch_umin
, MIN
, DATA_TYPE
, old
)
313 GEN_ATOMIC_HELPER_FN(fetch_smax
, MAX
, SDATA_TYPE
, old
)
314 GEN_ATOMIC_HELPER_FN(fetch_umax
, MAX
, DATA_TYPE
, old
)
316 GEN_ATOMIC_HELPER_FN(smin_fetch
, MIN
, SDATA_TYPE
, new)
317 GEN_ATOMIC_HELPER_FN(umin_fetch
, MIN
, DATA_TYPE
, new)
318 GEN_ATOMIC_HELPER_FN(smax_fetch
, MAX
, SDATA_TYPE
, new)
319 GEN_ATOMIC_HELPER_FN(umax_fetch
, MAX
, DATA_TYPE
, new)
321 /* Note that for addition, we need to use a separate cmpxchg loop instead
322 of bswaps for the reverse-host-endian helpers. */
323 #define ADD(X, Y) (X + Y)
324 GEN_ATOMIC_HELPER_FN(fetch_add
, ADD
, DATA_TYPE
, old
)
325 GEN_ATOMIC_HELPER_FN(add_fetch
, ADD
, DATA_TYPE
, new)
328 #undef GEN_ATOMIC_HELPER_FN
329 #endif /* DATA_SIZE >= 16 */
332 #endif /* DATA_SIZE > 1 */