/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/plugin.h"
22 #include "trace/mem.h"
26 # define DATA_TYPE Int128
27 # define BSWAP bswap128
31 # define DATA_TYPE uint64_t
32 # define SDATA_TYPE int64_t
33 # define BSWAP bswap64
37 # define DATA_TYPE uint32_t
38 # define SDATA_TYPE int32_t
39 # define BSWAP bswap32
43 # define DATA_TYPE uint16_t
44 # define SDATA_TYPE int16_t
45 # define BSWAP bswap16
49 # define DATA_TYPE uint8_t
50 # define SDATA_TYPE int8_t
54 # error unsupported data size
58 # define ABI_TYPE DATA_TYPE
60 # define ABI_TYPE uint32_t
/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
#else
# define END  _le
#endif
73 ABI_TYPE
ATOMIC_NAME(cmpxchg
)(CPUArchState
*env
, target_ulong addr
,
74 ABI_TYPE cmpv
, ABI_TYPE newv EXTRA_ARGS
)
77 DATA_TYPE
*haddr
= ATOMIC_MMU_LOOKUP
;
79 uint16_t info
= trace_mem_build_info(SHIFT
, false, 0, false,
82 atomic_trace_rmw_pre(env
, addr
, info
);
84 ret
= atomic16_cmpxchg(haddr
, cmpv
, newv
);
86 ret
= qatomic_cmpxchg__nocheck(haddr
, cmpv
, newv
);
89 atomic_trace_rmw_post(env
, addr
, info
);
95 ABI_TYPE
ATOMIC_NAME(ld
)(CPUArchState
*env
, target_ulong addr EXTRA_ARGS
)
98 DATA_TYPE val
, *haddr
= ATOMIC_MMU_LOOKUP
;
99 uint16_t info
= trace_mem_build_info(SHIFT
, false, 0, false,
102 atomic_trace_ld_pre(env
, addr
, info
);
103 val
= atomic16_read(haddr
);
105 atomic_trace_ld_post(env
, addr
, info
);
109 void ATOMIC_NAME(st
)(CPUArchState
*env
, target_ulong addr
,
110 ABI_TYPE val EXTRA_ARGS
)
113 DATA_TYPE
*haddr
= ATOMIC_MMU_LOOKUP
;
114 uint16_t info
= trace_mem_build_info(SHIFT
, false, 0, true,
117 atomic_trace_st_pre(env
, addr
, info
);
118 atomic16_set(haddr
, val
);
120 atomic_trace_st_post(env
, addr
, info
);
124 ABI_TYPE
ATOMIC_NAME(xchg
)(CPUArchState
*env
, target_ulong addr
,
125 ABI_TYPE val EXTRA_ARGS
)
128 DATA_TYPE
*haddr
= ATOMIC_MMU_LOOKUP
;
130 uint16_t info
= trace_mem_build_info(SHIFT
, false, 0, false,
133 atomic_trace_rmw_pre(env
, addr
, info
);
134 ret
= qatomic_xchg__nocheck(haddr
, val
);
136 atomic_trace_rmw_post(env
, addr
, info
);
140 #define GEN_ATOMIC_HELPER(X) \
141 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
142 ABI_TYPE val EXTRA_ARGS) \
145 DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
147 uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
149 atomic_trace_rmw_pre(env, addr, info); \
150 ret = qatomic_##X(haddr, val); \
151 ATOMIC_MMU_CLEANUP; \
152 atomic_trace_rmw_post(env, addr, info); \
156 GEN_ATOMIC_HELPER(fetch_add
)
157 GEN_ATOMIC_HELPER(fetch_and
)
158 GEN_ATOMIC_HELPER(fetch_or
)
159 GEN_ATOMIC_HELPER(fetch_xor
)
160 GEN_ATOMIC_HELPER(add_fetch
)
161 GEN_ATOMIC_HELPER(and_fetch
)
162 GEN_ATOMIC_HELPER(or_fetch
)
163 GEN_ATOMIC_HELPER(xor_fetch
)
165 #undef GEN_ATOMIC_HELPER
167 /* These helpers are, as a whole, full barriers. Within the helper,
168 * the leading barrier is explicit and the trailing barrier is within
171 * Trace this load + RMW loop as a single RMW op. This way, regardless
172 * of CF_PARALLEL's value, we'll trace just a read and a write.
174 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
175 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
176 ABI_TYPE xval EXTRA_ARGS) \
179 XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
180 XDATA_TYPE cmp, old, new, val = xval; \
181 uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
183 atomic_trace_rmw_pre(env, addr, info); \
185 cmp = qatomic_read__nocheck(haddr); \
187 old = cmp; new = FN(old, val); \
188 cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
189 } while (cmp != old); \
190 ATOMIC_MMU_CLEANUP; \
191 atomic_trace_rmw_post(env, addr, info); \
195 GEN_ATOMIC_HELPER_FN(fetch_smin
, MIN
, SDATA_TYPE
, old
)
196 GEN_ATOMIC_HELPER_FN(fetch_umin
, MIN
, DATA_TYPE
, old
)
197 GEN_ATOMIC_HELPER_FN(fetch_smax
, MAX
, SDATA_TYPE
, old
)
198 GEN_ATOMIC_HELPER_FN(fetch_umax
, MAX
, DATA_TYPE
, old
)
200 GEN_ATOMIC_HELPER_FN(smin_fetch
, MIN
, SDATA_TYPE
, new)
201 GEN_ATOMIC_HELPER_FN(umin_fetch
, MIN
, DATA_TYPE
, new)
202 GEN_ATOMIC_HELPER_FN(smax_fetch
, MAX
, SDATA_TYPE
, new)
203 GEN_ATOMIC_HELPER_FN(umax_fetch
, MAX
, DATA_TYPE
, new)
205 #undef GEN_ATOMIC_HELPER_FN
206 #endif /* DATA SIZE >= 16 */
#undef END

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
#else
# define END  _be
#endif

/*
 * Atomic compare-and-swap, reverse host byte order.  Inputs are
 * byte-swapped into memory order before the CAS and the old value is
 * swapped back before returning.
 */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,
                                         false, ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
/* 16-byte atomic load, reverse host byte order.  */
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,
                                         false, ATOMIC_MMU_IDX);

    atomic_trace_ld_pre(env, addr, info);
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return BSWAP(val);
}

/* 16-byte atomic store, reverse host byte order.  */
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,
                                         true, ATOMIC_MMU_IDX);

    /* Swap into memory byte order before the atomic store.  */
    val = BSWAP(val);
    atomic_trace_st_pre(env, addr, info);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, info);
}
#endif
#else
/* Atomic exchange, reverse host byte order.  */
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    ABI_TYPE ret;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,
                                         false, ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}

/*
 * As the host-endian variant, but the operand and result pass through
 * BSWAP.  Only bitwise ops are generated this way: a byte-swapped AND/
 * OR/XOR is still correct, whereas addition is not (see ADD below).
 */
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
                                         false, ATOMIC_MMU_IDX);    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    ret = qatomic_##X(haddr, BSWAP(val));                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg itself.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
                                         false, ATOMIC_MMU_IDX);    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    smp_mb();                                                       \
    ldn = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        /* ldo/ldn are in memory order; old/new in host order.  */  \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));     \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#endif /* DATA_SIZE > 1 */