/*
 * accel/tcg/atomic_template.h
 * (mirror_qemu.git, post "qemu/atomic.h: rename atomic_ to qatomic_")
 */
1 /*
2 * Atomic helper templates
3 * Included from tcg-runtime.c and cputlb.c.
4 *
5 * Copyright (c) 2016 Red Hat, Inc
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/plugin.h"
22 #include "trace/mem.h"
23
/*
 * Per-size template parameters.  The including file defines DATA_SIZE
 * (1, 2, 4, 8 or 16 bytes) before inclusion; from it we derive the
 * helper-name suffix, the host data types, the byte-swap routine and
 * SHIFT == log2(DATA_SIZE) (used to build trace info).
 * Note DATA_SIZE == 16 has no SDATA_TYPE: no signed min/max helpers
 * are generated at that size.
 */
#if DATA_SIZE == 16
# define SUFFIX o
# define DATA_TYPE Int128
# define BSWAP bswap128
# define SHIFT 4
#elif DATA_SIZE == 8
# define SUFFIX q
# define DATA_TYPE uint64_t
# define SDATA_TYPE int64_t
# define BSWAP bswap64
# define SHIFT 3
#elif DATA_SIZE == 4
# define SUFFIX l
# define DATA_TYPE uint32_t
# define SDATA_TYPE int32_t
# define BSWAP bswap32
# define SHIFT 2
#elif DATA_SIZE == 2
# define SUFFIX w
# define DATA_TYPE uint16_t
# define SDATA_TYPE int16_t
# define BSWAP bswap16
# define SHIFT 1
#elif DATA_SIZE == 1
# define SUFFIX b
# define DATA_TYPE uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT 0
#else
# error unsupported data size
#endif

/* Values smaller than 4 bytes are widened to uint32_t at the helper
   call ABI. */
#if DATA_SIZE >= 4
# define ABI_TYPE DATA_TYPE
#else
# define ABI_TYPE uint32_t
#endif

/* Define host-endian atomic operations. Note that END is used within
   the ATOMIC_NAME macro, and redefined below. */
#if DATA_SIZE == 1
# define END
#elif defined(HOST_WORDS_BIGENDIAN)
# define END _be
#else
# define END _le
#endif
72
/*
 * Atomic compare-and-swap, host endian.
 * Returns the value previously in memory at @addr; the store of @newv
 * happened iff that value equals @cmpv.  DATA_SIZE == 16 needs the
 * dedicated Int128 primitive.
 */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                         ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return ret;
}
92
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
/* Atomic 16-byte load, host endian; provided only when the host has a
   true 128-bit atomic read (HAVE_ATOMIC128). */
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                         ATOMIC_MMU_IDX);

    atomic_trace_ld_pre(env, addr, info);
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return val;
}

/* Atomic 16-byte store, host endian. */
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
                                         ATOMIC_MMU_IDX);

    atomic_trace_st_pre(env, addr, info);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, info);
}
#endif
#else
/* Atomic exchange, host endian: store @val, return the old contents. */
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                         ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
    ret = qatomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return ret;
}
139
/*
 * Expand one read-modify-write helper named <X>, host endian, that
 * forwards directly to the matching qatomic_<X> primitive on the
 * host address.
 */
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
                                         ATOMIC_MMU_IDX);           \
    atomic_trace_rmw_pre(env, addr, info);                          \
    ret = qatomic_##X(haddr, val);                                  \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return ret;                                                     \
}

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
166
/* These helpers are, as a whole, full barriers. Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
/* Expand one helper <X> with no direct host primitive: implement it as
   a load + cmpxchg retry loop applying FN.  RET selects fetch-op ("old")
   vs op-fetch ("new") semantics; XDATA_TYPE selects signedness for FN. */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE cmp, old, new, val = xval;                           \
    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
                                         ATOMIC_MMU_IDX);           \
    atomic_trace_rmw_pre(env, addr, info);                          \
    smp_mb();                                                       \
    cmp = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);            \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE >= 16 */
207
#undef END

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations. Note that END is used
   within the ATOMIC_NAME macro.  These exist only for sizes > 1 byte,
   where byte order matters. */
#ifdef HOST_WORDS_BIGENDIAN
# define END _le
#else
# define END _be
#endif
219
/*
 * Atomic compare-and-swap, reverse of host endian.
 * @cmpv/@newv arrive in reverse-of-host order, so byte-swap them before
 * comparing against memory, and swap the memory value back on return.
 */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                         ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}
239
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
/* Atomic 16-byte load, reverse of host endian: byte-swap the value
   read before returning it. */
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                         ATOMIC_MMU_IDX);

    atomic_trace_ld_pre(env, addr, info);
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return BSWAP(val);
}
255
256 void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
257 ABI_TYPE val EXTRA_ARGS)
258 {
259 ATOMIC_MMU_DECLS;
260 DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
261 uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
262 ATOMIC_MMU_IDX);
263
264 val = BSWAP(val);
265 atomic_trace_st_pre(env, addr, info);
266 val = BSWAP(val);
267 atomic16_set(haddr, val);
268 ATOMIC_MMU_CLEANUP;
269 atomic_trace_st_post(env, addr, info);
270 }
271 #endif
#else
/* Atomic exchange, reverse of host endian: swap @val before storing,
   swap the old contents back before returning. */
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    ABI_TYPE ret;
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                         ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}
288
/*
 * Expand one read-modify-write helper named <X>, reverse of host
 * endian, forwarding to qatomic_<X> with the operand byte-swapped.
 * Only bitwise operations are instantiated this way: swapping both
 * operand and result commutes with and/or/xor, but not with addition,
 * which is handled by a cmpxchg loop further down.
 */
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
                                         false, ATOMIC_MMU_IDX);    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    ret = qatomic_##X(haddr, BSWAP(val));                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
313
/* These helpers are, as a whole, full barriers. Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
/* As the host-endian variant above, but for reverse endianness:
   ldo/ldn hold the raw (byte-swapped-in-memory) value used by the
   cmpxchg, while old/new hold the logical value FN operates on. */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
                                         false, ATOMIC_MMU_IDX);    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    smp_mb();                                                       \
    ldn = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));     \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers. */
#define ADD(X, Y) (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */
361
#undef END
#endif /* DATA_SIZE > 1 */

/* Drop all per-inclusion template parameters so this header can be
   included again with a different DATA_SIZE. */
#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT