/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1



/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()    ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })

#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })

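/* Illustrative sketch (not part of the original header): the classic
 * message-passing pairing of these barriers.  A producer's smp_wmb()
 * pairs with a consumer's smp_rmb(); the variable names and values
 * are hypothetical.
 *
 *     int msg, ready;
 *
 *     // producer                  // consumer
 *     msg = 42;                    if (ready) {
 *     smp_wmb();                       smp_rmb();
 *     ready = 1;                       assert(msg == 42);
 *                                  }
 */
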
/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 */
#define atomic_read(ptr)                              \
    ({                                                \
        typeof(*ptr) _val;                            \
        __atomic_load(ptr, &_val, __ATOMIC_RELAXED);  \
        _val;                                         \
    })

#define atomic_set(ptr, i) do {                       \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
} while (0)

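/* Illustrative sketch (not part of the original header): a relaxed
 * read and update of a hypothetical statistics counter, where only
 * the atomicity of each access matters, not its ordering against
 * other memory operations:
 *
 *     static int stats_count;
 *
 *     atomic_set(&stats_count, 0);
 *     ...
 *     int snapshot = atomic_read(&stats_count);
 */
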
/* Atomic RCU operations imply weak memory barriers */

#define atomic_rcu_read(ptr)                          \
    ({                                                \
        typeof(*ptr) _val;                            \
        __atomic_load(ptr, &_val, __ATOMIC_CONSUME);  \
        _val;                                         \
    })

#define atomic_rcu_set(ptr, i) do {                   \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);     \
} while (0)

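/* Illustrative sketch (not part of the original header): publishing a
 * hypothetical initialized structure with atomic_rcu_set() and reading
 * it back with atomic_rcu_read().  RELEASE orders the initialization
 * before the pointer store; CONSUME orders the dereference after the
 * pointer load.
 *
 *     struct Foo *f = g_new0(struct Foo, 1);
 *     f->x = 1;                         // initialize before publication
 *     atomic_rcu_set(&global_foo, f);
 *
 *     struct Foo *p = atomic_rcu_read(&global_foo);
 *     if (p) {
 *         use(p->x);                    // dependency-ordered after the load
 *     }
 */
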
/* atomic_mb_read/set semantics map to Java volatile variables. They are
 * less expensive on some platforms (notably POWER & ARMv7) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use. See docs/atomics.txt for more discussion.
 */

#if defined(_ARCH_PPC)
#define atomic_mb_read(ptr)                           \
    ({                                                \
        typeof(*ptr) _val;                            \
        __atomic_load(ptr, &_val, __ATOMIC_RELAXED);  \
        smp_rmb();                                    \
        _val;                                         \
    })

#define atomic_mb_set(ptr, i) do {                    \
    typeof(*ptr) _val = (i);                          \
    smp_wmb();                                        \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
    smp_mb();                                         \
} while (0)
#else
#define atomic_mb_read(ptr)                           \
    ({                                                \
        typeof(*ptr) _val;                            \
        __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST);  \
        _val;                                         \
    })

#define atomic_mb_set(ptr, i) do {                    \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST);     \
} while (0)
#endif

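/* Illustrative sketch (not part of the original header): a flag
 * handoff pairing atomic_mb_set() with atomic_mb_read(), as discussed
 * above; the variable and handler names are hypothetical.
 *
 *     static int request_pending;
 *
 *     // writer
 *     atomic_mb_set(&request_pending, 1);
 *
 *     // reader
 *     if (atomic_mb_read(&request_pending)) {
 *         handle_request();
 *     }
 */
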

/* All the remaining operations are fully sequentially consistent */

#define atomic_xchg(ptr, i)    ({                                   \
    typeof(*ptr) _new = (i), _old;                                  \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST);         \
    _old;                                                           \
})

/* Returns the eventual value, failed or not */
#define atomic_cmpxchg(ptr, old, new)                               \
    ({                                                              \
        typeof(*ptr) _old = (old), _new = (new);                    \
        __atomic_compare_exchange(ptr, &_old, &_new, false,         \
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
        _old;                                                       \
    })

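/* Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on atomic_cmpxchg(), here doubling
 * a hypothetical counter.  Because atomic_cmpxchg() returns the value
 * actually found in memory, an iteration succeeded exactly when the
 * returned value equals the expected old value.
 *
 *     int old, seen = atomic_read(&counter);
 *     do {
 *         old = seen;
 *         seen = atomic_cmpxchg(&counter, old, old * 2);
 *     } while (seen != old);
 */
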
/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)    __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)    __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)

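/* Illustrative sketch (not part of the original header): a hypothetical
 * reference count maintained with the fetch-and-op wrappers.  The
 * holder that sees the count drop from 1 released the last reference
 * and frees the object.
 *
 *     atomic_fetch_inc(&obj->refcount);        // take a reference
 *
 *     if (atomic_fetch_dec(&obj->refcount) == 1) {
 *         free_object(obj);                    // last reference gone
 *     }
 */
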
/* And even shorter names that return void. */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))

#else /* __ATOMIC_RELAXED */

/*
 * We use the GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m. However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp)" ::: "memory"); (void)0; })
#endif
#endif
#endif


#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level. Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

/*
 * Load/store with Java volatile semantics.
 */
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc. This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier. This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#define smp_wmb()   __sync_synchronize()
#endif

#ifndef smp_rmb
#define smp_rmb()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define atomic_read(ptr)    (*(__typeof__(*ptr) volatile*) (ptr))
#define atomic_set(ptr, i)  ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))

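/* For instance (an illustration, not part of the original header): on a
 * 32-bit host a 64-bit access typically compiles to two 32-bit memory
 * operations, so atomic_read()/atomic_set() on a uint64_t would not be
 * atomic there, while an aligned uint32_t access would be.
 */
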
/**
 * atomic_rcu_read - reads an RCU-protected pointer to a local variable
 * inside an RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/**
 * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

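/* Illustrative sketch (not part of the original header): the
 * publication pattern described above, with hypothetical names.  The
 * smp_wmb() inside atomic_rcu_set() keeps the initialization of the
 * structure ordered before the pointer store that publishes it.
 *
 *     struct Foo *f = g_new0(struct Foo, 1);
 *     f->x = 1;                        // initialization...
 *     atomic_rcu_set(&global_foo, f);  // ...is ordered before publication
 */
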
/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store.
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes. Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier. This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 *  3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#define atomic_mb_read(ptr)    ({                 \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_rmb();                                    \
    _val;                                         \
})

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {               \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
    smp_mb();                                     \
} while (0)
#endif

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif

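/* Illustrative sketch (not part of the original header): atomic_xchg()
 * as a minimal test-and-set spinlock, a typical use for a full-barrier
 * exchange; the lock variable is hypothetical.
 *
 *     static int lock;
 *
 *     while (atomic_xchg(&lock, 1)) {
 *         // spin: the old value was 1, so someone else holds the lock
 *     }
 *     // ... critical section ...
 *     atomic_mb_set(&lock, 0);         // release with full ordering
 */
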
/* Provide shorter names for GCC atomic builtins. */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap

/* And even shorter names that return void. */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))

#endif /* __ATOMIC_RELAXED */
#endif /* __QEMU_ATOMIC_H */