/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */
#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1

#include "qemu/compiler.h"
/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */
#define smp_mb()    ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })

#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
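
/* Illustrative sketch (not part of the original header): smp_mb() is the only
 * barrier above that orders a store against a later load (StoreLoad).  In a
 * Dekker-style handshake with hypothetical zero-initialized globals
 * "flag1"/"flag2", omitting it would let both threads enter at once:
 *
 *     // thread 1                // thread 2
 *     flag1 = 1;                 flag2 = 1;
 *     smp_mb();                  smp_mb();
 *     if (flag2 == 0) {          if (flag1 == 0) {
 *         // critical section        // critical section
 *     }                          }
 */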
/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 */
#define atomic_read(ptr)                          \
    ({                                            \
    typeof(*ptr) _val;                            \
    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);  \
    _val;                                         \
    })

#define atomic_set(ptr, i)  do {                  \
    typeof(*ptr) _val = (i);                      \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
} while(0)
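
/* Illustrative usage sketch (not part of the original header): the weak
 * accessors are normally paired with explicit barriers.  A minimal
 * message-passing pattern, with hypothetical globals "payload" and "ready":
 *
 *     // producer
 *     atomic_set(&payload, 42);
 *     smp_wmb();                      // order payload store before flag store
 *     atomic_set(&ready, 1);
 *
 *     // consumer
 *     if (atomic_read(&ready)) {
 *         smp_rmb();                  // pairs with the producer's smp_wmb()
 *         assert(atomic_read(&payload) == 42);
 *     }
 */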
/* Atomic RCU operations imply weak memory barriers */
#define atomic_rcu_read(ptr)                      \
    ({                                            \
    typeof(*ptr) _val;                            \
    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);  \
    _val;                                         \
    })

#define atomic_rcu_set(ptr, i)  do {              \
    typeof(*ptr) _val = (i);                      \
    __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
} while(0)
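
/* Illustrative usage sketch (not part of the original header): publishing a
 * structure to RCU readers.  "struct Foo", "global_foo" and the field names
 * are hypothetical:
 *
 *     struct Foo *foo = g_new0(struct Foo, 1);
 *     foo->field = 42;                    // initialize fully first...
 *     atomic_rcu_set(&global_foo, foo);   // ...then publish (release)
 *
 *     // reader, inside an RCU read-side critical section:
 *     struct Foo *p = atomic_rcu_read(&global_foo);  // consume ordering
 *     if (p) {
 *         int v = p->field;   // initialization happens-before publication
 *     }
 */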
/* atomic_mb_read/set semantics map to Java volatile variables. They are
 * less expensive on some platforms (notably POWER & ARMv7) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use. See docs/atomics.txt for more discussion.
 */
#if defined(_ARCH_PPC)
#define atomic_mb_read(ptr)                       \
    ({                                            \
    typeof(*ptr) _val;                            \
    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);  \
    smp_rmb();                                    \
    _val;                                         \
    })

#define atomic_mb_set(ptr, i)  do {               \
    typeof(*ptr) _val = (i);                      \
    smp_wmb();                                    \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
    smp_mb();                                     \
} while(0)
#else
#define atomic_mb_read(ptr)                       \
    ({                                            \
    typeof(*ptr) _val;                            \
    __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST);  \
    _val;                                         \
    })

#define atomic_mb_set(ptr, i)  do {               \
    typeof(*ptr) _val = (i);                      \
    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
} while(0)
#endif
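
/* Illustrative usage sketch (not part of the original header): safety relies
 * on pairing an atomic_mb_set with an atomic_mb_read, as with Java volatiles.
 * "done", "result", compute() and use() are hypothetical:
 *
 *     // writer
 *     result = compute();
 *     atomic_mb_set(&done, 1);     // orders the result store before the flag
 *
 *     // reader
 *     if (atomic_mb_read(&done)) { // orders the flag load before later loads
 *         use(result);             // guaranteed to observe the result
 *     }
 */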
/* All the remaining operations are fully sequentially consistent */
#define atomic_xchg(ptr, i)    ({                           \
    typeof(*ptr) _new = (i), _old;                          \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                                   \
    })
/* Returns the eventual value, failed or not */
#define atomic_cmpxchg(ptr, old, new)                              \
    ({                                                             \
    typeof(*ptr) _old = (old), _new = (new);                       \
    __atomic_compare_exchange(ptr, &_old, &_new, false,            \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old;                                                          \
    })
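
/* Illustrative usage sketch (not part of the original header): since
 * atomic_cmpxchg() returns the value that was actually in memory, success is
 * detected by comparing it with the expected value.  "counter" is a
 * hypothetical int global:
 *
 *     int old, seen;
 *     do {
 *         old = atomic_read(&counter);
 *         seen = atomic_cmpxchg(&counter, old, old + 1);
 *     } while (seen != old);   // seen == old means our update was applied
 */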
/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
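
/* Illustrative usage sketch (not part of the original header): the fetch
 * variants return the previous value, which is what reference counting
 * needs.  "obj->refcount" and obj_free() are hypothetical:
 *
 *     atomic_inc(&obj->refcount);                 // take a reference
 *
 *     if (atomic_fetch_dec(&obj->refcount) == 1) {
 *         obj_free(obj);                          // dropped the last reference
 *     }
 */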
#else  /* __ATOMIC_RELAXED */

/*
 * We use GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m. However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif

#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))
/*
 * Load/store with Java volatile semantics.
 */
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)
/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })

#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif

#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#define smp_wmb()   __sync_synchronize()
#endif

#ifndef smp_rmb
#define smp_rmb()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif
/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define atomic_read(ptr)       (*(__typeof__(*ptr) volatile*) (ptr))
#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))
/**
 * atomic_rcu_read - reads a RCU-protected pointer to a local variable
 * into a RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({            \
    typeof(*ptr) _val = atomic_read(ptr);     \
    smp_read_barrier_depends();               \
    _val;                                     \
})
/**
 * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {          \
    smp_wmb();                                \
    atomic_set(ptr, i);                       \
} while (0)
/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store."
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes. Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier. This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 *  3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#define atomic_mb_read(ptr)    ({             \
    typeof(*ptr) _val = atomic_read(ptr);     \
    smp_rmb();                                \
    _val;                                     \
})

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {           \
    smp_wmb();                                \
    atomic_set(ptr, i);                       \
    smp_mb();                                 \
} while (0)
#endif
#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap

/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))

#endif /* __ATOMIC_RELAXED */
#endif /* __QEMU_ATOMIC_H */