/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1

#include "qemu/compiler.h"

/* For C11 atomic ops */

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

#ifndef __ATOMIC_RELAXED

/*
 * We use the GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m. However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif


#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

/*
 * Load/store with Java volatile semantics.
 */
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

#endif /* C11 atomics */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
#define smp_wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)
#else
#define smp_wmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)
#else
#define smp_rmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends()   __atomic_thread_fence(__ATOMIC_CONSUME)
#else
#define smp_read_barrier_depends()   barrier()
#endif
#endif
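
/*
 * Illustration (a minimal sketch; the example_* names and the int
 * payload/flag variables are assumptions made for the example only): the
 * classic message-passing pattern these barriers exist for.  The writer
 * orders its payload store before the flag store with smp_wmb(); the
 * reader, once it sees the flag, orders the payload load after the flag
 * load with smp_rmb(), so the two barriers pair with each other.
 */
static inline void example_publish(int *payload, int *flag)
{
    *payload = 42;      /* plain store of the data                  */
    smp_wmb();          /* make the data visible before the flag    */
    *flag = 1;          /* plain store that signals "data is ready" */
}

static inline int example_consume(int *payload, int *flag)
{
    if (*flag) {        /* flag observed as set...                  */
        smp_rmb();      /* ...so order the data load after it       */
        return *payload;
    }
    return -1;          /* nothing published yet                    */
}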

#ifndef atomic_read
#define atomic_read(ptr)       (*(__typeof__(*ptr) *volatile) (ptr))
#endif

#ifndef atomic_set
#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) *volatile) (ptr)) = (i))
#endif

/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store.
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes. Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier.  This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 *  3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr)    ({           \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_rmb();                              \
    _val;                                   \
})
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {         \
    smp_wmb();                              \
    atomic_set(ptr, i);                     \
    smp_mb();                               \
} while (0)
#endif
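
/*
 * Illustration of the pairing described above (a minimal sketch; the
 * example_mb_* names and the mailbox/ready variables are assumptions made
 * for the example only).  atomic_mb_set() issues smp_wmb() before the store
 * and smp_mb() after it; atomic_mb_read() issues smp_rmb() after the load,
 * so a reader that sees the flag also sees the mailbox contents.
 */
static inline void example_mb_send(int *mailbox, int *ready, int value)
{
    *mailbox = value;               /* plain store of the payload        */
    atomic_mb_set(ready, 1);        /* barriers order it before the flag */
}

static inline int example_mb_receive(int *mailbox, int *ready)
{
    if (atomic_mb_read(ready)) {    /* flag load, then smp_rmb()         */
        return *mailbox;            /* payload guaranteed to be visible  */
    }
    return -1;                      /* not ready yet                     */
}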

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#elif defined(__ATOMIC_SEQ_CST)
#define atomic_xchg(ptr, i)    ({                           \
    typeof(*ptr) _new = (i), _old;                          \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                                   \
})
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
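
/*
 * Illustration of atomic_xchg() (a minimal test-and-set spinlock sketch;
 * the example_spin_* names are assumptions made for the example only).
 * On every path above atomic_xchg() acts as at least an acquire on the
 * lock side and orders earlier accesses before the exchange on the unlock
 * side, so no extra fences are needed around the critical section.
 */
static inline void example_spin_lock(int *lock)
{
    /* Keep exchanging in 1 until the old value reads back as 0. */
    while (atomic_xchg(lock, 1)) {
        /* busy wait */
    }
}

static inline void example_spin_unlock(int *lock)
{
    /* The ordering guaranteed by atomic_xchg() keeps the critical
     * section from leaking past the release of the lock. */
    (void) atomic_xchg(lock, 0);
}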

/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap

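/*
 * Illustration of atomic_cmpxchg() (a minimal sketch of the usual
 * compare-and-swap retry loop; example_store_max is an assumption made
 * for the example only).  atomic_cmpxchg() returns the value that was in
 * *ptr before the operation, so a return value equal to the expected old
 * value means the swap took place.
 */
static inline void example_store_max(int *ptr, int val)
{
    int old = atomic_read(ptr);

    while (old < val) {
        int seen = atomic_cmpxchg(ptr, old, val);
        if (seen == old) {
            break;          /* our value was installed              */
        }
        old = seen;         /* lost the race; retry with new value  */
    }
}
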
/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))

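/*
 * Illustration of the void-returning helpers (a minimal reference-count
 * sketch; example_ref/example_unref are assumptions made for the example
 * only).  atomic_fetch_dec() returns the value before the decrement, so a
 * result of 1 tells the caller it just dropped the last reference.
 */
static inline void example_ref(int *refcount)
{
    atomic_inc(refcount);
}

static inline int example_unref(int *refcount)
{
    return atomic_fetch_dec(refcount) == 1;
}
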
#endif