/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1

#include "qemu/compiler.h"

/* For C11 atomic ops */

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

#ifndef __ATOMIC_RELAXED

/*
 * We use the GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m. However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif

#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

5444e768 46#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
a281ebc1 47
5444e768
PB
48/*
49 * Because of the strongly ordered storage model, wmb() and rmb() are nops
50 * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
51 * qemu memory or non-temporal load/stores from C code.
52 */
a281ebc1 53#define smp_wmb() barrier()
a821ce59 54#define smp_rmb() barrier()
5444e768
PB
55
56/*
57 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
58 * but it is a full barrier at the hardware level. Add a compiler barrier
59 * to make it a full barrier also at the compiler level.
60 */
61#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
62
63/*
64 * Load/store with Java volatile semantics.
65 */
66#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))
e2251708 67
#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

#endif /* C11 atomics */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
#define smp_wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)
#else
#define smp_wmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)
#else
#define smp_rmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends()   __atomic_thread_fence(__ATOMIC_CONSUME)
#else
#define smp_read_barrier_depends()   barrier()
#endif
#endif
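
/*
 * A minimal usage sketch of how smp_wmb() and smp_rmb() pair up: a producer
 * fills in a payload and only then sets a flag; a consumer tests the flag and
 * only then reads the payload.  The names example_payload, example_ready,
 * compute() and use() are hypothetical.
 *
 *     int example_payload;
 *     int example_ready;
 *
 *     producer:
 *         example_payload = compute();   (fill in the data first)
 *         smp_wmb();                     (order payload store before flag store)
 *         example_ready = 1;
 *
 *     consumer:
 *         if (example_ready) {
 *             smp_rmb();                 (order flag load before payload load)
 *             use(example_payload);
 *         }
 *
 * Real code would typically also wrap the shared accesses in atomic_read()/
 * atomic_set(), defined below, to keep the compiler from tearing or caching
 * them.
 */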

#ifndef atomic_read
#define atomic_read(ptr)       (*(__typeof__(*ptr) volatile*) (ptr))
#endif

#ifndef atomic_set
#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))
#endif

/**
 * atomic_rcu_read - reads an RCU-protected pointer into a local variable
 * inside an RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * Unless the __ATOMIC_CONSUME memory order is available, atomic_rcu_read also
 * includes a compiler barrier to ensure that value-speculative optimizations
 * (e.g. VSS: Value Speculation Scheduling) do not perform the data read
 * before the pointer read by speculating the value of the pointer. On new
 * enough compilers, atomic_load takes care of such concerns about
 * dependency-breaking optimizations.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#ifndef atomic_rcu_read
#ifdef __ATOMIC_CONSUME
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val;                            \
    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);  \
    _val;                                         \
})
#else
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})
#endif
#endif

/**
 * atomic_rcu_set - assigns (publishes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#ifndef atomic_rcu_set
#ifdef __ATOMIC_RELEASE
#define atomic_rcu_set(ptr, i)  do {              \
    typeof(*ptr) _val = (i);                      \
    __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
} while (0)
#else
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)
#endif
#endif
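
/*
 * A minimal usage sketch for the two macros above: a writer fully initializes
 * a new structure and then publishes it with atomic_rcu_set(); readers fetch
 * the pointer once with atomic_rcu_read() and dereference their local copy.
 * The names struct conf, global_conf, update_conf and read_timeout are
 * hypothetical; in real code the reader would run inside an RCU read-side
 * critical section and the old structure would only be freed after a grace
 * period.
 *
 *     struct conf { int timeout; };
 *     struct conf *global_conf;
 *
 *     void update_conf(struct conf *newconf)
 *     {
 *         atomic_rcu_set(&global_conf, newconf);   (publish after full init)
 *     }
 *
 *     int read_timeout(void)
 *     {
 *         struct conf *c = atomic_rcu_read(&global_conf);
 *         return c->timeout;                       (dependency-ordered load)
 *     }
 */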

/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store.
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes.  Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier.  This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 *  3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr)    ({           \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_rmb();                              \
    _val;                                   \
})
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {         \
    smp_wmb();                              \
    atomic_set(ptr, i);                     \
    smp_mb();                               \
} while (0)
#endif
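
/*
 * A minimal usage sketch, following the Java-volatile rules quoted above: one
 * thread raises a shared flag with atomic_mb_set(), another polls it with
 * atomic_mb_read(); the barriers make the store visible promptly and keep
 * surrounding accesses ordered on both sides.  The names example_stop_request
 * and do_work() are hypothetical.
 *
 *     int example_stop_request;
 *
 *     requester:
 *         atomic_mb_set(&example_stop_request, 1);
 *
 *     worker loop:
 *         while (!atomic_mb_read(&example_stop_request)) {
 *             do_work();
 *         }
 */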

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#elif defined(__ATOMIC_SEQ_CST)
#define atomic_xchg(ptr, i)    ({                           \
    typeof(*ptr) _new = (i), _old;                          \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                                   \
})
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
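
/*
 * A minimal sketch of atomic_xchg() used as a test-and-set lock; because the
 * exchange acts as a full barrier, the critical section cannot leak out of
 * the lock/unlock pair.  The names locked, example_lock and example_unlock
 * are hypothetical; real QEMU code would use the proper locking primitives
 * instead of open-coded spinning.
 *
 *     int locked;
 *
 *     void example_lock(void)
 *     {
 *         while (atomic_xchg(&locked, 1)) {
 *             ;                          (spin until we are the 0 -> 1 writer)
 *         }
 *     }
 *
 *     void example_unlock(void)
 *     {
 *         atomic_mb_set(&locked, 0);     (store with barriers on both sides)
 *     }
 */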

/* Provide shorter names for GCC atomic builtins. */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap

/* And even shorter names that return void. */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
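
/*
 * A minimal sketch of a compare-and-swap retry loop built on atomic_cmpxchg():
 * read the current value, compute the update, and retry if another thread
 * changed the value in the meantime.  The name example_bounded_inc and the
 * bound parameter are hypothetical.
 *
 *     int example_bounded_inc(int *counter, int bound)
 *     {
 *         int old, new;
 *         do {
 *             old = atomic_read(counter);
 *             if (old >= bound) {
 *                 return old;            (already at the limit, do not bump)
 *             }
 *             new = old + 1;
 *         } while (atomic_cmpxchg(counter, old, new) != old);
 *         return new;
 *     }
 */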

#endif /* __QEMU_ATOMIC_H */