/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This header implements atomic operation primitives on GCC 4.x. */
#ifndef IN_OVS_ATOMIC_H
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#define OVS_ATOMIC_GCC4P_IMPL 1
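
/* Usage sketch (illustrative only; callers include ovs-atomic.h, which
 * selects this implementation on GCC 4.x):
 *
 *     static atomic_uint counter = ATOMIC_VAR_INIT(0);
 *
 *     unsigned int orig;
 *     atomic_add(&counter, 1, &orig);     // "orig" receives the old value.
 *
 *     unsigned int value;
 *     atomic_read(&counter, &value);      // Reads into "value".
 */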

#define DEFINE_LOCKLESS_ATOMIC(TYPE, NAME) typedef struct { TYPE value; } NAME
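
/* For example (illustrative expansion):
 *     DEFINE_LOCKLESS_ATOMIC(int, atomic_int);
 * is equivalent to:
 *     typedef struct { int value; } atomic_int;
 */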

#define ATOMIC_BOOL_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(bool, atomic_bool);

#define ATOMIC_CHAR_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(char, atomic_char);
DEFINE_LOCKLESS_ATOMIC(signed char, atomic_schar);
DEFINE_LOCKLESS_ATOMIC(unsigned char, atomic_uchar);

#define ATOMIC_SHORT_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(short, atomic_short);
DEFINE_LOCKLESS_ATOMIC(unsigned short, atomic_ushort);

#define ATOMIC_INT_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(int, atomic_int);
DEFINE_LOCKLESS_ATOMIC(unsigned int, atomic_uint);

#if ULONG_MAX <= UINTPTR_MAX
    #define ATOMIC_LONG_LOCK_FREE 2
    DEFINE_LOCKLESS_ATOMIC(long, atomic_long);
    DEFINE_LOCKLESS_ATOMIC(unsigned long, atomic_ulong);
#elif ULONG_MAX == UINT64_MAX
    #define ATOMIC_LONG_LOCK_FREE 0
    typedef struct locked_int64 atomic_long;
    typedef struct locked_uint64 atomic_ulong;
#else
    #error "not implemented"
#endif

#if ULLONG_MAX <= UINTPTR_MAX
    #define ATOMIC_LLONG_LOCK_FREE 2
    DEFINE_LOCKLESS_ATOMIC(long long, atomic_llong);
    DEFINE_LOCKLESS_ATOMIC(unsigned long long, atomic_ullong);
#elif ULLONG_MAX == UINT64_MAX
    #define ATOMIC_LLONG_LOCK_FREE 0
    typedef struct locked_int64 atomic_llong;
    typedef struct locked_uint64 atomic_ullong;
#else
    #error "not implemented"
#endif

#if SIZE_MAX <= UINTPTR_MAX
    DEFINE_LOCKLESS_ATOMIC(size_t, atomic_size_t);
    DEFINE_LOCKLESS_ATOMIC(ptrdiff_t, atomic_ptrdiff_t);
#elif SIZE_MAX == UINT64_MAX
    typedef struct locked_uint64 atomic_size_t;
    typedef struct locked_int64 atomic_ptrdiff_t;
#else
    #error "not implemented"
#endif

#if UINTMAX_MAX <= UINTPTR_MAX
    DEFINE_LOCKLESS_ATOMIC(intmax_t, atomic_intmax_t);
    DEFINE_LOCKLESS_ATOMIC(uintmax_t, atomic_uintmax_t);
#elif UINTMAX_MAX == UINT64_MAX
    typedef struct locked_int64 atomic_intmax_t;
    typedef struct locked_uint64 atomic_uintmax_t;
#else
    #error "not implemented"
#endif

#define ATOMIC_POINTER_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(intptr_t, atomic_intptr_t);
DEFINE_LOCKLESS_ATOMIC(uintptr_t, atomic_uintptr_t);

/* Nonstandard atomic types. */
DEFINE_LOCKLESS_ATOMIC(uint8_t, atomic_uint8_t);
DEFINE_LOCKLESS_ATOMIC(uint16_t, atomic_uint16_t);
DEFINE_LOCKLESS_ATOMIC(uint32_t, atomic_uint32_t);
DEFINE_LOCKLESS_ATOMIC(int8_t, atomic_int8_t);
DEFINE_LOCKLESS_ATOMIC(int16_t, atomic_int16_t);
DEFINE_LOCKLESS_ATOMIC(int32_t, atomic_int32_t);
#if UINT64_MAX <= UINTPTR_MAX
    DEFINE_LOCKLESS_ATOMIC(uint64_t, atomic_uint64_t);
    DEFINE_LOCKLESS_ATOMIC(int64_t, atomic_int64_t);
#else
    typedef struct locked_uint64 atomic_uint64_t;
    typedef struct locked_int64 atomic_int64_t;
#endif

typedef enum {
    memory_order_relaxed,
    memory_order_consume,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst
} memory_order;
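
/* This implementation maps these orderings onto GCC's __sync builtins.
 * atomic_thread_fence() below conservatively issues a full barrier
 * (__sync_synchronize()) for every ordering stronger than
 * memory_order_relaxed. */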
\f
/* locked_uint64. */

#define IF_LOCKED_UINT64(OBJECT, THEN, ELSE)                            \
    __builtin_choose_expr(                                              \
        __builtin_types_compatible_p(typeof(OBJECT), struct locked_uint64), \
        (THEN), (ELSE))
#define AS_LOCKED_UINT64(OBJECT) ((struct locked_uint64 *) (void *) (OBJECT))
#define AS_UINT64(OBJECT) ((uint64_t *) (OBJECT))
struct locked_uint64 {
    uint64_t value;
};

uint64_t locked_uint64_load(const struct locked_uint64 *);
void locked_uint64_store(struct locked_uint64 *, uint64_t);
uint64_t locked_uint64_add(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_sub(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_or(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_xor(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_and(struct locked_uint64 *, uint64_t arg);
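
/* These functions are only declared here and are defined out of line; as the
 * ATOMIC_*_LOCK_FREE values of 0 above indicate, they are expected to
 * serialize access under a lock rather than use lock-free instructions.
 * Each read-modify-write function returns the value the object held before
 * the operation, matching the __sync_fetch_and_* builtins used on the
 * lockless path. */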
\f
#define IF_LOCKED_INT64(OBJECT, THEN, ELSE)                             \
    __builtin_choose_expr(                                              \
        __builtin_types_compatible_p(typeof(OBJECT), struct locked_int64), \
        (THEN), (ELSE))
#define AS_LOCKED_INT64(OBJECT) ((struct locked_int64 *) (void *) (OBJECT))
#define AS_INT64(OBJECT) ((int64_t *) (OBJECT))
struct locked_int64 {
    int64_t value;
};
int64_t locked_int64_load(const struct locked_int64 *);
void locked_int64_store(struct locked_int64 *, int64_t);
int64_t locked_int64_add(struct locked_int64 *, int64_t arg);
int64_t locked_int64_sub(struct locked_int64 *, int64_t arg);
int64_t locked_int64_or(struct locked_int64 *, int64_t arg);
int64_t locked_int64_xor(struct locked_int64 *, int64_t arg);
int64_t locked_int64_and(struct locked_int64 *, int64_t arg);
\f
#define ATOMIC_VAR_INIT(VALUE) { .value = (VALUE) }
#define atomic_init(OBJECT, VALUE) ((OBJECT)->value = (VALUE), (void) 0)

static inline void
atomic_thread_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        __sync_synchronize();
    }
}

static inline void
atomic_thread_fence_if_seq_cst(memory_order order)
{
    if (order == memory_order_seq_cst) {
        __sync_synchronize();
    }
}

static inline void
atomic_signal_fence(memory_order order OVS_UNUSED)
{
    if (order != memory_order_relaxed) {
        asm volatile("" : : : "memory");
    }
}

#define ATOMIC_SWITCH(OBJECT, LOCKLESS_CASE,                    \
                      LOCKED_UINT64_CASE, LOCKED_INT64_CASE)    \
    IF_LOCKED_UINT64(OBJECT, LOCKED_UINT64_CASE,                \
                     IF_LOCKED_INT64(OBJECT, LOCKED_INT64_CASE, \
                                     LOCKLESS_CASE))
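
/* ATOMIC_SWITCH() dispatches at compile time: __builtin_types_compatible_p()
 * tests whether OBJECT is one of the locked 64-bit wrapper types, and
 * __builtin_choose_expr() then selects the locked or lockless expression, so
 * the cases not taken are never even type-checked against OBJECT. */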

#define atomic_is_lock_free(OBJ)                \
    ((void) (OBJ)->value,                       \
     ATOMIC_SWITCH(OBJ, true, false, false))

#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)                          \
    (ATOMIC_SWITCH(DST,                                                 \
                   (atomic_thread_fence(ORDER),                         \
                    (DST)->value = (SRC),                               \
                    atomic_thread_fence_if_seq_cst(ORDER)),             \
                   locked_uint64_store(AS_LOCKED_UINT64(DST), SRC),     \
                   locked_int64_store(AS_LOCKED_INT64(DST), SRC)),      \
     (void) 0)

#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)                           \
    (ATOMIC_SWITCH(SRC,                                                 \
                   (atomic_thread_fence_if_seq_cst(ORDER),              \
                    *(DST) = (SRC)->value,                              \
                    atomic_thread_fence(ORDER)),                        \
                   *(DST) = locked_uint64_load(AS_LOCKED_UINT64(SRC)),  \
                   *(DST) = locked_int64_load(AS_LOCKED_INT64(SRC))),   \
     (void) 0)
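
/* Example (illustrative): sequentially consistent store and read.
 *
 *     static atomic_int ready = ATOMIC_VAR_INIT(0);
 *
 *     atomic_store(&ready, 1);            // Fences on both sides for
 *                                         // memory_order_seq_cst.
 *     int seen;
 *     atomic_read(&ready, &seen);         // Reads into "seen".
 */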

#define atomic_op__(RMW, OP, ARG, ORIG)                                 \
    (ATOMIC_SWITCH(RMW,                                                 \
                   *(ORIG) = __sync_fetch_and_##OP(&(RMW)->value, ARG), \
                   *(ORIG) = locked_uint64_##OP(AS_LOCKED_UINT64(RMW), ARG), \
                   *(ORIG) = locked_int64_##OP(AS_LOCKED_INT64(RMW), ARG)), \
     (void) 0)

#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
#define atomic_or( RMW, ARG, ORIG) atomic_op__(RMW, or,  ARG, ORIG)
#define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
#define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)
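
/* Example (illustrative): each read-modify-write operation stores the
 * previous value through its ORIG argument, because __sync_fetch_and_*
 * returns the value the object held before the operation.
 *
 *     static atomic_uint flags = ATOMIC_VAR_INIT(0);
 *
 *     unsigned int orig;
 *     atomic_or(&flags, 0x4, &orig);      // Sets bit 2; "orig" is the
 *                                         // value before the OR.
 */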

#define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
#define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
#define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
#define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
#define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))
\f
/* atomic_flag */

typedef struct {
    int b;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false }

static inline bool
atomic_flag_test_and_set(volatile atomic_flag *object)
{
    return __sync_lock_test_and_set(&object->b, 1);
}

static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
                                  memory_order order OVS_UNUSED)
{
    return atomic_flag_test_and_set(object);
}

static inline void
atomic_flag_clear(volatile atomic_flag *object)
{
    __sync_lock_release(&object->b);
}

static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
                           memory_order order OVS_UNUSED)
{
    atomic_flag_clear(object);
}
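
/* Usage sketch (illustrative only): a simple test-and-set spinlock built on
 * atomic_flag.
 *
 *     static atomic_flag lock = ATOMIC_FLAG_INIT;
 *
 *     while (atomic_flag_test_and_set(&lock)) {
 *         continue;                       // Spin until the holder clears it.
 *     }
 *     ...critical section...
 *     atomic_flag_clear(&lock);
 */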