/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This header implements atomic operation primitives on GCC 4.x. */
#ifndef IN_OVS_ATOMIC_H
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#include "ovs-atomic-locked.h"

#define OVS_ATOMIC_GCC4P_IMPL 1

#define ATOMIC(TYPE) TYPE

#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
#define ATOMIC_SHORT_LOCK_FREE 2
#define ATOMIC_INT_LOCK_FREE 2
#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_POINTER_LOCK_FREE 2

typedef enum {
    memory_order_relaxed,
    memory_order_consume,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst
} memory_order;
\f
#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))
\f
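/* Illustrative note (not part of the original header): objects no larger
 * than a pointer take the lock-free __sync path below; anything larger
 * falls back to the mutex-based macros from "ovs-atomic-locked.h".  For
 * example, on an LP64 build (hypothetical types shown):
 *
 *     ATOMIC(int) n;                  // IS_LOCKLESS_ATOMIC(n) is nonzero
 *     struct wide { uint64_t a, b; };
 *     ATOMIC(struct wide) w;          // falls back to the locked variants
 */
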
#define ATOMIC_VAR_INIT(VALUE) VALUE
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)

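/* Usage sketch (illustrative, not part of the original header): an atomic
 * counter can be initialized statically with ATOMIC_VAR_INIT or at run time
 * with atomic_init(); the variable names are hypothetical:
 *
 *     static ATOMIC(unsigned int) n_hits = ATOMIC_VAR_INIT(0);
 *
 *     ATOMIC(unsigned int) n_misses;
 *     atomic_init(&n_misses, 0);
 */
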
static inline void
atomic_thread_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        __sync_synchronize();
    }
}

static inline void
atomic_thread_fence_if_seq_cst(memory_order order)
{
    if (order == memory_order_seq_cst) {
        __sync_synchronize();
    }
}

static inline void
atomic_signal_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        asm volatile("" : : : "memory");
    }
}

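/* Illustrative note (not from the original header): any thread fence other
 * than memory_order_relaxed expands to the full barrier
 * __sync_synchronize(), and atomic_signal_fence() only emits a compiler
 * barrier.  A hypothetical release-then-publish sequence therefore gets a
 * full hardware barrier:
 *
 *     payload = prepare_payload();                  // hypothetical helper
 *     atomic_thread_fence(memory_order_release);    // __sync_synchronize()
 *     atomic_store_explicit(&published, payload, memory_order_relaxed);
 */
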
#define atomic_is_lock_free(OBJ)                \
    ((void) *(OBJ),                             \
     IS_LOCKLESS_ATOMIC(*(OBJ)) ? 2 : 0)

#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)          \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
            atomic_thread_fence(ORDER);                 \
            *(typeof(*(DST)) volatile *)dst__ = src__;  \
            atomic_thread_fence_if_seq_cst(ORDER);      \
        } else {                                        \
            atomic_store_locked(dst__, src__);          \
        }                                               \
        (void) 0;                                       \
    })
#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)           \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*src__)) {               \
            atomic_thread_fence_if_seq_cst(ORDER);      \
            *dst__ = *(typeof(*(SRC)) volatile *)src__; \
        } else {                                        \
            atomic_read_locked(src__, dst__);           \
        }                                               \
        (void) 0;                                       \
    })

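/* Usage sketch (illustrative, not part of the original header): note that
 * atomic_read() delivers the value through its second argument rather than
 * returning it; the variable names are hypothetical:
 *
 *     static ATOMIC(uint64_t) seqno = ATOMIC_VAR_INIT(0);
 *     uint64_t cur;
 *
 *     atomic_store(&seqno, UINT64_C(42));
 *     atomic_read(&seqno, &cur);          // cur == 42
 */
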
#define atomic_compare_exchange_strong(DST, EXP, SRC)              \
    ({                                                             \
        typeof(DST) dst__ = (DST);                                 \
        typeof(EXP) expp__ = (EXP);                                \
        typeof(SRC) src__ = (SRC);                                 \
        typeof(SRC) exp__ = *expp__;                               \
        typeof(SRC) ret__;                                         \
                                                                   \
        ret__ = __sync_val_compare_and_swap(dst__, exp__, src__);  \
        if (ret__ != exp__) {                                      \
            *expp__ = ret__;                                       \
        }                                                          \
        ret__ == exp__;                                            \
    })
#define atomic_compare_exchange_strong_explicit(DST, EXP, SRC, ORD1, ORD2) \
    ((void) (ORD1), (void) (ORD2),                                 \
     atomic_compare_exchange_strong(DST, EXP, SRC))
#define atomic_compare_exchange_weak            \
    atomic_compare_exchange_strong
#define atomic_compare_exchange_weak_explicit   \
    atomic_compare_exchange_strong_explicit

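/* Usage sketch (illustrative, not part of the original header): a typical
 * compare-and-swap retry loop.  On failure the expected value is updated in
 * place, so the loop does not need to re-read the variable itself; the
 * names are hypothetical:
 *
 *     static ATOMIC(int) value = ATOMIC_VAR_INIT(0);
 *     int expected = 0;
 *
 *     while (!atomic_compare_exchange_strong(&value, &expected,
 *                                            expected + 1)) {
 *         // 'expected' now holds the value that was actually found.
 *     }
 */
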
#define atomic_op__(RMW, OP, ARG, ORIG)                     \
    ({                                                      \
        typeof(RMW) rmw__ = (RMW);                          \
        typeof(ARG) arg__ = (ARG);                          \
        typeof(ORIG) orig__ = (ORIG);                       \
                                                            \
        if (IS_LOCKLESS_ATOMIC(*rmw__)) {                   \
            *orig__ = __sync_fetch_and_##OP(rmw__, arg__);  \
        } else {                                            \
            atomic_op_locked(rmw__, OP, arg__, orig__);     \
        }                                                   \
        (void) 0;                                           \
    })

#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
#define atomic_or(RMW, ARG, ORIG)  atomic_op__(RMW, or, ARG, ORIG)
#define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
#define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)

#define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
#define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
#define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
#define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
#define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER) \
    ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))
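
/* Usage sketch (illustrative, not part of the original header): the
 * read-modify-write macros store the value the variable held *before* the
 * operation into ORIG, mirroring C11 atomic_fetch_add().  Names are
 * hypothetical:
 *
 *     static ATOMIC(unsigned int) n_packets = ATOMIC_VAR_INIT(0);
 *     unsigned int orig;
 *
 *     atomic_add(&n_packets, 1, &orig);   // orig == value before the add
 */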
\f
/* atomic_flag */

typedef struct {
    int b;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false }

static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
                                  memory_order order)
{
    bool old;

    /* __sync_lock_test_and_set() by itself is an acquire barrier.
     * For anything stronger, additional barriers are needed. */
    if (order > memory_order_acquire) {
        atomic_thread_fence(order);
    }
    old = __sync_lock_test_and_set(&object->b, 1);
    atomic_thread_fence_if_seq_cst(order);

    return old;
}

#define atomic_flag_test_and_set(FLAG)                                  \
    atomic_flag_test_and_set_explicit(FLAG, memory_order_seq_cst)

static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
                           memory_order order)
{
    /* __sync_lock_release() by itself is a release barrier.  For any other
     * ordering, an additional barrier may be needed. */
    if (order != memory_order_release) {
        atomic_thread_fence(order);
    }
    __sync_lock_release(&object->b);
    atomic_thread_fence_if_seq_cst(order);
}

#define atomic_flag_clear(FLAG)                                 \
    atomic_flag_clear_explicit(FLAG, memory_order_seq_cst)
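
/* Usage sketch (illustrative, not part of the original header): atomic_flag
 * provides the classic test-and-set building block, for example a minimal
 * spinlock; the flag name is hypothetical:
 *
 *     static atomic_flag busy = ATOMIC_FLAG_INIT;
 *
 *     while (atomic_flag_test_and_set(&busy)) {
 *         continue;       // spin until the previous holder clears the flag
 *     }
 *     ... critical section ...
 *     atomic_flag_clear(&busy);
 */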