/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/ops_gcc_sync.hpp
 *
 * This header contains an implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_

#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#include <boost/atomic/capabilities.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

struct gcc_sync_operations_base
{
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
    {
        // A release (or stronger) store requires a full barrier before the plain store
        if ((order & memory_order_release) != 0)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
    {
        // Only a seq_cst store additionally requires a full barrier after it
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
    {
        // An acquire or consume (or stronger) load requires a full barrier after the plain load
        if ((order & (memory_order_acquire | memory_order_consume)) != 0)
            __sync_synchronize();
    }
};
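
// Note: the __sync family provides no dedicated atomic load or store builtins.
// The load() and store() below therefore use plain memory accesses, relying on
// suitably aligned accesses of the natively supported sizes being atomic on
// the target; the fences above supply the ordering.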

template< typename T >
struct gcc_sync_operations :
    public gcc_sync_operations_base
{
    typedef T storage_type;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        fence_before_store(order);
        storage = v;
        fence_after_store(order);
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_add(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_sub(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic.
        // However, GCC's implementation of std::atomic<> uses this intrinsic unconditionally. We do so as well.
        // If some architectures turn out not to support it, we can always add a check here and fall back to a CAS loop.
        //
        // __sync_lock_test_and_set is only an acquire barrier, so a full
        // barrier is emitted first when release semantics are requested.
        if ((order & memory_order_release) != 0)
            __sync_synchronize();
        return __sync_lock_test_and_set(&storage, v);
    }
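
    // For reference, the CAS-loop fallback mentioned above could look like
    // this (a sketch only, not compiled as part of this header):
    //
    //   storage_type old_val = storage;
    //   while (true)
    //   {
    //       storage_type prev = __sync_val_compare_and_swap(&storage, old_val, v);
    //       if (prev == old_val)
    //           return prev; // the previous value, as exchange() returns
    //       old_val = prev;
    //   }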

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type expected2 = expected;
        storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);

        if (old_val == expected2)
        {
            return true;
        }
        else
        {
            // On failure, report the value actually observed back to the caller
            expected = old_val;
            return false;
        }
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        // __sync_val_compare_and_swap cannot fail spuriously, so the weak form
        // is simply the strong one
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_and(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_or(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_xor(&storage, v);
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        // As in exchange(), a full barrier is needed first when release
        // semantics are requested
        if ((order & memory_order_release) != 0)
            __sync_synchronize();
        return !!__sync_lock_test_and_set(&storage, 1);
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        // __sync_lock_release is a release barrier; seq_cst additionally
        // requires a full barrier after the store
        __sync_lock_release(&storage);
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
    {
        return true;
    }
};
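
// Usage sketch (hypothetical, not part of this header): test_and_set() and
// clear() are enough to build a minimal spin lock, which is essentially what
// boost::atomic_flag does on top of these operations:
//
//   typedef gcc_sync_operations< unsigned char > flag_ops; // hypothetical alias
//   static flag_ops::storage_type volatile lock_storage = 0;
//
//   while (flag_ops::test_and_set(lock_storage, memory_order_acquire))
//   {
//       // spin: another thread holds the lock
//   }
//   // ... critical section ...
//   flag_ops::clear(lock_storage, memory_order_release);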

#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
template< bool Signed >
struct operations< 1u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
    public gcc_sync_operations< typename make_storage_type< 1u, Signed >::type >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
#endif
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
#else
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
#endif
};
#endif
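
// The same dispatch repeats for the wider sizes below: the smallest storage
// width for which GCC predefines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_N is
// selected, and when that width exceeds the requested size, the operations are
// emulated on the wider storage through extending_cas_based_operations.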

#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
template< bool Signed >
struct operations< 2u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    public gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
#endif
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
#else
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
#endif
};
#endif

#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
template< bool Signed >
struct operations< 4u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
#endif
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
#else
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
#endif
};
#endif

#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
template< bool Signed >
struct operations< 8u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >
#else
    public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
#endif
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
#else
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
#endif
};
#endif

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
template< bool Signed >
struct operations< 16u, Signed > :
    public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >
{
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#endif
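
// Note: the 16-byte specialization above has no extending fallback. It is only
// instantiated when capabilities.hpp reports 128-bit operations as lock-free,
// which for this backend presumably implies a native 16-byte compare-and-swap
// (e.g. x86-64 built with -mcx16).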

BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    // __sync_synchronize is a full memory barrier, which satisfies every
    // ordering except relaxed
    if (order != memory_order_relaxed)
        __sync_synchronize();
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    // A compiler-only barrier: prevents compiler reordering but emits no
    // hardware fence instruction
    if (order != memory_order_relaxed)
        __asm__ __volatile__ ("" ::: "memory");
}

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_