/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/ops_gcc_sync.hpp
 *
 * This header contains implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#include <boost/atomic/capabilities.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

struct gcc_sync_operations_base
{
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
            __sync_synchronize();
    }
};

template< std::size_t Size, bool Signed >
struct gcc_sync_operations :
    public gcc_sync_operations_base
{
    typedef typename storage_traits< Size >::type storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        fence_before_store(order);
        storage = v;
        fence_after_store(order);
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_add(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_sub(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
        // std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures turn out not to support it, we can always
        // add a check here and fall back to a CAS loop.
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
        return __sync_lock_test_and_set(&storage, v);
    }
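
    // Illustrative sketch, not part of the original header: the CAS-loop fallback
    // mentioned in the comment above could look roughly like this. The name
    // exchange_via_cas is hypothetical. __sync_val_compare_and_swap already acts as
    // a full barrier, so no additional fence is issued before the loop.
    static BOOST_FORCEINLINE storage_type exchange_via_cas(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type old_val = storage;
        while (true)
        {
            // Attempt to install v; if another thread changed the value in the meantime,
            // retry with the value that was actually observed.
            storage_type res = __sync_val_compare_and_swap(&storage, old_val, v);
            if (res == old_val)
                return res;
            old_val = res;
        }
    }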

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type expected2 = expected;
        storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);

        if (old_val == expected2)
        {
            return true;
        }
        else
        {
            expected = old_val;
            return false;
        }
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_and(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_or(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_xor(&storage, v);
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
        return !!__sync_lock_test_and_set(&storage, 1);
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        __sync_lock_release(&storage);
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }
};

#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
template< bool Signed >
struct operations< 1u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
    public gcc_sync_operations< 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    public extending_cas_based_operations< gcc_sync_operations< 2u, Signed >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 1u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 1u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
template< bool Signed >
struct operations< 2u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    public gcc_sync_operations< 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 2u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 2u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
template< bool Signed >
struct operations< 4u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public gcc_sync_operations< 4u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 4u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 4u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
template< bool Signed >
struct operations< 8u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public gcc_sync_operations< 8u, Signed >
#else
    public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 8u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
template< bool Signed >
struct operations< 16u, Signed > :
    public gcc_sync_operations< 16u, Signed >
{
};
#endif

BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order != memory_order_relaxed)
        __sync_synchronize();
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order != memory_order_relaxed)
        __asm__ __volatile__ ("" ::: "memory");
}

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_