]> git.proxmox.com Git - ceph.git/blob - ceph/src/boost/libs/atomic/include/boost/atomic/detail/ops_emulated.hpp
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / boost / libs / atomic / include / boost / atomic / detail / ops_emulated.hpp
1 /*
2 * Distributed under the Boost Software License, Version 1.0.
3 * (See accompanying file LICENSE_1_0.txt or copy at
4 * http://www.boost.org/LICENSE_1_0.txt)
5 *
6 * Copyright (c) 2014 Andrey Semashev
7 */
8 /*!
9 * \file atomic/detail/ops_emulated.hpp
10 *
11 * This header contains lockpool-based implementation of the \c operations template.
12 */
13
14 #ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
15 #define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
16
17 #include <cstddef>
18 #include <boost/memory_order.hpp>
19 #include <boost/atomic/detail/config.hpp>
20 #include <boost/atomic/detail/storage_type.hpp>
21 #include <boost/atomic/detail/operations_fwd.hpp>
22 #include <boost/atomic/detail/lockpool.hpp>
23 #include <boost/atomic/capabilities.hpp>
24
25 #ifdef BOOST_HAS_PRAGMA_ONCE
26 #pragma once
27 #endif
28
29 namespace boost {
30 namespace atomics {
31 namespace detail {
32
33 template< typename T >
34 struct emulated_operations
35 {
36 typedef T storage_type;
37
38 static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = false;
39
40 static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
41 {
42 lockpool::scoped_lock lock(&storage);
43 const_cast< storage_type& >(storage) = v;
44 }
45
46 static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
47 {
48 lockpool::scoped_lock lock(&storage);
49 return const_cast< storage_type const& >(storage);
50 }
51
52 static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
53 {
54 storage_type& s = const_cast< storage_type& >(storage);
55 lockpool::scoped_lock lock(&storage);
56 storage_type old_val = s;
57 s += v;
58 return old_val;
59 }
60
61 static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
62 {
63 storage_type& s = const_cast< storage_type& >(storage);
64 lockpool::scoped_lock lock(&storage);
65 storage_type old_val = s;
66 s -= v;
67 return old_val;
68 }
69
70 static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
71 {
72 storage_type& s = const_cast< storage_type& >(storage);
73 lockpool::scoped_lock lock(&storage);
74 storage_type old_val = s;
75 s = v;
76 return old_val;
77 }
78
79 static BOOST_FORCEINLINE bool compare_exchange_strong(
80 storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
81 {
82 storage_type& s = const_cast< storage_type& >(storage);
83 lockpool::scoped_lock lock(&storage);
84 storage_type old_val = s;
85 const bool res = old_val == expected;
86 if (res)
87 s = desired;
88 expected = old_val;
89
90 return res;
91 }
92
93 static BOOST_FORCEINLINE bool compare_exchange_weak(
94 storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
95 {
96 // Note: This function is the exact copy of compare_exchange_strong. The reason we're not just forwarding the call
97 // is that MSVC-12 ICEs in this case.
98 storage_type& s = const_cast< storage_type& >(storage);
99 lockpool::scoped_lock lock(&storage);
100 storage_type old_val = s;
101 const bool res = old_val == expected;
102 if (res)
103 s = desired;
104 expected = old_val;
105
106 return res;
107 }
108
109 static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
110 {
111 storage_type& s = const_cast< storage_type& >(storage);
112 lockpool::scoped_lock lock(&storage);
113 storage_type old_val = s;
114 s &= v;
115 return old_val;
116 }
117
118 static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
119 {
120 storage_type& s = const_cast< storage_type& >(storage);
121 lockpool::scoped_lock lock(&storage);
122 storage_type old_val = s;
123 s |= v;
124 return old_val;
125 }
126
127 static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
128 {
129 storage_type& s = const_cast< storage_type& >(storage);
130 lockpool::scoped_lock lock(&storage);
131 storage_type old_val = s;
132 s ^= v;
133 return old_val;
134 }
135
136 static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
137 {
138 return !!exchange(storage, (storage_type)1, order);
139 }
140
141 static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
142 {
143 store(storage, (storage_type)0, order);
144 }
145
146 static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
147 {
148 return false;
149 }
150 };
151
// Selects the emulated (lock-pool based) backend for the given storage size and
// signedness; used as the fallback when no native atomic implementation exists.
template< std::size_t Size, bool Signed >
struct operations :
    public emulated_operations< typename make_storage_type< Size, Signed >::type >
{
    // Storage type with the alignment required for use under the lock pool,
    // as provided by make_storage_type for this Size/Signed combination.
    typedef typename make_storage_type< Size, Signed >::aligned aligned_storage_type;
};
158
159 } // namespace detail
160 } // namespace atomics
161 } // namespace boost
162
163 #endif // BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_