/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2010 Helge Bahmann
 * Copyright (c) 2013 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/ops_gcc_sparc.hpp
 *
 * This header contains implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

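// Common base for the SPARC CAS backends. Memory ordering is expressed with explicit
// membar instructions: "membar #Sync" is a full barrier used for seq_cst, while
// "membar #StoreStore | #LoadStore" is emitted before stores that carry release
// semantics and, in this implementation, after loads that request acquire/consume.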
struct gcc_sparc_cas_base
{
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            __asm__ __volatile__ ("membar #Sync" ::: "memory");
        else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
    }

    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            __asm__ __volatile__ ("membar #Sync" ::: "memory");
        else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
            __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
    }

    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            __asm__ __volatile__ ("membar #Sync" ::: "memory");
    }
};

template< bool Signed >
struct gcc_sparc_cas32 :
    public gcc_sparc_cas_base
{
    typedef typename storage_traits< 4u >::type storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        fence_before(order);
        storage = v;
        fence_after_store(order);
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after(order);
        return v;
    }

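    // The SPARCv9 "cas [address], compare, swap" instruction atomically compares the
    // 32-bit word at [address] with the "compare" register and, if they are equal,
    // stores the "swap" register to memory; in either case the old memory value is
    // returned in the "swap" register. Comparing that result with the expected value
    // tells us whether the exchange actually took place.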
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        fence_before(success_order);
        storage_type previous = expected;
        __asm__ __volatile__
        (
            "cas [%1], %2, %0"
            : "+r" (desired)
            : "r" (&storage), "r" (previous)
            : "memory"
        );
        const bool success = (desired == previous);
        if (success)
            fence_after(success_order);
        else
            fence_after(failure_order);
        expected = desired;
        return success;
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

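    // The 32-bit "swap [address], reg" instruction atomically exchanges a register
    // with the word in memory, so exchange() does not need a CAS loop here.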
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        fence_before(order);
        __asm__ __volatile__
        (
            "swap [%1], %0"
            : "+r" (v)
            : "r" (&storage)
            : "memory"
        );
        fence_after(order);
        return v;
    }
};

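// The generic cas_based_operations layer is expected to synthesize the remaining
// operations (fetch_add, fetch_and, etc.) as compare-and-swap loops on top of the
// primitives provided by gcc_sparc_cas32.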
template< bool Signed >
struct operations< 4u, Signed > :
    public cas_based_operations< gcc_sparc_cas32< Signed > >
{
};

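// There is no sub-word CAS on SPARC, so 8- and 16-bit atomics are layered on the
// 32-bit implementation; extending_cas_based_operations presumably widens the
// operands and truncates results back to the original size.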
template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
{
};

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
{
};

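// 64-bit operations use "casx", the 64-bit form of compare-and-swap. There is no
// 64-bit counterpart of the "swap" instruction, so this backend provides no native
// exchange() and relies on cas_based_exchange below.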
template< bool Signed >
struct gcc_sparc_cas64 :
    public gcc_sparc_cas_base
{
    typedef typename storage_traits< 8u >::type storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        fence_before(order);
        storage = v;
        fence_after_store(order);
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        fence_before(success_order);
        storage_type previous = expected;
        __asm__ __volatile__
        (
            "casx [%1], %2, %0"
            : "+r" (desired)
            : "r" (&storage), "r" (previous)
            : "memory"
        );
        const bool success = (desired == previous);
        if (success)
            fence_after(success_order);
        else
            fence_after(failure_order);
        expected = desired;
        return success;
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }
};

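// cas_based_exchange is assumed to implement exchange() as a CAS loop, which
// cas_based_operations then uses to build the remaining 64-bit operations.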
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< cas_based_exchange< gcc_sparc_cas64< Signed > > >
{
};

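// Standalone fences map the C++ memory orders onto SPARC membar masks; #Sync acts
// as a full barrier for sequential consistency.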
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    switch (order)
    {
    case memory_order_release:
        __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
        break;
    case memory_order_consume:
    case memory_order_acquire:
        __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
        break;
    case memory_order_acq_rel:
        __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
        break;
    case memory_order_seq_cst:
        __asm__ __volatile__ ("membar #Sync" ::: "memory");
        break;
    case memory_order_relaxed:
    default:
        break;
    }
}

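// A signal fence only has to prevent compiler reordering, so an empty asm statement
// with a "memory" clobber is sufficient.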
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order != memory_order_relaxed)
        __asm__ __volatile__ ("" ::: "memory");
}

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_