/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2009, 2011 Helge Bahmann
 * Copyright (c) 2009 Phil Endecott
 * Copyright (c) 2013 Tim Blechmann
 * Linux-specific code by Phil Endecott
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/ops_linux_arm.hpp
 *
 * This header contains implementation of the \c operations template.
 */
17 | ||
18 | #ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_ | |
19 | #define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_ | |
20 | ||
21 | #include <boost/memory_order.hpp> | |
22 | #include <boost/atomic/detail/config.hpp> | |
23 | #include <boost/atomic/detail/storage_type.hpp> | |
24 | #include <boost/atomic/detail/operations_fwd.hpp> | |
25 | #include <boost/atomic/capabilities.hpp> | |
26 | #include <boost/atomic/detail/ops_cas_based.hpp> | |
27 | #include <boost/atomic/detail/ops_extending_cas_based.hpp> | |
28 | ||
29 | #ifdef BOOST_HAS_PRAGMA_ONCE | |
30 | #pragma once | |
31 | #endif | |
32 | ||
33 | namespace boost { | |
34 | namespace atomics { | |
35 | namespace detail { | |
36 | ||
// Different ARM processors have different atomic instructions. In particular,
// architecture versions before v6 (which are still in widespread use, e.g. the
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
// On Linux the kernel provides some support that lets us abstract away from
// these differences: it provides emulated CAS and barrier functions at special
// addresses that are guaranteed not to be interrupted by the kernel. Using
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
// return the found value on failure, and we have to manually load this value
// after the emulated CAS reports failure. This in turn introduces a race
// between the CAS failing (due to the "wrong" value being found) and subsequently
// loading (which might turn up the "right" value). From an application's
// point of view this looks like "spurious failure", and therefore the
// emulated CAS is only good enough to provide compare_exchange_weak
// semantics.
57 | ||
58 | struct linux_arm_cas_base | |
59 | { | |
60 | static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true; | |
61 | ||
62 | static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT | |
63 | { | |
64 | if ((order & memory_order_release) != 0) | |
65 | hardware_full_fence(); | |
66 | } | |
67 | ||
68 | static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT | |
69 | { | |
70 | if (order == memory_order_seq_cst) | |
71 | hardware_full_fence(); | |
72 | } | |
73 | ||
74 | static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT | |
75 | { | |
76 | if ((order & (memory_order_consume | memory_order_acquire)) != 0) | |
77 | hardware_full_fence(); | |
78 | } | |
79 | ||
80 | static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT | |
81 | { | |
82 | typedef void (*kernel_dmb_t)(void); | |
83 | ((kernel_dmb_t)0xffff0fa0)(); | |
84 | } | |
85 | }; | |
86 | ||
87 | template< bool Signed > | |
88 | struct linux_arm_cas : | |
89 | public linux_arm_cas_base | |
90 | { | |
91 | typedef typename make_storage_type< 4u, Signed >::type storage_type; | |
92 | typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type; | |
93 | ||
94 | static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT | |
95 | { | |
96 | fence_before_store(order); | |
97 | storage = v; | |
98 | fence_after_store(order); | |
99 | } | |
100 | ||
101 | static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT | |
102 | { | |
103 | storage_type v = storage; | |
104 | fence_after_load(order); | |
105 | return v; | |
106 | } | |
107 | ||
108 | static BOOST_FORCEINLINE bool compare_exchange_strong( | |
109 | storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT | |
110 | { | |
111 | while (true) | |
112 | { | |
113 | storage_type tmp = expected; | |
114 | if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order)) | |
115 | return true; | |
116 | if (tmp != expected) | |
117 | { | |
118 | expected = tmp; | |
119 | return false; | |
120 | } | |
121 | } | |
122 | } | |
123 | ||
124 | static BOOST_FORCEINLINE bool compare_exchange_weak( | |
125 | storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT | |
126 | { | |
127 | typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr); | |
128 | ||
129 | if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0) | |
130 | { | |
131 | return true; | |
132 | } | |
133 | else | |
134 | { | |
135 | expected = storage; | |
136 | return false; | |
137 | } | |
138 | } | |
139 | ||
140 | static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT | |
141 | { | |
142 | return true; | |
143 | } | |
144 | }; | |
145 | ||
146 | template< bool Signed > | |
147 | struct operations< 1u, Signed > : | |
148 | public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 1u, Signed > | |
149 | { | |
150 | }; | |
151 | ||
152 | template< bool Signed > | |
153 | struct operations< 2u, Signed > : | |
154 | public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 2u, Signed > | |
155 | { | |
156 | }; | |
157 | ||
158 | template< bool Signed > | |
159 | struct operations< 4u, Signed > : | |
160 | public cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > > | |
161 | { | |
162 | }; | |
163 | ||
164 | BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT | |
165 | { | |
166 | if (order != memory_order_relaxed) | |
167 | linux_arm_cas_base::hardware_full_fence(); | |
168 | } | |
169 | ||
170 | BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT | |
171 | { | |
172 | if (order != memory_order_relaxed) | |
173 | __asm__ __volatile__ ("" ::: "memory"); | |
174 | } | |
175 | ||
176 | } // namespace detail | |
177 | } // namespace atomics | |
178 | } // namespace boost | |
179 | ||
180 | #endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_ |