/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014 Andrey Semashev
 */
/*!
 * \file lockpool.cpp
 *
 * This file contains implementation of the lockpool used to emulate atomic ops.
 */
14 | ||
15 | #include <cstddef> | |
16 | #include <boost/config.hpp> | |
17 | #include <boost/assert.hpp> | |
18 | #include <boost/memory_order.hpp> | |
19 | #include <boost/atomic/capabilities.hpp> | |
20 | ||
21 | #if BOOST_ATOMIC_FLAG_LOCK_FREE == 2 | |
22 | #include <boost/atomic/detail/operations_lockfree.hpp> | |
23 | #elif !defined(BOOST_HAS_PTHREADS) | |
24 | #error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available | |
25 | #else | |
26 | #include <pthread.h> | |
27 | #define BOOST_ATOMIC_USE_PTHREAD | |
28 | #endif | |
29 | ||
30 | #include <boost/atomic/detail/lockpool.hpp> | |
31 | #include <boost/atomic/detail/pause.hpp> | |
32 | ||
b32b8144 FG |
33 | #if defined(BOOST_MSVC) |
34 | #pragma warning(push) | |
35 | // 'struct_name' : structure was padded due to __declspec(align()) | |
36 | #pragma warning(disable: 4324) | |
37 | #endif | |
38 | ||
7c673cae FG |
39 | namespace boost { |
40 | namespace atomics { | |
41 | namespace detail { | |
42 | ||
namespace {

// This seems to be the maximum across all modern CPUs
// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
#define BOOST_ATOMIC_CACHE_LINE_SIZE 64

#if defined(BOOST_ATOMIC_USE_PTHREAD)
// Fallback: each pool entry is a full pthread mutex
typedef pthread_mutex_t lock_type;
#else
// Native path: each pool entry is a 1-byte lock-free atomic flag
typedef atomics::detail::operations< 1u, false > lock_operations;
typedef lock_operations::storage_type lock_type;
#endif

enum
{
    // Bytes of tail padding required to occupy whole cache lines: round a small
    // lock up to one line, or a large lock up to the next line boundary.
    padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
};

//! A lock padded out to cache-line size so adjacent pool entries never share a line
template< unsigned int PaddingSize >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
{
    lock_type lock;
    // The additional padding is needed to avoid false sharing between locks
    char padding[PaddingSize];
};

// Specialization for the zero-padding case (a zero-length array member would be ill-formed)
template< >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
{
    lock_type lock;
};

typedef padded_lock< padding_size > padded_lock_t;

// The lock pool; scoped_lock hashes an address to an entry by modulus.
// NOTE(review): 41 is prime, presumably chosen to spread addresses across
// entries more evenly -- confirm against Boost.Atomic history.
static padded_lock_t g_lock_pool[41]
#if defined(BOOST_ATOMIC_USE_PTHREAD)
=
{
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }
}
#endif
;

} // namespace
97 | ||
98 | ||
99 | #if !defined(BOOST_ATOMIC_USE_PTHREAD) | |
100 | ||
101 | // NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes. | |
102 | BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT : | |
103 | m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock) | |
104 | { | |
105 | while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire)) | |
106 | { | |
107 | do | |
108 | { | |
109 | atomics::detail::pause(); | |
110 | } | |
111 | while (!!lock_operations::load(*static_cast< lock_type* >(m_lock), memory_order_relaxed)); | |
112 | } | |
113 | } | |
114 | ||
115 | BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT | |
116 | { | |
117 | lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release); | |
118 | } | |
119 | ||
// Forward declaration of a namespace-scope signal_fence. It is not referenced in
// this translation unit -- NOTE(review): confirm against lockpool.hpp whether this
// declaration is still required or is a leftover.
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
121 | ||
122 | #else // !defined(BOOST_ATOMIC_USE_PTHREAD) | |
123 | ||
124 | BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT : | |
125 | m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock) | |
126 | { | |
127 | BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0); | |
128 | } | |
129 | ||
130 | BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT | |
131 | { | |
132 | BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0); | |
133 | } | |
134 | ||
135 | #endif // !defined(BOOST_ATOMIC_USE_PTHREAD) | |
136 | ||
//! Issues a sequentially consistent fence between threads. If the target has a
//! native fence (BOOST_ATOMIC_THREAD_FENCE > 0) it is used directly; otherwise
//! the fence is emulated by locking and unlocking a pool mutex (entry for
//! address 0), relying on the mutex's own acquire/release ordering.
BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
{
#if BOOST_ATOMIC_THREAD_FENCE > 0
    atomics::detail::thread_fence(memory_order_seq_cst);
#else
    // Emulate full fence by locking/unlocking a mutex
    scoped_lock lock(0);
#endif
}
146 | ||
//! Issues a fence against signal handlers running on the same thread. May be an
//! empty out-of-line call: the opaque call itself serves as a compiler barrier.
BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
{
    // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
    atomics::detail::signal_fence(memory_order_seq_cst);
#endif
}
154 | ||
155 | } // namespace detail | |
156 | } // namespace atomics | |
157 | } // namespace boost | |
b32b8144 FG |
158 | |
159 | #if defined(BOOST_MSVC) | |
160 | #pragma warning(pop) | |
161 | #endif |