/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014 Andrey Semashev
 */
/*!
 * \file lockpool.cpp
 *
 * This file contains the implementation of the lock pool used to emulate atomic ops.
 */
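
/*
 * How the emulation works, in brief: when the target has no native atomic
 * instructions for a given operand size, every atomic operation on an object
 * is performed under a lock chosen from a small global pool, keyed by the
 * object's address. A lock-based fetch_add would look roughly like this
 * (an illustrative sketch only; emulate_fetch_add is a hypothetical helper,
 * not part of Boost.Atomic's API):
 *
 *   int emulate_fetch_add(int* obj, int arg)
 *   {
 *       lockpool::scoped_lock guard(obj); // locks the pool entry for obj's address
 *       int old_val = *obj;
 *       *obj = old_val + arg;
 *       return old_val;
 *   } // guard's destructor releases the lock
 */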

#include <cstddef>
#include <boost/config.hpp>
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>

#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#include <boost/atomic/detail/operations_lockfree.hpp>
#elif !defined(BOOST_HAS_PTHREADS)
#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
#else
#include <pthread.h>
#define BOOST_ATOMIC_USE_PTHREAD
#endif

#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/detail/pause.hpp>

namespace boost {
namespace atomics {
namespace detail {

namespace {

// This seems to be the maximum across all modern CPUs
// NOTE: This constant is defined as a macro because some compilers (gcc 4.4, for one) don't allow enums or namespace-scope constants in alignment attributes
#define BOOST_ATOMIC_CACHE_LINE_SIZE 64

#if defined(BOOST_ATOMIC_USE_PTHREAD)
typedef pthread_mutex_t lock_type;
#else
typedef atomics::detail::operations< 1u, false > lock_operations;
typedef lock_operations::storage_type lock_type;
#endif

enum
{
    padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
};
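
/*
 * Worked example of the padding arithmetic (lock sizes are platform-dependent,
 * so the numbers below are illustrative assumptions only):
 *  - spinlock path, sizeof(lock_type) == 1:  padding_size = 64 - 1 = 63,
 *    so each padded_lock occupies exactly one 64-byte cache line;
 *  - pthread path, sizeof(pthread_mutex_t) == 40: padding_size = 64 - 40 = 24;
 *  - sizeof(lock_type) == 96 (> 64): since % binds tighter than -, this gives
 *    padding_size = 64 - (96 % 64) = 32, rounding the total size up to the
 *    next cache-line multiple (128).
 */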

template< unsigned int PaddingSize >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
{
    lock_type lock;
    // The additional padding is needed to avoid false sharing between locks
    char padding[PaddingSize];
};

template< >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
{
    lock_type lock;
};

typedef padded_lock< padding_size > padded_lock_t;
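
// Since padded_lock_t is aligned to the cache line size, sizeof(padded_lock_t)
// is a whole multiple of BOOST_ATOMIC_CACHE_LINE_SIZE, so adjacent pool entries
// never share a cache line. A sanity check one could add (it requires
// <boost/static_assert.hpp> and is not present in the original source):
//   BOOST_STATIC_ASSERT(sizeof(padded_lock_t) % BOOST_ATOMIC_CACHE_LINE_SIZE == 0);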

static padded_lock_t g_lock_pool[41]
#if defined(BOOST_ATOMIC_USE_PTHREAD)
=
{
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }
}
#endif
;
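
// Note: the pool size 41 is prime. The lock index below is computed as
// (address % 41); with a prime modulus, addresses that differ by a
// power-of-two stride (typical for arrays of atomics) are spread across
// all buckets rather than colliding on a few of them.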

} // namespace


#if !defined(BOOST_ATOMIC_USE_PTHREAD)

// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
    m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
    // Test-and-test-and-set spin loop: contend with an acquire test_and_set,
    // then wait on cheap relaxed loads (with a pause hint) until the lock
    // looks free, to avoid hammering the cache line with atomic writes.
    while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire))
    {
        do
        {
            atomics::detail::pause();
        }
        while (!!lock_operations::load(*static_cast< lock_type* >(m_lock), memory_order_relaxed));
    }
}

BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
    lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release);
}

BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;

#else // !defined(BOOST_ATOMIC_USE_PTHREAD)

BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
    m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
    BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}

BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
    BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}

#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)

BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
{
#if BOOST_ATOMIC_THREAD_FENCE > 0
    atomics::detail::thread_fence(memory_order_seq_cst);
#else
    // Emulate full fence by locking/unlocking a mutex
    scoped_lock lock(0);
#endif
}
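
// Note on the fallback branch above: scoped_lock(0) locks and unlocks the pool
// entry that address zero maps to, so the "fence" is a mutex lock/unlock round
// trip. The pair gives acquire + release ordering, a best-effort approximation
// of a full fence on targets that expose no native fence instruction.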

BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
{
    // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
    atomics::detail::signal_fence(memory_order_seq_cst);
#endif
}

} // namespace detail
} // namespace atomics
} // namespace boost