/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014 Andrey Semashev
 */
/*!
 * \file   lockpool.cpp
 *
 * This file contains implementation of the lockpool used to emulate atomic ops.
 */
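
/*
 * Overview (explanatory note, not part of the original sources): on targets
 * without native atomic instructions, each emulated operation acquires the
 * pool lock selected by the object's address, performs the operation with
 * plain loads and stores, and releases the lock. A minimal sketch of such a
 * fallback, using a hypothetical emulated_store helper:
 *
 *   template< typename T >
 *   void emulated_store(volatile T& obj, T val)
 *   {
 *       lockpool::scoped_lock guard(&obj); // lock chosen from g_lock_pool by address
 *       const_cast< T& >(obj) = val;       // plain store, serialized by the lock
 *   }
 */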

#include <cstddef>
#include <boost/config.hpp>
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>

#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#include <boost/atomic/detail/operations_lockfree.hpp>
#elif !defined(BOOST_HAS_PTHREADS)
#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
#else
#include <pthread.h>
#define BOOST_ATOMIC_USE_PTHREAD
#endif

#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/detail/pause.hpp>

#if defined(BOOST_MSVC)
#pragma warning(push)
// 'struct_name' : structure was padded due to __declspec(align())
#pragma warning(disable: 4324)
#endif

namespace boost {
namespace atomics {
namespace detail {

// Cache line size, in bytes
// NOTE: This constant is defined as a macro because some compilers (e.g. gcc 4.4) don't allow enums or namespace scope constants in alignment attributes
#if defined(__s390__) || defined(__s390x__)
#define BOOST_ATOMIC_CACHE_LINE_SIZE 256
#elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
#define BOOST_ATOMIC_CACHE_LINE_SIZE 128
#else
#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
#endif

#if defined(BOOST_ATOMIC_USE_PTHREAD)
typedef pthread_mutex_t lock_type;
#else
typedef atomics::detail::operations< 1u, false > lock_operations;
typedef lock_operations::storage_type lock_type;
#endif

enum
{
    padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
};
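
// Worked example (illustrative numbers, not from the original sources): with
// BOOST_ATOMIC_CACHE_LINE_SIZE == 64 and a 1-byte spinlock, padding_size is 63,
// so sizeof(padded_lock) == 64 and each lock owns a full cache line. With an
// 80-byte mutex, padding_size is 64 - (80 % 64) == 48, rounding the total up
// to 128, the next multiple of the cache line size.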

template< unsigned int PaddingSize >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
{
    lock_type lock;

    // The additional padding is needed to avoid false sharing between locks
    char padding[PaddingSize];
};
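
// Why the padding matters (explanatory note): if two locks shared one cache
// line, a thread spinning on or updating one lock would repeatedly invalidate
// the line for threads using the neighboring lock, serializing otherwise
// independent operations. Padding each lock to a full line prevents this.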

template< >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
{
    lock_type lock;
};

typedef padded_lock< padding_size > padded_lock_t;

static padded_lock_t g_lock_pool[41]
#if defined(BOOST_ATOMIC_USE_PTHREAD)
=
{
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }
}
#endif
;
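
// Note (explanatory; the rationale is inferred, not stated in the sources):
// the pool size 41 is prime, so reducing an object's address modulo the pool
// size spreads nearby addresses across different locks and makes it unlikely
// that unrelated atomic objects persistently contend on the same pool entry.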

#if !defined(BOOST_ATOMIC_USE_PTHREAD)

// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
    m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
    while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire))
    {
        // Wait until the lock is observed free before retrying test_and_set
        do
        {
            atomics::detail::pause();
        }
        while (!!lock_operations::load(*static_cast< lock_type* >(m_lock), memory_order_relaxed));
    }
}
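
// The loop above is a test-and-test-and-set spinlock (explanatory note): the
// acquiring test_and_set uses memory_order_acquire so the critical section
// cannot be reordered above the lock, while the inner wait re-reads the flag
// with a cheap relaxed load (plus a pause hint) instead of hammering the
// cache line with atomic read-modify-write operations.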

BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
    lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release);
}

BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;

#else // !defined(BOOST_ATOMIC_USE_PTHREAD)

BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
    m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
    BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}

BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
    BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}

#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)

BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
{
#if BOOST_ATOMIC_THREAD_FENCE > 0
    atomics::detail::thread_fence(memory_order_seq_cst);
#else
    // Emulate full fence by locking/unlocking a mutex
    scoped_lock lock(0);
#endif
}
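
// Explanatory note: constructing scoped_lock(0) acquires, and at the end of
// the enclosing scope releases, the pool lock that address 0 maps to. The
// paired acquire and release operations order the memory accesses on either
// side of the call, which approximates a full fence for code that goes
// through this emulation.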

BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
{
    // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
    atomics::detail::signal_fence(memory_order_seq_cst);
#endif
}

} // namespace detail
} // namespace atomics
} // namespace boost

#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif