/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014 Andrey Semashev
 *
 * This file contains implementation of the lockpool used to emulate atomic ops.
 */
#include <cstddef>

#include <boost/config.hpp>
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>

#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#include <boost/atomic/detail/operations_lockfree.hpp>
#elif !defined(BOOST_HAS_PTHREADS)
#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
#else
#include <pthread.h>
#define BOOST_ATOMIC_USE_PTHREAD
#endif

#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/detail/pause.hpp>
// This seems to be the maximum across all modern CPUs
// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
43 #if defined(BOOST_ATOMIC_USE_PTHREAD)
44 typedef pthread_mutex_t lock_type
;
46 typedef atomics::detail::operations
< 1u, false > lock_operations
;
47 typedef lock_operations::storage_type lock_type
;
52 padding_size
= (sizeof(lock_type
) <= BOOST_ATOMIC_CACHE_LINE_SIZE
?
53 (BOOST_ATOMIC_CACHE_LINE_SIZE
- sizeof(lock_type
)) :
54 (BOOST_ATOMIC_CACHE_LINE_SIZE
- sizeof(lock_type
) % BOOST_ATOMIC_CACHE_LINE_SIZE
))
57 template< unsigned int PaddingSize
>
58 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE
) padded_lock
61 // The additional padding is needed to avoid false sharing between locks
62 char padding
[PaddingSize
];
66 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE
) padded_lock
< 0u >
71 typedef padded_lock
< padding_size
> padded_lock_t
;
73 static padded_lock_t g_lock_pool
[41]
74 #if defined(BOOST_ATOMIC_USE_PTHREAD)
77 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
78 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
79 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
80 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
81 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
82 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
83 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
84 { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
}, { PTHREAD_MUTEX_INITIALIZER
},
85 { PTHREAD_MUTEX_INITIALIZER
}
93 #if !defined(BOOST_ATOMIC_USE_PTHREAD)
95 // NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes.
96 BOOST_ATOMIC_DECL
lockpool::scoped_lock::scoped_lock(const volatile void* addr
) BOOST_NOEXCEPT
:
97 m_lock(&g_lock_pool
[reinterpret_cast< std::size_t >(addr
) % (sizeof(g_lock_pool
) / sizeof(*g_lock_pool
))].lock
)
99 while (lock_operations::test_and_set(*static_cast< lock_type
* >(m_lock
), memory_order_acquire
))
103 atomics::detail::pause();
105 while (!!lock_operations::load(*static_cast< lock_type
* >(m_lock
), memory_order_relaxed
));
109 BOOST_ATOMIC_DECL
lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
111 lock_operations::clear(*static_cast< lock_type
* >(m_lock
), memory_order_release
);
114 BOOST_ATOMIC_DECL
void signal_fence() BOOST_NOEXCEPT
;
116 #else // !defined(BOOST_ATOMIC_USE_PTHREAD)
118 BOOST_ATOMIC_DECL
lockpool::scoped_lock::scoped_lock(const volatile void* addr
) BOOST_NOEXCEPT
:
119 m_lock(&g_lock_pool
[reinterpret_cast< std::size_t >(addr
) % (sizeof(g_lock_pool
) / sizeof(*g_lock_pool
))].lock
)
121 BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t
* >(m_lock
)) == 0);
124 BOOST_ATOMIC_DECL
lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
126 BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t
* >(m_lock
)) == 0);
129 #endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
131 BOOST_ATOMIC_DECL
void lockpool::thread_fence() BOOST_NOEXCEPT
133 #if BOOST_ATOMIC_THREAD_FENCE > 0
134 atomics::detail::thread_fence(memory_order_seq_cst
);
136 // Emulate full fence by locking/unlocking a mutex
141 BOOST_ATOMIC_DECL
void lockpool::signal_fence() BOOST_NOEXCEPT
143 // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
144 #if BOOST_ATOMIC_SIGNAL_FENCE > 0
145 atomics::detail::signal_fence(memory_order_seq_cst
);
149 } // namespace detail
150 } // namespace atomics