/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014, 2020 Andrey Semashev
 */
/*!
 * \file atomic/detail/lock_pool.hpp
 *
 * This header contains the declaration of the lock pool used to emulate atomic ops.
 */

#ifndef BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_

#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/link.hpp>
#include <boost/atomic/detail/intptr.hpp>
#if defined(BOOST_WINDOWS)
#include <boost/winapi/thread.hpp>
#elif defined(BOOST_HAS_NANOSLEEP)
#include <time.h>
#else
#include <unistd.h>
#endif
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {
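
//! Backoff helper: briefly blocks or yields the calling thread so that other
//! threads, including ones pending on the same CPU, can make progress.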
BOOST_FORCEINLINE void wait_some() BOOST_NOEXCEPT
{
#if defined(BOOST_WINDOWS)
    boost::winapi::SwitchToThread();
#elif defined(BOOST_HAS_NANOSLEEP)
    // Do not use sched_yield or pthread_yield: at least on Linux, they do not block the thread if there are no other
    // threads pending on the current CPU. Proper sleeping is guaranteed to block the thread, which allows other threads
    // to potentially migrate to this CPU and complete the tasks we're waiting for.
    struct ::timespec ts = {};
    ts.tv_sec = 0;
    ts.tv_nsec = 1000;
    ::nanosleep(&ts, NULL);
#else
    ::usleep(1);
#endif
}

namespace lock_pool {
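
// Acquire the pool lock corresponding to the pointer hash h and return an
// opaque lock state pointer that must later be passed to unlock(). As used
// below, short_lock() is taken for brief critical sections (scoped_lock),
// while long_lock() is taken when the lock may be held longer, e.g. around
// waiting (scoped_wait_state). The definitions live in the compiled
// Boost.Atomic library.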
BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void unlock(void* ls) BOOST_NOEXCEPT;
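
// Waiting and notification primitives: allocate_wait_state() registers a wait
// state for addr under the lock state ls, wait() blocks on that wait state
// until notified, and notify_one()/notify_all() wake one or all threads
// waiting on addr. free_wait_state() releases the wait state when done.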
BOOST_ATOMIC_DECL void* allocate_wait_state(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void free_wait_state(void* ls, void* ws) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void wait(void* ls, void* ws) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_one(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_all(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
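
// Out-of-line fence implementations used by the lock-based backend.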
BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
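
//! Computes the lock pool hash for the object at addr, where Alignment is the
//! assumed alignment of that object.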
template< std::size_t Alignment >
BOOST_FORCEINLINE atomics::detail::uintptr_t hash_ptr(const volatile void* addr) BOOST_NOEXCEPT
{
    atomics::detail::uintptr_t ptr = (atomics::detail::uintptr_t)addr;
    atomics::detail::uintptr_t h = ptr / Alignment;

    // Since many malloc/new implementations return pointers with higher alignment
    // than indicated by Alignment, it makes sense to mix higher bits
    // into the lower ones. On 64-bit platforms, malloc typically aligns to 16 bytes,
    // on 32-bit, to 8 bytes.
    BOOST_CONSTEXPR_OR_CONST std::size_t malloc_alignment = sizeof(void*) >= 8u ? 16u : 8u;
    BOOST_IF_CONSTEXPR (Alignment != malloc_alignment)
        h ^= ptr / malloc_alignment;

    return h;
}
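
//! Scope guard that holds the pool lock guarding addr for its lifetime.
//! LongLock selects long_lock() over the default short_lock().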
template< std::size_t Alignment, bool LongLock = false >
class scoped_lock
{
private:
    void* m_lock;

public:
    explicit scoped_lock(const volatile void* addr) BOOST_NOEXCEPT
    {
        atomics::detail::uintptr_t h = lock_pool::hash_ptr< Alignment >(addr);
        BOOST_IF_CONSTEXPR (!LongLock)
            m_lock = lock_pool::short_lock(h);
        else
            m_lock = lock_pool::long_lock(h);
    }
    ~scoped_lock() BOOST_NOEXCEPT
    {
        lock_pool::unlock(m_lock);
    }

    void* get_lock_state() const BOOST_NOEXCEPT
    {
        return m_lock;
    }

    BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
    BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
};
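
// A minimal usage sketch (hypothetical caller; the real callers are the
// lock-based operation backends): an emulated atomic store takes the pool
// lock guarding the storage and writes through the pointer while the lock is
// held. Alignment equal to the storage size is assumed for illustration.
//
//     template< typename T >
//     void locked_store(volatile T* storage, T value) BOOST_NOEXCEPT
//     {
//         lock_pool::scoped_lock< sizeof(T) > guard(storage);
//         *storage = value;
//     }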
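
//! Scope guard that additionally allocates a wait state for addr, taking the
//! long variant of the pool lock. The owner may block in wait() until another
//! thread notifies the address via notify_one() or notify_all().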
template< std::size_t Alignment >
class scoped_wait_state :
    public scoped_lock< Alignment, true >
{
private:
    void* m_wait_state;

public:
    explicit scoped_wait_state(const volatile void* addr) BOOST_NOEXCEPT :
        scoped_lock< Alignment, true >(addr)
    {
        m_wait_state = lock_pool::allocate_wait_state(this->get_lock_state(), addr);
    }
    ~scoped_wait_state() BOOST_NOEXCEPT
    {
        lock_pool::free_wait_state(this->get_lock_state(), m_wait_state);
    }

    void wait() BOOST_NOEXCEPT
    {
        lock_pool::wait(this->get_lock_state(), m_wait_state);
    }

    BOOST_DELETED_FUNCTION(scoped_wait_state(scoped_wait_state const&))
    BOOST_DELETED_FUNCTION(scoped_wait_state& operator=(scoped_wait_state const&))
};
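
// A minimal waiting sketch (hypothetical caller): re-check the value under the
// lock and block until a notifier changes it and wakes the waiters.
//
//     template< typename T >
//     T locked_wait(const volatile T* storage, T old_val) BOOST_NOEXCEPT
//     {
//         lock_pool::scoped_wait_state< sizeof(T) > ws(storage);
//         T new_val = *storage;
//         while (new_val == old_val)
//         {
//             ws.wait();
//             new_val = *storage;
//         }
//         return new_val;
//     }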

} // namespace lock_pool
} // namespace detail
} // namespace atomics
} // namespace boost

#include <boost/atomic/detail/footer.hpp>

#endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_