// Copyright Oliver Kowalke 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H
#define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/futex.hpp>
// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors

namespace boost {
namespace fibers {
namespace detail {

class spinlock_ttas_adaptive_futex {
private:
    template< typename FBSplk >
    friend class spinlock_rtm;

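    // 'value_' is the futex word holding the lock state; 'retries_' carries a
    // running estimate of how many spin iterations recent acquisitions needed,
    // which lock() uses to size its next spin phase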
    std::atomic< std::int32_t > value_{ 0 };
    std::atomic< std::int32_t > retries_{ 0 };

public:
    spinlock_ttas_adaptive_futex() = default;

    spinlock_ttas_adaptive_futex( spinlock_ttas_adaptive_futex const&) = delete;
    spinlock_ttas_adaptive_futex & operator=( spinlock_ttas_adaptive_futex const&) = delete;

    void lock() noexcept {
        static thread_local std::minstd_rand generator{ std::random_device{}() };
        std::int32_t collisions = 0, retries = 0, expected = 0;
        const std::int32_t prev_retries = retries_.load( std::memory_order_relaxed);
        const std::int32_t max_relax_retries = (std::min)(
            static_cast< std::int32_t >( BOOST_FIBERS_SPIN_BEFORE_SLEEP0), 2 * prev_retries + 10);
        const std::int32_t max_sleep_retries = (std::min)(
            static_cast< std::int32_t >( BOOST_FIBERS_SPIN_BEFORE_YIELD), 2 * prev_retries + 10);
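        // both caps adapt to recent contention: they grow linearly with the
        // retry count observed on previous acquisitions (2 * prev_retries + 10),
        // bounded by the configured thresholds, so a lightly contended lock
        // spins only briefly while a heavily contended one spins longer before
        // sleeping or yielding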
        // after max. spins or collisions, suspend via futex
        while ( retries++ < BOOST_FIBERS_RETRY_THRESHOLD) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'value_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            // test shared variable 'value_'
            // first access to 'value_' -> cache miss
            // successive access to 'value_' -> cache hit
            // if 'value_' was released by another fiber,
            // the cached 'value_' is invalidated -> cache miss
            if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                if ( max_relax_retries > retries) {
                    // give CPU a hint that this thread is in a "spin-wait" loop
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevents pipeline stalls
                    cpu_relax();
                } else if ( max_sleep_retries > retries) {
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring-3 to ring-0 transition costing about 1000 cycles
                    // std::this_thread::sleep_for( 0us) lets this_thread give up the remaining part of its time slice
                    // if and only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                } else {
                    // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                    // but only to another thread on the same processor
                    // instead of constant checking, a thread only checks if no other useful work is pending
                    std::this_thread::yield();
                }
#else
                // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                // but only to another thread on the same processor
                // instead of constant checking, a thread only checks if no other useful work is pending
                std::this_thread::yield();
#endif
            } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on a Linear Congruential Generator (LCG)
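                // after k collisions, the wait is drawn uniformly from
                // [0, 2^min(k, BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)] cpu_relax() calls,
                // so the expected backoff doubles with each collision until the
                // window is capped; randomization keeps contending threads from
                // retrying in lock-step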
                std::uniform_int_distribution< std::int32_t > distribution{
                    0, static_cast< std::int32_t >( 1) << (std::min)(collisions, static_cast< std::int32_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
                const std::int32_t z = distribution( generator);
                ++collisions;
                for ( std::int32_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevents pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, lock acquired
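                // update the adaptive spin estimate: an integer exponential
                // moving average with weight 1/8, new = prev + (observed - prev) / 8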
                retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
                return;
            }
        }
        // failure, lock not acquired
        // pause via futex
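        // 'value_' acts as a three-state futex word: 0 == unlocked, 1 == locked,
        // 2 == locked and contended (waiters may be blocked in the kernel);
        // exchange( 2) acquires the lock if it returns 0 and otherwise marks the
        // lock contended, and futex_wait() blocks only while 'value_' still
        // equals 2, so a wake-up between the exchange and the wait is never lost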
        if ( 2 != expected) {
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
        while ( 0 != expected) {
            futex_wait( & value_, 2);
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
        // success, lock acquired
        retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
    }
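    // try_lock() only attempts the uncontended 0 -> 1 transition; it never
    // marks the lock word contended, so a failed attempt has no side effects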
    bool try_lock() noexcept {
        std::int32_t expected = 0;
        return value_.compare_exchange_strong( expected, 1, std::memory_order_acquire);
    }
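    // unlock: fetch_sub() returns the previous value; 1 means no waiters and
    // the decrement alone releases the lock, while 2 means at least one thread
    // may be blocked in futex_wait(), so the word is reset to 0 and one waiter
    // is woken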
    void unlock() noexcept {
        if ( 1 != value_.fetch_sub( 1, std::memory_order_acquire) ) {
            value_.store( 0, std::memory_order_release);
            futex_wake( & value_);
        }
    }
};

}}}
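
// Usage sketch (illustrative only, not part of this header): lock(),
// try_lock() and unlock() satisfy the Lockable requirements, so the spinlock
// composes with the standard lock adaptors (requires <mutex>), e.g.:
//
//   boost::fibers::detail::spinlock_ttas_adaptive_futex splk;
//   {
//       std::lock_guard< boost::fibers::detail::spinlock_ttas_adaptive_futex > lk{ splk };
//       // ... critical section ...
//   }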

#endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H