]> git.proxmox.com Git - ceph.git/blob - ceph/src/boost/boost/fiber/detail/spinlock_ttas.hpp
import quincy beta 17.1.0
[ceph.git] / ceph / src / boost / boost / fiber / detail / spinlock_ttas.hpp
1
2 // Copyright Oliver Kowalke 2016.
3 // Distributed under the Boost Software License, Version 1.0.
4 // (See accompanying file LICENSE_1_0.txt or copy at
5 // http://www.boost.org/LICENSE_1_0.txt)
6
7 #ifndef BOOST_FIBERS_SPINLOCK_TTAS_H
8 #define BOOST_FIBERS_SPINLOCK_TTAS_H
9
10 #include <algorithm>
11 #include <atomic>
12 #include <chrono>
13 #include <cmath>
14 #include <random>
15 #include <thread>
16
17 #include <boost/fiber/detail/config.hpp>
18 #include <boost/fiber/detail/cpu_relax.hpp>
19 #include <boost/fiber/detail/spinlock_status.hpp>
20
// based on information from:
22 // https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
23 // https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
24
25 namespace boost {
26 namespace fibers {
27 namespace detail {
28
// TTAS ("test, test-and-set") spinlock.
// Spins on a relaxed read of the lock word and only attempts the
// cache-line-invalidating exchange once the lock looks free; on a failed
// acquisition attempt it backs off for a randomized, exponentially growing
// number of cpu_relax() iterations.
class spinlock_ttas {
private:
    // spinlock_rtm wraps a fallback spinlock of this type and needs direct
    // access to state_ for its transactional-memory fast path
    template< typename FBSplk >
    friend class spinlock_rtm;

    // the lock word: spinlock_status::unlocked or spinlock_status::locked
    std::atomic< spinlock_status > state_{ spinlock_status::unlocked };

public:
    spinlock_ttas() = default;

    // non-copyable: a lock's identity is its address
    spinlock_ttas( spinlock_ttas const&) = delete;
    spinlock_ttas & operator=( spinlock_ttas const&) = delete;

    // Acquire the lock; blocks (by spinning/yielding/sleeping) until acquired.
    void lock() noexcept {
        // per-thread LCG driving the randomized backoff below,
        // seeded once per thread from std::random_device
        static thread_local std::minstd_rand generator{ std::random_device{}() };
        // number of failed exchange attempts; widens the backoff window
        std::size_t collisions = 0 ;
        for (;;) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count can not be guaranteed from one system to the next
            // -> check the shared variable 'state_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            std::size_t retries = 0;
            // read-only spin on shared variable 'state_':
            // first access to 'state_' -> cache miss
            // successive accesses to 'state_' -> cache hit
            // if 'state_' was released by another fiber,
            // the cached 'state_' is invalidated -> cache miss
            // (relaxed load: no ordering needed while merely observing)
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                // three escalating wait strategies, tiered by retry count
                if ( BOOST_FIBERS_SPIN_BEFORE_SLEEP0 > retries) {
                    ++retries;
                    // give CPU a hint that this thread is in a "spin-wait" loop
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                } else if ( BOOST_FIBERS_SPIN_BEFORE_YIELD > retries) {
                    ++retries;
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring3 to ring 0 transition costing about 1000 cycles
                    // std::this_thread::sleep_for( 0us) lets this_thread give up the remaining part of its time slice
                    // if and only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                } else {
                    // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                    // but only to another thread on the same processor
                    // instead of constant checking, a thread only checks if no other useful work is pending
                    std::this_thread::yield();
                }
#else
                // single-core build: spinning can never make progress, yield immediately
                std::this_thread::yield();
#endif
            }
            // test-and-set of shared variable 'state_':
            // every exchange broadcasts 'state_' over the bus, even if the test fails,
            // which is why the read-only spin above runs first
            if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
                // window doubles per collision, capped at 2^BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD
                std::uniform_int_distribution< std::size_t > distribution{
                    0, static_cast< std::size_t >( 1) << (std::min)(collisions, static_cast< std::size_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
                const std::size_t z = distribution( generator);
                ++collisions;
                for ( std::size_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, thread has acquired the lock
                break;
            }
        }
    }

    // Single non-blocking acquisition attempt; returns true iff the lock was
    // free and is now held by the caller (acquire ordering on success).
    bool try_lock() noexcept {
        return spinlock_status::unlocked == state_.exchange( spinlock_status::locked, std::memory_order_acquire);
    }

    // Release the lock; the release store publishes all writes made while the
    // lock was held to the next acquirer.
    void unlock() noexcept {
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }
};
115
116 }}}
117
118 #endif // BOOST_FIBERS_SPINLOCK_TTAS_H