]> git.proxmox.com Git - ceph.git/blame - ceph/src/boost/libs/fiber/src/context.cpp
import quincy beta 17.1.0
[ceph.git] / ceph / src / boost / libs / fiber / src / context.cpp
CommitLineData
7c673cae
FG
1
2// Copyright Oliver Kowalke 2013.
3// Distributed under the Boost Software License, Version 1.0.
4// (See accompanying file LICENSE_1_0.txt or copy at
5// http://www.boost.org/LICENSE_1_0.txt)
6
7#include "boost/fiber/context.hpp"
8
9#include <cstdlib>
10#include <mutex>
11#include <new>
12
13#include "boost/fiber/exceptions.hpp"
14#include "boost/fiber/scheduler.hpp"
15
16#ifdef BOOST_HAS_ABI_HEADERS
17# include BOOST_ABI_PREFIX
18#endif
19
20namespace boost {
21namespace fibers {
22
// Represents the thread's original ("main") execution context so the
// scheduler can treat it like any other fiber; it owns no separate stack.
class main_context final : public context {
public:
    // id 1 is reserved for the main context; launch::post is irrelevant
    // here since the main context is never launched by the scheduler.
    main_context() noexcept :
        context{ 1, type::main_context, launch::post } {
    }
};
29
30class dispatcher_context final : public context {
31private:
11fdf7f2
TL
32 boost::context::fiber
33 run_( boost::context::fiber && c) {
34#if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
35 std::move( c).resume();
36#endif
b32b8144
FG
37 // execute scheduler::dispatch()
38 return get_scheduler()->dispatch();
39 }
40
41public:
92f5a8d4 42 dispatcher_context( boost::context::preallocated const& palloc, default_stack && salloc) :
b32b8144 43 context{ 0, type::dispatcher_context, launch::post } {
11fdf7f2
TL
44 c_ = boost::context::fiber{ std::allocator_arg, palloc, salloc,
45 std::bind( & dispatcher_context::run_, this, std::placeholders::_1) };
46#if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
47 c_ = std::move( c_).resume();
48#endif
b32b8144
FG
49 }
50};
51
// Allocates a stack and placement-constructs the dispatcher_context at its
// top, so the control structure lives inside the fiber's own stack region.
static intrusive_ptr< context > make_dispatcher_context() {
    default_stack salloc; // use default stack-size
    auto sctx = salloc.allocate();
    // reserve space for control structure: carve sizeof(dispatcher_context)
    // bytes off the top of the stack, rounded down to a 256-byte boundary
    // (the 0xff mask) for alignment
    void * storage = reinterpret_cast< void * >(
            ( reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sizeof( dispatcher_context) ) )
            & ~ static_cast< uintptr_t >( 0xff) );
    void * stack_bottom = reinterpret_cast< void * >(
            reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sctx.size) );
    // usable stack size that remains below the control structure
    const std::size_t size = reinterpret_cast< uintptr_t >( storage) - reinterpret_cast< uintptr_t >( stack_bottom);
    // placement new of context on top of fiber's stack; the preallocated
    // descriptor tells Boost.Context the remaining usable region
    return intrusive_ptr< context >{
            new ( storage) dispatcher_context{
                boost::context::preallocated{ storage, size, sctx }, std::move( salloc) } };
}
67
// schwarz counter: per-thread reference-counted initializer. The first
// instance constructed on a thread builds that thread's main context,
// scheduler and dispatcher fiber; the last one destroyed tears them down.
struct context_initializer {
    // currently running fiber of this thread
    static thread_local context * active_;
    // number of live context_initializer instances on this thread
    static thread_local std::size_t counter_;

    context_initializer() {
        if ( 0 == counter_++) {
            // main fiber context of this thread
            context * main_ctx = new main_context{};
            // scheduler of this thread
            auto sched = new scheduler{};
            // attach main context to scheduler
            sched->attach_main_context( main_ctx);
            // create and attach dispatcher context to scheduler
            sched->attach_dispatcher_context( make_dispatcher_context() );
            // make main context to active context
            active_ = main_ctx;
        }
    }

    ~context_initializer() {
        if ( 0 == --counter_) {
            // on teardown the main context must be the active one again
            context * main_ctx = active_;
            BOOST_ASSERT( main_ctx->is_context( type::main_context) );
            scheduler * sched = main_ctx->get_scheduler();
            // scheduler first (it references the contexts), then main context
            delete sched;
            delete main_ctx;
        }
    }
};
98
// zero-initialization of the per-thread schwarz-counter state
thread_local context * context_initializer::active_{ nullptr };
thread_local std::size_t context_initializer::counter_{ 0 };
7c673cae
FG
102
// Returns the fiber context currently running on this thread.
context *
context::active() noexcept {
    // initialized the first time control passes; per thread — constructing
    // this static lazily bootstraps the thread's scheduler and main context
    thread_local static context_initializer ctx_initializer;
    return context_initializer::active_;
}
109
// Clears this thread's active-context pointer (used during teardown).
void
context::reset_active() noexcept {
    context_initializer::active_ = nullptr;
}
114
// A context may only be destroyed once it is unlinked from every scheduler
// queue and no fiber is still waiting on it.
context::~context() {
    // protect for concurrent access
    std::unique_lock< detail::spinlock > lk{ splk_ };
    BOOST_ASSERT( ! ready_is_linked() );
    BOOST_ASSERT( ! remote_ready_is_linked() );
    BOOST_ASSERT( ! sleep_is_linked() );
    if ( is_context( type::dispatcher_context) ) {
        // dispatcher is destroyed during thread teardown, after
        // reset_active() — so no fiber may still be active
        BOOST_ASSERT( nullptr == active() );
    }
    BOOST_ASSERT( wait_queue_.empty() );
    // context owns its (optional) scheduler-specific properties
    delete properties_;
}
127
7c673cae
FG
// The fiber id is simply the context's address wrapped in an id object.
context::id
context::get_id() const noexcept {
    return id{ const_cast< context * >( this) };
}
132
// Switches execution from the current fiber to `this`.
void
context::resume() noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`; the lambda runs on
    // the target fiber's stack and stores the suspended continuation back
    // into the previous context
    std::move( c_).resume_with([prev](boost::context::fiber && c){
                prev->c_ = std::move( c);
                return boost::context::fiber{};
            });
}
145
// Switches to `this`, releasing `lk` only after the previous fiber's
// continuation has been saved — avoids a race on the previous context.
void
context::resume( detail::spinlock_lock & lk) noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    std::move( c_).resume_with([prev,&lk](boost::context::fiber && c){
                prev->c_ = std::move( c);
                // safe to unlock now: prev's continuation is stored
                lk.unlock();
                return boost::context::fiber{};
            });
}
159
// Switches to `this` and, once the switch is complete, schedules
// `ready_ctx` on the (new) active context's scheduler.
void
context::resume( context * ready_ctx) noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    std::move( c_).resume_with([prev,ready_ctx](boost::context::fiber && c){
                prev->c_ = std::move( c);
                // runs on the new fiber's stack, after the switch
                context::active()->schedule( ready_ctx);
                return boost::context::fiber{};
            });
}
173
// Suspends the fiber until it is explicitly scheduled again.
void
context::suspend() noexcept {
    get_scheduler()->suspend();
}
178
// Suspends the fiber, handing `lk` to the scheduler so it can be released
// after the context switch.
void
context::suspend( detail::spinlock_lock & lk) noexcept {
    get_scheduler()->suspend( lk);
}
183
// Blocks the calling fiber until `this` fiber has terminated.
void
context::join() {
    // get active context
    context * active_ctx = context::active();
    // protect for concurrent access
    std::unique_lock< detail::spinlock > lk{ splk_ };
    // wait for context which is not terminated
    if ( ! terminated_) {
        // push active context to wait-queue, member
        // of the context which has to be joined by
        // the active context; suspends until terminate() notifies
        wait_queue_.suspend_and_wait( lk, active_ctx);
        // active context resumed
        BOOST_ASSERT( context::active() == active_ctx);
    }
}
200
// Voluntarily gives up the CPU; the fiber stays ready and will be
// rescheduled by its scheduler.
void
context::yield() noexcept {
    // yield active context
    get_scheduler()->yield( context::active() );
}
206
// Like resume(), but returns the continuation handed back when `this`
// is eventually resumed (used by the scheduler's dispatch loop).
boost::context::fiber
context::suspend_with_cc() noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    return std::move( c_).resume_with([prev](boost::context::fiber && c){
                prev->c_ = std::move( c);
                return boost::context::fiber{};
            });
}
7c673cae 219
// Final act of a fiber: mark terminated, wake joiners, run FSS cleanup,
// then switch away for good via the scheduler.
boost::context::fiber
context::terminate() noexcept {
    // protect for concurrent access
    std::unique_lock< detail::spinlock > lk{ splk_ };
    // mark as terminated
    terminated_ = true;
    // notify all waiting fibers (joiners blocked in join())
    wait_queue_.notify_all();
    BOOST_ASSERT( wait_queue_.empty() );
    // release fiber-specific-data
    for ( fss_data_t::value_type & data : fss_data_) {
        data.second.do_cleanup();
    }
    fss_data_.clear();
    // switch to another context; the scheduler takes ownership of the
    // lock and of reclaiming this context
    return get_scheduler()->terminate( lk, this);
}
237
// Sleeps the active fiber until time-point `tp`; returns the scheduler's
// result (whether the deadline was reached).
bool
context::wait_until( std::chrono::steady_clock::time_point const& tp) noexcept {
    BOOST_ASSERT( nullptr != get_scheduler() );
    BOOST_ASSERT( this == active() );
    return get_scheduler()->wait_until( this, tp);
}
244
// As wait_until(tp) but additionally releases `lk` during the wait and
// registers waker `w` so another fiber can wake this one early.
bool
context::wait_until( std::chrono::steady_clock::time_point const& tp,
                     detail::spinlock_lock & lk,
                     waker && w) noexcept {
    BOOST_ASSERT( nullptr != get_scheduler() );
    BOOST_ASSERT( this == active() );
    return get_scheduler()->wait_until( this, tp, lk, std::move(w));
}
253
254
// Wakes this (suspended) fiber if `epoch` is still current. Each wake
// attempt carries the epoch it observed; the CAS ensures only one waker
// per epoch succeeds, so stale wake-ups are dropped.
bool context::wake(const size_t epoch) noexcept
{
    size_t expected = epoch;
    // acq_rel: winner both publishes the new epoch and observes prior state
    bool is_last_waker = waker_epoch_.compare_exchange_strong(expected, epoch + 1, std::memory_order_acq_rel);
    if ( ! is_last_waker) {
        // waker_epoch_ has been incremented before, so consider this wake
        // operation as outdated and do nothing
        return false;
    }

    BOOST_ASSERT( context::active() != this);
    // schedule locally when the waker runs on the same scheduler,
    // otherwise hand over via the remote-ready queue
    if ( context::active()->get_scheduler() == get_scheduler()) {
        get_scheduler()->schedule( this);
    } else {
        get_scheduler()->schedule_from_remote( this);
    }
    return true;
}
273
20effc67 274
// Makes `ctx` ready to run, routing it to its own scheduler — locally
// when it shares this context's scheduler, otherwise via the remote path.
void
context::schedule( context * ctx) noexcept {
    //BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( this != ctx);
    BOOST_ASSERT( nullptr != get_scheduler() );
    BOOST_ASSERT( nullptr != ctx->get_scheduler() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    // FIXME: comparing scheduler address' must be synchronized?
    //        what if ctx is migrated between threads
    //        (other scheduler assigned)
    if ( scheduler_ == ctx->get_scheduler() ) {
        // local
        get_scheduler()->schedule( ctx);
    } else {
        // remote
        ctx->get_scheduler()->schedule_from_remote( ctx);
    }
#else
    // single-threaded build: cross-scheduler scheduling is impossible
    BOOST_ASSERT( get_scheduler() == ctx->get_scheduler() );
    get_scheduler()->schedule( ctx);
#endif
}
297
298void *
299context::get_fss_data( void const * vp) const {
f67539c2
TL
300 auto key = reinterpret_cast< uintptr_t >( vp);
301 auto i = fss_data_.find( key);
7c673cae
FG
302 return fss_data_.end() != i ? i->second.vp : nullptr;
303}
304
// Stores (or erases) fiber-specific-storage data under key `vp`.
// cleanup_fn: cleanup callback kept alongside the value (must be non-null);
// data: new value, or nullptr to erase the entry;
// cleanup_existing: run the old entry's cleanup before replacing/erasing.
void
context::set_fss_data( void const * vp,
                       detail::fss_cleanup_function::ptr_t const& cleanup_fn,
                       void * data,
                       bool cleanup_existing) {
    BOOST_ASSERT( cleanup_fn);
    auto key = reinterpret_cast< uintptr_t >( vp);
    auto i = fss_data_.find( key);
    if ( fss_data_.end() != i) {
        if( cleanup_existing) {
            i->second.do_cleanup();
        }
        if ( nullptr != data) {
            // replace existing entry in place
            i->second = fss_data{ data, cleanup_fn };
        } else {
            // nullptr data means "remove the slot"
            fss_data_.erase( i);
        }
    } else {
        fss_data_.insert(
                std::make_pair(
                    key,
                    fss_data{ data, cleanup_fn } ) );
    }
}
329
// Replaces the scheduler-specific properties object; the context takes
// ownership of `props` and frees any previous one.
void
context::set_properties( fiber_properties * props) noexcept {
    delete properties_;
    properties_ = props;
}
335
// True while this context sits in a scheduler's worker list.
bool
context::worker_is_linked() const noexcept {
    return worker_hook_.is_linked();
}
340
// True while this context sits in a scheduler's ready queue.
bool
context::ready_is_linked() const noexcept {
    return ready_hook_.is_linked();
}
345
// True while this context sits in a scheduler's remote-ready queue
// (scheduled from another thread).
bool
context::remote_ready_is_linked() const noexcept {
    return remote_ready_hook_.is_linked();
}
350
// True while this context sits in a scheduler's sleep queue.
bool
context::sleep_is_linked() const noexcept {
    return sleep_hook_.is_linked();
}
355
b32b8144
FG
// True while this context sits in a scheduler's terminated queue.
bool
context::terminated_is_linked() const noexcept {
    return terminated_hook_.is_linked();
}
360
7c673cae
FG
// Removes this context from its scheduler's worker list.
void
context::worker_unlink() noexcept {
    BOOST_ASSERT( worker_is_linked() );
    worker_hook_.unlink();
}
366
// Removes this context from its scheduler's ready queue.
void
context::ready_unlink() noexcept {
    BOOST_ASSERT( ready_is_linked() );
    ready_hook_.unlink();
}
372
// Removes this context from its scheduler's sleep queue.
void
context::sleep_unlink() noexcept {
    BOOST_ASSERT( sleep_is_linked() );
    sleep_hook_.unlink();
}
378
7c673cae
FG
// Detaches this context from its scheduler's worker list; must not be
// called on the currently running fiber.
void
context::detach() noexcept {
    BOOST_ASSERT( context::active() != this);
    get_scheduler()->detach_worker_context( this);
}
384
// Attaches `ctx` as a worker of this context's scheduler.
void
context::attach( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    get_scheduler()->attach_worker_context( ctx);
}
390
391}}
392
393#ifdef BOOST_HAS_ABI_HEADERS
394# include BOOST_ABI_SUFFIX
395#endif