//          Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
//    (See accompanying file LICENSE_1_0.txt or copy at
//          http://www.boost.org/LICENSE_1_0.txt)
#include "boost/fiber/context.hpp"

#include <cstddef>
#include <cstdint>
#include <functional>
#include <mutex>
#include <new>
#include <utility>

#include "boost/fiber/exceptions.hpp"
#include "boost/fiber/scheduler.hpp"

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_PREFIX
#endif
23 class main_context final
: public context
{
25 main_context() noexcept
:
26 context
{ 1, type::main_context
, launch::post
} {
30 class dispatcher_context final
: public context
{
33 run_( boost::context::fiber
&& c
) {
34 #if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
35 std::move( c
).resume();
37 // execute scheduler::dispatch()
38 return get_scheduler()->dispatch();
42 dispatcher_context( boost::context::preallocated
const& palloc
, default_stack
&& salloc
) :
43 context
{ 0, type::dispatcher_context
, launch::post
} {
44 c_
= boost::context::fiber
{ std::allocator_arg
, palloc
, salloc
,
45 std::bind( & dispatcher_context::run_
, this, std::placeholders::_1
) };
46 #if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
47 c_
= std::move( c_
).resume();
52 static intrusive_ptr
< context
> make_dispatcher_context() {
53 default_stack salloc
; // use default satck-size
54 auto sctx
= salloc
.allocate();
55 // reserve space for control structure
56 void * storage
= reinterpret_cast< void * >(
57 ( reinterpret_cast< uintptr_t >( sctx
.sp
) - static_cast< uintptr_t >( sizeof( dispatcher_context
) ) )
58 & ~ static_cast< uintptr_t >( 0xff) );
59 void * stack_bottom
= reinterpret_cast< void * >(
60 reinterpret_cast< uintptr_t >( sctx
.sp
) - static_cast< uintptr_t >( sctx
.size
) );
61 const std::size_t size
= reinterpret_cast< uintptr_t >( storage
) - reinterpret_cast< uintptr_t >( stack_bottom
);
62 // placement new of context on top of fiber's stack
63 return intrusive_ptr
< context
>{
64 new ( storage
) dispatcher_context
{
65 boost::context::preallocated
{ storage
, size
, sctx
}, std::move( salloc
) } };
69 struct context_initializer
{
70 static thread_local context
* active_
;
71 static thread_local
std::size_t counter_
;
73 context_initializer() {
74 if ( 0 == counter_
++) {
75 // main fiber context of this thread
76 context
* main_ctx
= new main_context
{};
77 // scheduler of this thread
78 auto sched
= new scheduler
{};
79 // attach main context to scheduler
80 sched
->attach_main_context( main_ctx
);
81 // create and attach dispatcher context to scheduler
82 sched
->attach_dispatcher_context( make_dispatcher_context() );
83 // make main context to active context
88 ~context_initializer() {
89 if ( 0 == --counter_
) {
90 context
* main_ctx
= active_
;
91 BOOST_ASSERT( main_ctx
->is_context( type::main_context
) );
92 scheduler
* sched
= main_ctx
->get_scheduler();
99 // zero-initialization
100 thread_local context
* context_initializer::active_
{ nullptr };
101 thread_local
std::size_t context_initializer::counter_
{ 0 };
104 context::active() noexcept
{
105 // initialized the first time control passes; per thread
106 thread_local
static context_initializer ctx_initializer
;
107 return context_initializer::active_
;
111 context::reset_active() noexcept
{
112 context_initializer::active_
= nullptr;
115 context::~context() {
116 // protect for concurrent access
117 std::unique_lock
< detail::spinlock
> lk
{ splk_
};
118 BOOST_ASSERT( ! ready_is_linked() );
119 BOOST_ASSERT( ! remote_ready_is_linked() );
120 BOOST_ASSERT( ! sleep_is_linked() );
121 BOOST_ASSERT( ! wait_is_linked() );
122 if ( is_context( type::dispatcher_context
) ) {
123 // dispatcher-context is resumed by main-context
124 // while the scheduler is deconstructed
125 #ifdef BOOST_DISABLE_ASSERTS
126 wait_queue_
.pop_front();
128 context
* ctx
= & wait_queue_
.front();
129 wait_queue_
.pop_front();
130 BOOST_ASSERT( ctx
->is_context( type::main_context
) );
131 BOOST_ASSERT( nullptr == active() );
134 BOOST_ASSERT( wait_queue_
.empty() );
139 context::get_id() const noexcept
{
140 return id
{ const_cast< context
* >( this) };
144 context::resume() noexcept
{
145 context
* prev
= this;
146 // context_initializer::active_ will point to `this`
147 // prev will point to previous active context
148 std::swap( context_initializer::active_
, prev
);
149 // pass pointer to the context that resumes `this`
150 std::move( c_
).resume_with([prev
](boost::context::fiber
&& c
){
151 prev
->c_
= std::move( c
);
152 return boost::context::fiber
{};
157 context::resume( detail::spinlock_lock
& lk
) noexcept
{
158 context
* prev
= this;
159 // context_initializer::active_ will point to `this`
160 // prev will point to previous active context
161 std::swap( context_initializer::active_
, prev
);
162 // pass pointer to the context that resumes `this`
163 std::move( c_
).resume_with([prev
,&lk
](boost::context::fiber
&& c
){
164 prev
->c_
= std::move( c
);
166 return boost::context::fiber
{};
171 context::resume( context
* ready_ctx
) noexcept
{
172 context
* prev
= this;
173 // context_initializer::active_ will point to `this`
174 // prev will point to previous active context
175 std::swap( context_initializer::active_
, prev
);
176 // pass pointer to the context that resumes `this`
177 std::move( c_
).resume_with([prev
,ready_ctx
](boost::context::fiber
&& c
){
178 prev
->c_
= std::move( c
);
179 context::active()->schedule( ready_ctx
);
180 return boost::context::fiber
{};
185 context::suspend() noexcept
{
186 get_scheduler()->suspend();
190 context::suspend( detail::spinlock_lock
& lk
) noexcept
{
191 get_scheduler()->suspend( lk
);
196 // get active context
197 context
* active_ctx
= context::active();
198 // protect for concurrent access
199 std::unique_lock
< detail::spinlock
> lk
{ splk_
};
200 // wait for context which is not terminated
201 if ( ! terminated_
) {
202 // push active context to wait-queue, member
203 // of the context which has to be joined by
204 // the active context
205 active_ctx
->wait_link( wait_queue_
);
206 // suspend active context
207 active_ctx
->get_scheduler()->suspend( lk
);
208 // active context resumed
209 BOOST_ASSERT( context::active() == active_ctx
);
214 context::yield() noexcept
{
215 // yield active context
216 get_scheduler()->yield( context::active() );
219 boost::context::fiber
220 context::suspend_with_cc() noexcept
{
221 context
* prev
= this;
222 // context_initializer::active_ will point to `this`
223 // prev will point to previous active context
224 std::swap( context_initializer::active_
, prev
);
225 // pass pointer to the context that resumes `this`
226 return std::move( c_
).resume_with([prev
](boost::context::fiber
&& c
){
227 prev
->c_
= std::move( c
);
228 return boost::context::fiber
{};
232 boost::context::fiber
233 context::terminate() noexcept
{
234 // protect for concurrent access
235 std::unique_lock
< detail::spinlock
> lk
{ splk_
};
236 // mark as terminated
238 // notify all waiting fibers
239 while ( ! wait_queue_
.empty() ) {
240 context
* ctx
= & wait_queue_
.front();
241 // remove fiber from wait-queue
242 wait_queue_
.pop_front();
246 BOOST_ASSERT( wait_queue_
.empty() );
247 // release fiber-specific-data
248 for ( fss_data_t::value_type
& data
: fss_data_
) {
249 data
.second
.do_cleanup();
252 // switch to another context
253 return get_scheduler()->terminate( lk
, this);
257 context::wait_until( std::chrono::steady_clock::time_point
const& tp
) noexcept
{
258 BOOST_ASSERT( nullptr != get_scheduler() );
259 BOOST_ASSERT( this == active() );
260 return get_scheduler()->wait_until( this, tp
);
264 context::wait_until( std::chrono::steady_clock::time_point
const& tp
,
265 detail::spinlock_lock
& lk
) noexcept
{
266 BOOST_ASSERT( nullptr != get_scheduler() );
267 BOOST_ASSERT( this == active() );
268 return get_scheduler()->wait_until( this, tp
, lk
);
272 context::schedule( context
* ctx
) noexcept
{
273 //BOOST_ASSERT( nullptr != ctx);
274 BOOST_ASSERT( this != ctx
);
275 BOOST_ASSERT( nullptr != get_scheduler() );
276 BOOST_ASSERT( nullptr != ctx
->get_scheduler() );
277 #if ! defined(BOOST_FIBERS_NO_ATOMICS)
278 // FIXME: comparing scheduler address' must be synchronized?
279 // what if ctx is migrated between threads
280 // (other scheduler assigned)
281 if ( scheduler_
== ctx
->get_scheduler() ) {
283 get_scheduler()->schedule( ctx
);
286 ctx
->get_scheduler()->schedule_from_remote( ctx
);
289 BOOST_ASSERT( get_scheduler() == ctx
->get_scheduler() );
290 get_scheduler()->schedule( ctx
);
295 context::get_fss_data( void const * vp
) const {
296 auto key
= reinterpret_cast< uintptr_t >( vp
);
297 auto i
= fss_data_
.find( key
);
298 return fss_data_
.end() != i
? i
->second
.vp
: nullptr;
302 context::set_fss_data( void const * vp
,
303 detail::fss_cleanup_function::ptr_t
const& cleanup_fn
,
305 bool cleanup_existing
) {
306 BOOST_ASSERT( cleanup_fn
);
307 auto key
= reinterpret_cast< uintptr_t >( vp
);
308 auto i
= fss_data_
.find( key
);
309 if ( fss_data_
.end() != i
) {
310 if( cleanup_existing
) {
311 i
->second
.do_cleanup();
313 if ( nullptr != data
) {
314 i
->second
= fss_data
{ data
, cleanup_fn
};
322 fss_data
{ data
, cleanup_fn
} ) );
327 context::set_properties( fiber_properties
* props
) noexcept
{
333 context::worker_is_linked() const noexcept
{
334 return worker_hook_
.is_linked();
338 context::ready_is_linked() const noexcept
{
339 return ready_hook_
.is_linked();
343 context::remote_ready_is_linked() const noexcept
{
344 return remote_ready_hook_
.is_linked();
348 context::sleep_is_linked() const noexcept
{
349 return sleep_hook_
.is_linked();
353 context::terminated_is_linked() const noexcept
{
354 return terminated_hook_
.is_linked();
358 context::wait_is_linked() const noexcept
{
359 return wait_hook_
.is_linked();
363 context::worker_unlink() noexcept
{
364 BOOST_ASSERT( worker_is_linked() );
365 worker_hook_
.unlink();
369 context::ready_unlink() noexcept
{
370 BOOST_ASSERT( ready_is_linked() );
371 ready_hook_
.unlink();
375 context::sleep_unlink() noexcept
{
376 BOOST_ASSERT( sleep_is_linked() );
377 sleep_hook_
.unlink();
381 context::wait_unlink() noexcept
{
382 BOOST_ASSERT( wait_is_linked() );
387 context::detach() noexcept
{
388 BOOST_ASSERT( context::active() != this);
389 get_scheduler()->detach_worker_context( this);
393 context::attach( context
* ctx
) noexcept
{
394 BOOST_ASSERT( nullptr != ctx
);
395 get_scheduler()->attach_worker_context( ctx
);
400 #ifdef BOOST_HAS_ABI_HEADERS
401 # include BOOST_ABI_SUFFIX