//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
11 #ifndef BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
12 #define BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
14 #if defined(_MSC_VER) && (_MSC_VER >= 1200)
16 #endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
18 #include <boost/asio/detail/config.hpp>
20 #include <boost/asio/detail/concurrency_hint.hpp>
21 #include <boost/asio/detail/event.hpp>
22 #include <boost/asio/detail/limits.hpp>
23 #include <boost/asio/detail/reactor.hpp>
24 #include <boost/asio/detail/scheduler.hpp>
25 #include <boost/asio/detail/scheduler_thread_info.hpp>
27 #include <boost/asio/detail/push_options.hpp>
33 struct scheduler::task_cleanup
37 if (this_thread_->private_outstanding_work > 0)
39 boost::asio::detail::increment(
40 scheduler_->outstanding_work_,
41 this_thread_->private_outstanding_work);
43 this_thread_->private_outstanding_work = 0;
45 // Enqueue the completed operations and reinsert the task at the end of
46 // the operation queue.
48 scheduler_->task_interrupted_ = true;
49 scheduler_->op_queue_.push(this_thread_->private_op_queue);
50 scheduler_->op_queue_.push(&scheduler_->task_operation_);
53 scheduler* scheduler_;
54 mutex::scoped_lock* lock_;
55 thread_info* this_thread_;
58 struct scheduler::work_cleanup
62 if (this_thread_->private_outstanding_work > 1)
64 boost::asio::detail::increment(
65 scheduler_->outstanding_work_,
66 this_thread_->private_outstanding_work - 1);
68 else if (this_thread_->private_outstanding_work < 1)
70 scheduler_->work_finished();
72 this_thread_->private_outstanding_work = 0;
74 #if defined(BOOST_ASIO_HAS_THREADS)
75 if (!this_thread_->private_op_queue.empty())
78 scheduler_->op_queue_.push(this_thread_->private_op_queue);
80 #endif // defined(BOOST_ASIO_HAS_THREADS)
83 scheduler* scheduler_;
84 mutex::scoped_lock* lock_;
85 thread_info* this_thread_;
89 boost::asio::execution_context& ctx, int concurrency_hint)
90 : boost::asio::detail::execution_context_service_base<scheduler>(ctx),
91 one_thread_(concurrency_hint == 1
92 || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
93 SCHEDULER, concurrency_hint)
94 || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
95 REACTOR_IO, concurrency_hint)),
96 mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
97 SCHEDULER, concurrency_hint)),
99 task_interrupted_(true),
100 outstanding_work_(0),
103 concurrency_hint_(concurrency_hint)
105 BOOST_ASIO_HANDLER_TRACKING_INIT;
108 void scheduler::shutdown()
110 mutex::scoped_lock lock(mutex_);
114 // Destroy handler objects.
115 while (!op_queue_.empty())
117 operation* o = op_queue_.front();
119 if (o != &task_operation_)
123 // Reset to initial state.
127 void scheduler::init_task()
129 mutex::scoped_lock lock(mutex_);
130 if (!shutdown_ && !task_)
132 task_ = &use_service<reactor>(this->context());
133 op_queue_.push(&task_operation_);
134 wake_one_thread_and_unlock(lock);
138 std::size_t scheduler::run(boost::system::error_code& ec)
140 ec = boost::system::error_code();
141 if (outstanding_work_ == 0)
147 thread_info this_thread;
148 this_thread.private_outstanding_work = 0;
149 thread_call_stack::context ctx(this, this_thread);
151 mutex::scoped_lock lock(mutex_);
154 for (; do_run_one(lock, this_thread, ec); lock.lock())
155 if (n != (std::numeric_limits<std::size_t>::max)())
160 std::size_t scheduler::run_one(boost::system::error_code& ec)
162 ec = boost::system::error_code();
163 if (outstanding_work_ == 0)
169 thread_info this_thread;
170 this_thread.private_outstanding_work = 0;
171 thread_call_stack::context ctx(this, this_thread);
173 mutex::scoped_lock lock(mutex_);
175 return do_run_one(lock, this_thread, ec);
178 std::size_t scheduler::wait_one(long usec, boost::system::error_code& ec)
180 ec = boost::system::error_code();
181 if (outstanding_work_ == 0)
187 thread_info this_thread;
188 this_thread.private_outstanding_work = 0;
189 thread_call_stack::context ctx(this, this_thread);
191 mutex::scoped_lock lock(mutex_);
193 return do_wait_one(lock, this_thread, usec, ec);
196 std::size_t scheduler::poll(boost::system::error_code& ec)
198 ec = boost::system::error_code();
199 if (outstanding_work_ == 0)
205 thread_info this_thread;
206 this_thread.private_outstanding_work = 0;
207 thread_call_stack::context ctx(this, this_thread);
209 mutex::scoped_lock lock(mutex_);
211 #if defined(BOOST_ASIO_HAS_THREADS)
212 // We want to support nested calls to poll() and poll_one(), so any handlers
213 // that are already on a thread-private queue need to be put on to the main
216 if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
217 op_queue_.push(outer_info->private_op_queue);
218 #endif // defined(BOOST_ASIO_HAS_THREADS)
221 for (; do_poll_one(lock, this_thread, ec); lock.lock())
222 if (n != (std::numeric_limits<std::size_t>::max)())
227 std::size_t scheduler::poll_one(boost::system::error_code& ec)
229 ec = boost::system::error_code();
230 if (outstanding_work_ == 0)
236 thread_info this_thread;
237 this_thread.private_outstanding_work = 0;
238 thread_call_stack::context ctx(this, this_thread);
240 mutex::scoped_lock lock(mutex_);
242 #if defined(BOOST_ASIO_HAS_THREADS)
243 // We want to support nested calls to poll() and poll_one(), so any handlers
244 // that are already on a thread-private queue need to be put on to the main
247 if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
248 op_queue_.push(outer_info->private_op_queue);
249 #endif // defined(BOOST_ASIO_HAS_THREADS)
251 return do_poll_one(lock, this_thread, ec);
254 void scheduler::stop()
256 mutex::scoped_lock lock(mutex_);
257 stop_all_threads(lock);
260 bool scheduler::stopped() const
262 mutex::scoped_lock lock(mutex_);
266 void scheduler::restart()
268 mutex::scoped_lock lock(mutex_);
272 void scheduler::compensating_work_started()
274 thread_info_base* this_thread = thread_call_stack::contains(this);
275 ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
278 void scheduler::post_immediate_completion(
279 scheduler::operation* op, bool is_continuation)
281 #if defined(BOOST_ASIO_HAS_THREADS)
282 if (one_thread_ || is_continuation)
284 if (thread_info_base* this_thread = thread_call_stack::contains(this))
286 ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
287 static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
291 #else // defined(BOOST_ASIO_HAS_THREADS)
292 (void)is_continuation;
293 #endif // defined(BOOST_ASIO_HAS_THREADS)
296 mutex::scoped_lock lock(mutex_);
298 wake_one_thread_and_unlock(lock);
301 void scheduler::post_deferred_completion(scheduler::operation* op)
303 #if defined(BOOST_ASIO_HAS_THREADS)
306 if (thread_info_base* this_thread = thread_call_stack::contains(this))
308 static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
312 #endif // defined(BOOST_ASIO_HAS_THREADS)
314 mutex::scoped_lock lock(mutex_);
316 wake_one_thread_and_unlock(lock);
319 void scheduler::post_deferred_completions(
320 op_queue<scheduler::operation>& ops)
324 #if defined(BOOST_ASIO_HAS_THREADS)
327 if (thread_info_base* this_thread = thread_call_stack::contains(this))
329 static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
333 #endif // defined(BOOST_ASIO_HAS_THREADS)
335 mutex::scoped_lock lock(mutex_);
337 wake_one_thread_and_unlock(lock);
341 void scheduler::do_dispatch(
342 scheduler::operation* op)
345 mutex::scoped_lock lock(mutex_);
347 wake_one_thread_and_unlock(lock);
350 void scheduler::abandon_operations(
351 op_queue<scheduler::operation>& ops)
353 op_queue<scheduler::operation> ops2;
357 std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
358 scheduler::thread_info& this_thread,
359 const boost::system::error_code& ec)
363 if (!op_queue_.empty())
365 // Prepare to execute first handler from queue.
366 operation* o = op_queue_.front();
368 bool more_handlers = (!op_queue_.empty());
370 if (o == &task_operation_)
372 task_interrupted_ = more_handlers;
374 if (more_handlers && !one_thread_)
375 wakeup_event_.unlock_and_signal_one(lock);
379 task_cleanup on_exit = { this, &lock, &this_thread };
382 // Run the task. May throw an exception. Only block if the operation
383 // queue is empty and we're not polling, otherwise we want to return
384 // as soon as possible.
385 task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
389 std::size_t task_result = o->task_result_;
391 if (more_handlers && !one_thread_)
392 wake_one_thread_and_unlock(lock);
396 // Ensure the count of outstanding work is decremented on block exit.
397 work_cleanup on_exit = { this, &lock, &this_thread };
400 // Complete the operation. May throw an exception. Deletes the object.
401 o->complete(this, ec, task_result);
408 wakeup_event_.clear(lock);
409 wakeup_event_.wait(lock);
416 std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
417 scheduler::thread_info& this_thread, long usec,
418 const boost::system::error_code& ec)
423 operation* o = op_queue_.front();
426 wakeup_event_.clear(lock);
427 wakeup_event_.wait_for_usec(lock, usec);
428 usec = 0; // Wait at most once.
429 o = op_queue_.front();
432 if (o == &task_operation_)
435 bool more_handlers = (!op_queue_.empty());
437 task_interrupted_ = more_handlers;
439 if (more_handlers && !one_thread_)
440 wakeup_event_.unlock_and_signal_one(lock);
445 task_cleanup on_exit = { this, &lock, &this_thread };
448 // Run the task. May throw an exception. Only block if the operation
449 // queue is empty and we're not polling, otherwise we want to return
450 // as soon as possible.
451 task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
454 o = op_queue_.front();
455 if (o == &task_operation_)
458 wakeup_event_.maybe_unlock_and_signal_one(lock);
467 bool more_handlers = (!op_queue_.empty());
469 std::size_t task_result = o->task_result_;
471 if (more_handlers && !one_thread_)
472 wake_one_thread_and_unlock(lock);
476 // Ensure the count of outstanding work is decremented on block exit.
477 work_cleanup on_exit = { this, &lock, &this_thread };
480 // Complete the operation. May throw an exception. Deletes the object.
481 o->complete(this, ec, task_result);
486 std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
487 scheduler::thread_info& this_thread,
488 const boost::system::error_code& ec)
493 operation* o = op_queue_.front();
494 if (o == &task_operation_)
500 task_cleanup c = { this, &lock, &this_thread };
503 // Run the task. May throw an exception. Only block if the operation
504 // queue is empty and we're not polling, otherwise we want to return
505 // as soon as possible.
506 task_->run(0, this_thread.private_op_queue);
509 o = op_queue_.front();
510 if (o == &task_operation_)
512 wakeup_event_.maybe_unlock_and_signal_one(lock);
521 bool more_handlers = (!op_queue_.empty());
523 std::size_t task_result = o->task_result_;
525 if (more_handlers && !one_thread_)
526 wake_one_thread_and_unlock(lock);
530 // Ensure the count of outstanding work is decremented on block exit.
531 work_cleanup on_exit = { this, &lock, &this_thread };
534 // Complete the operation. May throw an exception. Deletes the object.
535 o->complete(this, ec, task_result);
540 void scheduler::stop_all_threads(
541 mutex::scoped_lock& lock)
544 wakeup_event_.signal_all(lock);
546 if (!task_interrupted_ && task_)
548 task_interrupted_ = true;
553 void scheduler::wake_one_thread_and_unlock(
554 mutex::scoped_lock& lock)
556 if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
558 if (!task_interrupted_ && task_)
560 task_interrupted_ = true;
567 } // namespace detail
571 #include <boost/asio/detail/pop_options.hpp>
573 #endif // BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP