//
// detail/impl/win_iocp_io_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2022 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#if defined(BOOST_ASIO_HAS_IOCP)

#include <boost/asio/error.hpp>
#include <boost/asio/detail/cstdint.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/thread.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/detail/win_iocp_io_context.hpp>

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

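// Function object run by the internally owned thread when the io_context is
// constructed with own_thread == true; it simply calls run() until the
// context is stopped.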
struct win_iocp_io_context::thread_function
{
  explicit thread_function(win_iocp_io_context* s)
    : this_(s)
  {
  }

  void operator()()
  {
    boost::system::error_code ec;
    this_->run(ec);
  }

  win_iocp_io_context* this_;
};

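// Scope guard that decrements the outstanding work count when a handler has
// finished executing, even if the handler exits by throwing an exception.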
struct win_iocp_io_context::work_finished_on_block_exit
{
  ~work_finished_on_block_exit()
  {
    io_context_->work_finished();
  }

  win_iocp_io_context* io_context_;
};

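// Function object run by the dedicated timer thread. It blocks on the
// waitable timer and, each time the timer fires, flags dispatch_required_
// and posts a wake_for_dispatch packet so that an I/O thread will process
// any expired timers.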
struct win_iocp_io_context::timer_thread_function
{
  void operator()()
  {
    while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0)
    {
      if (::WaitForSingleObject(io_context_->waitable_timer_.handle,
            INFINITE) == WAIT_OBJECT_0)
      {
        ::InterlockedExchange(&io_context_->dispatch_required_, 1);
        ::PostQueuedCompletionStatus(io_context_->iocp_.handle,
            0, wake_for_dispatch, 0);
      }
    }
  }

  win_iocp_io_context* io_context_;
};

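// A negative concurrency_hint is passed to CreateIoCompletionPort as
// DWORD(~0). When own_thread is true, an internal thread is spawned to run
// the io_context and is counted as outstanding work until shutdown.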
win_iocp_io_context::win_iocp_io_context(
    boost::asio::execution_context& ctx, int concurrency_hint, bool own_thread)
  : execution_context_service_base<win_iocp_io_context>(ctx),
    iocp_(),
    outstanding_work_(0),
    stopped_(0),
    stop_event_posted_(0),
    shutdown_(0),
    gqcs_timeout_(get_gqcs_timeout()),
    dispatch_required_(0),
    concurrency_hint_(concurrency_hint)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;

  iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
      static_cast<DWORD>(concurrency_hint >= 0 ? concurrency_hint : DWORD(~0)));
  if (!iocp_.handle)
  {
    DWORD last_error = ::GetLastError();
    boost::system::error_code ec(last_error,
        boost::asio::error::get_system_category());
    boost::asio::detail::throw_error(ec, "iocp");
  }

  if (own_thread)
  {
    ::InterlockedIncrement(&outstanding_work_);
    thread_.reset(new boost::asio::detail::thread(thread_function(this)));
  }
}

win_iocp_io_context::~win_iocp_io_context()
{
  if (thread_.get())
  {
    stop();
    thread_->join();
    thread_.reset();
  }
}

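// Shuts the service down: sets the shutdown flag, fires the waitable timer so
// the timer thread can observe the flag, joins the internal run thread if one
// was created, then destroys every operation still counted as outstanding
// work, whether it sits in the timer queues, the completed-ops queue, or the
// completion port itself. Finally the timer thread is joined.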
void win_iocp_io_context::shutdown()
{
  ::InterlockedExchange(&shutdown_, 1);

  if (timer_thread_.get())
  {
    LARGE_INTEGER timeout;
    timeout.QuadPart = 1;
    ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);
  }

  if (thread_.get())
  {
    stop();
    thread_->join();
    thread_.reset();
    ::InterlockedDecrement(&outstanding_work_);
  }

  while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)
  {
    op_queue<win_iocp_operation> ops;
    timer_queues_.get_all_timers(ops);
    ops.push(completed_ops_);
    if (!ops.empty())
    {
      while (win_iocp_operation* op = ops.front())
      {
        ops.pop();
        ::InterlockedDecrement(&outstanding_work_);
        op->destroy();
      }
    }
    else
    {
      DWORD bytes_transferred = 0;
      dword_ptr_t completion_key = 0;
      LPOVERLAPPED overlapped = 0;
      ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
          &completion_key, &overlapped, gqcs_timeout_);
      if (overlapped)
      {
        ::InterlockedDecrement(&outstanding_work_);
        static_cast<win_iocp_operation*>(overlapped)->destroy();
      }
    }
  }

  if (timer_thread_.get())
    timer_thread_->join();
}

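// Associates the given handle with the completion port so that asynchronous
// operations on that handle are completed through this io_context.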
boost::system::error_code win_iocp_io_context::register_handle(
    HANDLE handle, boost::system::error_code& ec)
{
  if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)
  {
    DWORD last_error = ::GetLastError();
    ec = boost::system::error_code(last_error,
        boost::asio::error::get_system_category());
  }
  else
  {
    ec = boost::system::error_code();
  }
  return ec;
}

size_t win_iocp_io_context::run(boost::system::error_code& ec)
{
  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
  {
    stop();
    ec = boost::system::error_code();
    return 0;
  }

  win_iocp_thread_info this_thread;
  thread_call_stack::context ctx(this, this_thread);

  size_t n = 0;
  while (do_one(INFINITE, this_thread, ec))
    if (n != (std::numeric_limits<size_t>::max)())
      ++n;
  return n;
}

size_t win_iocp_io_context::run_one(boost::system::error_code& ec)
{
  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
  {
    stop();
    ec = boost::system::error_code();
    return 0;
  }

  win_iocp_thread_info this_thread;
  thread_call_stack::context ctx(this, this_thread);

  return do_one(INFINITE, this_thread, ec);
}

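// Like run_one(), but with a timeout. A negative usec means wait
// indefinitely; otherwise the timeout is converted from microseconds to
// milliseconds for the underlying GetQueuedCompletionStatus wait.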
size_t win_iocp_io_context::wait_one(long usec, boost::system::error_code& ec)
{
  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
  {
    stop();
    ec = boost::system::error_code();
    return 0;
  }

  win_iocp_thread_info this_thread;
  thread_call_stack::context ctx(this, this_thread);

  return do_one(usec < 0 ? INFINITE : ((usec - 1) / 1000 + 1), this_thread, ec);
}

size_t win_iocp_io_context::poll(boost::system::error_code& ec)
{
  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
  {
    stop();
    ec = boost::system::error_code();
    return 0;
  }

  win_iocp_thread_info this_thread;
  thread_call_stack::context ctx(this, this_thread);

  size_t n = 0;
  while (do_one(0, this_thread, ec))
    if (n != (std::numeric_limits<size_t>::max)())
      ++n;
  return n;
}

size_t win_iocp_io_context::poll_one(boost::system::error_code& ec)
{
  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
  {
    stop();
    ec = boost::system::error_code();
    return 0;
  }

  win_iocp_thread_info this_thread;
  thread_call_stack::context ctx(this, this_thread);

  return do_one(0, this_thread, ec);
}

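// Stops the scheduler by setting the stopped_ flag and posting a single
// zero-key completion packet. do_one() re-posts this "stop event" while the
// flag remains set, so each thread blocked in GetQueuedCompletionStatus is
// woken in turn and returns to its caller.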
void win_iocp_io_context::stop()
{
  if (::InterlockedExchange(&stopped_, 1) == 0)
  {
    if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
    {
      if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
      {
        DWORD last_error = ::GetLastError();
        boost::system::error_code ec(last_error,
            boost::asio::error::get_system_category());
        boost::asio::detail::throw_error(ec, "pqcs");
      }
    }
  }
}

bool win_iocp_io_context::can_dispatch()
{
  return thread_call_stack::contains(this) != 0;
}

void win_iocp_io_context::capture_current_exception()
{
  if (thread_info_base* this_thread = thread_call_stack::contains(this))
    this_thread->capture_current_exception();
}

void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op)
{
  // Flag the operation as ready.
  op->ready_ = 1;

  // Enqueue the operation on the I/O completion port.
  if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
  {
    // Out of resources. Put on completed queue instead.
    mutex::scoped_lock lock(dispatch_mutex_);
    completed_ops_.push(op);
    ::InterlockedExchange(&dispatch_required_, 1);
  }
}

void win_iocp_io_context::post_deferred_completions(
    op_queue<win_iocp_operation>& ops)
{
  while (win_iocp_operation* op = ops.front())
  {
    ops.pop();

    // Flag the operation as ready.
    op->ready_ = 1;

    // Enqueue the operation on the I/O completion port.
    if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
    {
      // Out of resources. Put on completed queue instead.
      mutex::scoped_lock lock(dispatch_mutex_);
      completed_ops_.push(op);
      completed_ops_.push(ops);
      ::InterlockedExchange(&dispatch_required_, 1);
    }
  }
}

void win_iocp_io_context::abandon_operations(
    op_queue<win_iocp_operation>& ops)
{
  while (win_iocp_operation* op = ops.front())
  {
    ops.pop();
    ::InterlockedDecrement(&outstanding_work_);
    op->destroy();
  }
}

void win_iocp_io_context::on_pending(win_iocp_operation* op)
{
  if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
  {
    // Enqueue the operation on the I/O completion port.
    if (!::PostQueuedCompletionStatus(iocp_.handle,
          0, overlapped_contains_result, op))
    {
      // Out of resources. Put on completed queue instead.
      mutex::scoped_lock lock(dispatch_mutex_);
      completed_ops_.push(op);
      ::InterlockedExchange(&dispatch_required_, 1);
    }
  }
}

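// The completion result is carried in the operation's own OVERLAPPED fields:
// Internal holds a pointer to the error category, Offset the error value,
// and OffsetHigh the bytes transferred. Posting with the
// overlapped_contains_result key tells do_one() to read the result back from
// these fields rather than from GetQueuedCompletionStatus.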
void win_iocp_io_context::on_completion(win_iocp_operation* op,
    DWORD last_error, DWORD bytes_transferred)
{
  // Flag that the operation is ready for invocation.
  op->ready_ = 1;

  // Store results in the OVERLAPPED structure.
  op->Internal = reinterpret_cast<ulong_ptr_t>(
      &boost::asio::error::get_system_category());
  op->Offset = last_error;
  op->OffsetHigh = bytes_transferred;

  // Enqueue the operation on the I/O completion port.
  if (!::PostQueuedCompletionStatus(iocp_.handle,
        0, overlapped_contains_result, op))
  {
    // Out of resources. Put on completed queue instead.
    mutex::scoped_lock lock(dispatch_mutex_);
    completed_ops_.push(op);
    ::InterlockedExchange(&dispatch_required_, 1);
  }
}

void win_iocp_io_context::on_completion(win_iocp_operation* op,
    const boost::system::error_code& ec, DWORD bytes_transferred)
{
  // Flag that the operation is ready for invocation.
  op->ready_ = 1;

  // Store results in the OVERLAPPED structure.
  op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());
  op->Offset = ec.value();
  op->OffsetHigh = bytes_transferred;

  // Enqueue the operation on the I/O completion port.
  if (!::PostQueuedCompletionStatus(iocp_.handle,
        0, overlapped_contains_result, op))
  {
    // Out of resources. Put on completed queue instead.
    mutex::scoped_lock lock(dispatch_mutex_);
    completed_ops_.push(op);
    ::InterlockedExchange(&dispatch_required_, 1);
  }
}

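// Core of the scheduler loop: dispatch any pending timers and deferred
// completions if this thread can claim responsibility for them, then wait on
// GetQueuedCompletionStatus for at most msec milliseconds (capped by
// gqcs_timeout_) and run a single ready handler. Returns 1 if a handler was
// executed, 0 on timeout, error or stop.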
size_t win_iocp_io_context::do_one(DWORD msec,
    win_iocp_thread_info& this_thread, boost::system::error_code& ec)
{
  for (;;)
  {
    // Try to acquire responsibility for dispatching timers and completed ops.
    if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)
    {
      mutex::scoped_lock lock(dispatch_mutex_);

      // Dispatch pending timers and operations.
      op_queue<win_iocp_operation> ops;
      ops.push(completed_ops_);
      timer_queues_.get_ready_timers(ops);
      post_deferred_completions(ops);
      update_timeout();
    }

    // Get the next operation from the queue.
    DWORD bytes_transferred = 0;
    dword_ptr_t completion_key = 0;
    LPOVERLAPPED overlapped = 0;
    ::SetLastError(0);
    BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle,
        &bytes_transferred, &completion_key, &overlapped,
        msec < gqcs_timeout_ ? msec : gqcs_timeout_);
    DWORD last_error = ::GetLastError();

    if (overlapped)
    {
      win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);
      boost::system::error_code result_ec(last_error,
          boost::asio::error::get_system_category());

      // We may have been passed the last_error and bytes_transferred in the
      // OVERLAPPED structure itself.
      if (completion_key == overlapped_contains_result)
      {
        result_ec = boost::system::error_code(static_cast<int>(op->Offset),
            *reinterpret_cast<boost::system::error_category*>(op->Internal));
        bytes_transferred = op->OffsetHigh;
      }

      // Otherwise ensure any result has been saved into the OVERLAPPED
      // structure.
      else
      {
        op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());
        op->Offset = result_ec.value();
        op->OffsetHigh = bytes_transferred;
      }

      // Dispatch the operation only if ready. The operation may not be ready
      // if the initiating function (e.g. a call to WSARecv) has not yet
      // returned. This is because the initiating function still wants access
      // to the operation's OVERLAPPED structure.
      if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
      {
        // Ensure the count of outstanding work is decremented on block exit.
        work_finished_on_block_exit on_exit = { this };
        (void)on_exit;

        op->complete(this, result_ec, bytes_transferred);
        this_thread.rethrow_pending_exception();
        ec = boost::system::error_code();
        return 1;
      }
    }
    else if (!ok)
    {
      if (last_error != WAIT_TIMEOUT)
      {
        ec = boost::system::error_code(last_error,
            boost::asio::error::get_system_category());
        return 0;
      }

      // If we're waiting indefinitely we need to keep going until we get a
      // real handler.
      if (msec == INFINITE)
        continue;

      ec = boost::system::error_code();
      return 0;
    }
    else if (completion_key == wake_for_dispatch)
    {
      // We have been woken up to try to acquire responsibility for dispatching
      // timers and completed operations.
    }
    else
    {
      // Indicate that there is no longer an in-flight stop event.
      ::InterlockedExchange(&stop_event_posted_, 0);

      // The stopped_ flag is always checked to ensure that any leftover
      // stop events from a previous run invocation are ignored.
      if (::InterlockedExchangeAdd(&stopped_, 0) != 0)
      {
        // Wake up next thread that is blocked on GetQueuedCompletionStatus.
        if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
        {
          if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
          {
            last_error = ::GetLastError();
            ec = boost::system::error_code(last_error,
                boost::asio::error::get_system_category());
            return 0;
          }
        }

        ec = boost::system::error_code();
        return 0;
      }
    }
  }
}

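// Determines the timeout passed to GetQueuedCompletionStatus when no shorter
// wait is requested. On Windows Vista (major version 6) and later the wait
// can be INFINITE; on older systems a finite default_gqcs_timeout is used so
// that waiting threads wake up periodically.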
DWORD win_iocp_io_context::get_gqcs_timeout()
{
  OSVERSIONINFOEX osvi;
  ZeroMemory(&osvi, sizeof(osvi));
  osvi.dwOSVersionInfoSize = sizeof(osvi);
  osvi.dwMajorVersion = 6ul;

  const uint64_t condition_mask = ::VerSetConditionMask(
      0, VER_MAJORVERSION, VER_GREATER_EQUAL);

  if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask))
    return INFINITE;

  return default_gqcs_timeout;
}

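// Registers a timer queue. The waitable timer and the dedicated timer thread
// are created lazily the first time a queue is added.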
void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(dispatch_mutex_);

  timer_queues_.insert(&queue);

  if (!waitable_timer_.handle)
  {
    waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);
    if (waitable_timer_.handle == 0)
    {
      DWORD last_error = ::GetLastError();
      boost::system::error_code ec(last_error,
          boost::asio::error::get_system_category());
      boost::asio::detail::throw_error(ec, "timer");
    }

    LARGE_INTEGER timeout;
    timeout.QuadPart = -max_timeout_usec;
    timeout.QuadPart *= 10;
    ::SetWaitableTimer(waitable_timer_.handle,
        &timeout, max_timeout_msec, 0, 0, FALSE);
  }

  if (!timer_thread_.get())
  {
    timer_thread_function thread_function = { this };
    timer_thread_.reset(new thread(thread_function, 65536));
  }
}

void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(dispatch_mutex_);

  timer_queues_.erase(&queue);
}

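// Re-arms the waitable timer to fire when the earliest pending timer is due.
// The due time is negated to request a relative wait and multiplied by 10 to
// convert microseconds into the 100-nanosecond units that SetWaitableTimer
// expects.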
void win_iocp_io_context::update_timeout()
{
  if (timer_thread_.get())
  {
    // There's no point updating the waitable timer if the new timeout period
    // exceeds the maximum timeout. In that case, we might as well wait for the
    // existing period of the timer to expire.
    long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);
    if (timeout_usec < max_timeout_usec)
    {
      LARGE_INTEGER timeout;
      timeout.QuadPart = -timeout_usec;
      timeout.QuadPart *= 10;
      ::SetWaitableTimer(waitable_timer_.handle,
          &timeout, max_timeout_msec, 0, 0, FALSE);
    }
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // defined(BOOST_ASIO_HAS_IOCP)

#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP