//
// detail/impl/kqueue_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#define BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#if defined(BOOST_ASIO_HAS_KQUEUE)

#include <boost/asio/detail/kqueue_reactor.hpp>
#include <boost/asio/detail/scheduler.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/error.hpp>

#include <boost/asio/detail/push_options.hpp>

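// On NetBSD the udata member of struct kevent is declared as intptr_t rather
// than void*, hence the extra cast in the registration macro below.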
#if defined(__NetBSD__)
# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
    EV_SET(ev, ident, filt, flags, fflags, data, \
      reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
#else
# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
    EV_SET(ev, ident, filt, flags, fflags, data, udata)
#endif

namespace boost {
namespace asio {
namespace detail {

kqueue_reactor::kqueue_reactor(boost::asio::execution_context& ctx)
  : execution_context_service_base<kqueue_reactor>(ctx),
    scheduler_(use_service<scheduler>(ctx)),
    mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
    kqueue_fd_(do_kqueue_create()),
    interrupter_(),
    shutdown_(false),
    registered_descriptors_mutex_(mutex_.enabled())
{
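  // Register the interrupter's read descriptor with the new kqueue so that
  // run() can be woken up by a call to interrupt().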
  struct kevent events[1];
  BOOST_ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
      EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
  if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
  {
    boost::system::error_code error(errno,
        boost::asio::error::get_system_category());
    boost::asio::detail::throw_error(error);
  }
}

kqueue_reactor::~kqueue_reactor()
{
  close(kqueue_fd_);
}

void kqueue_reactor::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  lock.unlock();

  op_queue<operation> ops;

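  // Collect any operations still queued on registered descriptors so that
  // they can be abandoned, and mark each descriptor state as shut down.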
  while (descriptor_state* state = registered_descriptors_.first())
  {
    for (int i = 0; i < max_ops; ++i)
      ops.push(state->op_queue_[i]);
    state->shutdown_ = true;
    registered_descriptors_.free(state);
  }

  timer_queues_.get_all_timers(ops);

  scheduler_.abandon_operations(ops);
}

void kqueue_reactor::notify_fork(
    boost::asio::execution_context::fork_event fork_ev)
{
  if (fork_ev == boost::asio::execution_context::fork_child)
  {
    // The kqueue descriptor is automatically closed in the child.
    kqueue_fd_ = -1;
    kqueue_fd_ = do_kqueue_create();

    interrupter_.recreate();

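    // Re-register the interrupter with the newly created kqueue.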
    struct kevent events[2];
    BOOST_ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
        EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
    if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
    {
      boost::system::error_code ec(errno,
          boost::asio::error::get_system_category());
      boost::asio::detail::throw_error(ec, "kqueue interrupter registration");
    }

    // Re-register all descriptors with kqueue.
    mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
    for (descriptor_state* state = registered_descriptors_.first();
        state != 0; state = state->next_)
    {
      if (state->num_kevents_ > 0)
      {
        BOOST_ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_,
            EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
        BOOST_ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_,
            EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
        if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1)
        {
          boost::system::error_code ec(errno,
              boost::asio::error::get_system_category());
          boost::asio::detail::throw_error(ec, "kqueue re-registration");
        }
      }
    }
  }
}

void kqueue_reactor::init_task()
{
  scheduler_.init_task();
}

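// Allocate per-descriptor state for a newly registered descriptor. No kevent
// registration is performed here; events are added lazily when an operation
// is started.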
int kqueue_reactor::register_descriptor(socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data)
{
  descriptor_data = allocate_descriptor_state();

  BOOST_ASIO_HANDLER_REACTOR_REGISTRATION((
        context(), static_cast<uintmax_t>(descriptor),
        reinterpret_cast<uintmax_t>(descriptor_data)));

  mutex::scoped_lock lock(descriptor_data->mutex_);

  descriptor_data->descriptor_ = descriptor;
  descriptor_data->num_kevents_ = 0;
  descriptor_data->shutdown_ = false;

  return 0;
}

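// Allocate per-descriptor state for a descriptor used internally by the
// reactor, registering it for EVFILT_READ and queueing the supplied
// operation immediately.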
int kqueue_reactor::register_internal_descriptor(
    int op_type, socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
  descriptor_data = allocate_descriptor_state();

  BOOST_ASIO_HANDLER_REACTOR_REGISTRATION((
        context(), static_cast<uintmax_t>(descriptor),
        reinterpret_cast<uintmax_t>(descriptor_data)));

  mutex::scoped_lock lock(descriptor_data->mutex_);

  descriptor_data->descriptor_ = descriptor;
  descriptor_data->num_kevents_ = 1;
  descriptor_data->shutdown_ = false;
  descriptor_data->op_queue_[op_type].push(op);

  struct kevent events[1];
  BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
      EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
  if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
    return errno;

  return 0;
}

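// Transfer per-descriptor state from one socket object to another when the
// owning object is moved.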
void kqueue_reactor::move_descriptor(socket_type,
    kqueue_reactor::per_descriptor_data& target_descriptor_data,
    kqueue_reactor::per_descriptor_data& source_descriptor_data)
{
  target_descriptor_data = source_descriptor_data;
  source_descriptor_data = 0;
}

void kqueue_reactor::start_op(int op_type, socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
    bool is_continuation, bool allow_speculative)
{
  if (!descriptor_data)
  {
    op->ec_ = boost::asio::error::bad_descriptor;
    post_immediate_completion(op, is_continuation);
    return;
  }

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (descriptor_data->shutdown_)
  {
    post_immediate_completion(op, is_continuation);
    return;
  }

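  // If no operations of this type are already pending, the new operation may
  // be attempted speculatively before any kevent registration takes place; a
  // successful speculative run completes without a trip through the reactor.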
  if (descriptor_data->op_queue_[op_type].empty())
  {
    static const int num_kevents[max_ops] = { 1, 2, 1 };

    if (allow_speculative
        && (op_type != read_op
          || descriptor_data->op_queue_[except_op].empty()))
    {
      if (op->perform())
      {
        descriptor_lock.unlock();
        scheduler_.post_immediate_completion(op, is_continuation);
        return;
      }

      if (descriptor_data->num_kevents_ < num_kevents[op_type])
      {
        struct kevent events[2];
        BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
            EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
        BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
            EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
        if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1)
        {
          descriptor_data->num_kevents_ = num_kevents[op_type];
        }
        else
        {
          op->ec_ = boost::system::error_code(errno,
              boost::asio::error::get_system_category());
          scheduler_.post_immediate_completion(op, is_continuation);
          return;
        }
      }
    }
    else
    {
      if (descriptor_data->num_kevents_ < num_kevents[op_type])
        descriptor_data->num_kevents_ = num_kevents[op_type];

      struct kevent events[2];
      BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
          EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
      BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
          EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
      ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
    }
  }

  descriptor_data->op_queue_[op_type].push(op);
  scheduler_.work_started();
}

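// Cancel all operations associated with the descriptor. Each cancelled
// operation completes with the operation_aborted error.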
void kqueue_reactor::cancel_ops(socket_type,
    kqueue_reactor::per_descriptor_data& descriptor_data)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  op_queue<operation> ops;
  for (int i = 0; i < max_ops; ++i)
  {
    while (reactor_op* op = descriptor_data->op_queue_[i].front())
    {
      op->ec_ = boost::asio::error::operation_aborted;
      descriptor_data->op_queue_[i].pop();
      ops.push(op);
    }
  }

  descriptor_lock.unlock();

  scheduler_.post_deferred_completions(ops);
}

void kqueue_reactor::deregister_descriptor(socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (!descriptor_data->shutdown_)
  {
    if (closing)
    {
      // The descriptor will be automatically removed from the kqueue when it
      // is closed.
    }
    else
    {
      struct kevent events[2];
      BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
          EVFILT_READ, EV_DELETE, 0, 0, 0);
      BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
          EVFILT_WRITE, EV_DELETE, 0, 0, 0);
      ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
    }

    op_queue<operation> ops;
    for (int i = 0; i < max_ops; ++i)
    {
      while (reactor_op* op = descriptor_data->op_queue_[i].front())
      {
        op->ec_ = boost::asio::error::operation_aborted;
        descriptor_data->op_queue_[i].pop();
        ops.push(op);
      }
    }

    descriptor_data->descriptor_ = -1;
    descriptor_data->shutdown_ = true;

    descriptor_lock.unlock();

    BOOST_ASIO_HANDLER_REACTOR_DEREGISTRATION((
          context(), static_cast<uintmax_t>(descriptor),
          reinterpret_cast<uintmax_t>(descriptor_data)));

    scheduler_.post_deferred_completions(ops);

    // Leave descriptor_data set so that it will be freed by the subsequent
    // call to cleanup_descriptor_data.
  }
  else
  {
    // We are shutting down, so prevent cleanup_descriptor_data from freeing
    // the descriptor_data object and let the destructor free it instead.
    descriptor_data = 0;
  }
}

void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (!descriptor_data->shutdown_)
  {
    struct kevent events[2];
    BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
        EVFILT_READ, EV_DELETE, 0, 0, 0);
    BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
        EVFILT_WRITE, EV_DELETE, 0, 0, 0);
    ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);

    op_queue<operation> ops;
    for (int i = 0; i < max_ops; ++i)
      ops.push(descriptor_data->op_queue_[i]);

    descriptor_data->descriptor_ = -1;
    descriptor_data->shutdown_ = true;

    descriptor_lock.unlock();

    BOOST_ASIO_HANDLER_REACTOR_DEREGISTRATION((
          context(), static_cast<uintmax_t>(descriptor),
          reinterpret_cast<uintmax_t>(descriptor_data)));

    // Leave descriptor_data set so that it will be freed by the subsequent
    // call to cleanup_descriptor_data.
  }
  else
  {
    // We are shutting down, so prevent cleanup_descriptor_data from freeing
    // the descriptor_data object and let the destructor free it instead.
    descriptor_data = 0;
  }
}

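// Free the per-descriptor state left behind by a prior call to
// deregister_descriptor() or deregister_internal_descriptor().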
void kqueue_reactor::cleanup_descriptor_data(
    per_descriptor_data& descriptor_data)
{
  if (descriptor_data)
  {
    free_descriptor_state(descriptor_data);
    descriptor_data = 0;
  }
}

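// Run the reactor once: wait for kqueue events (a zero usec value polls
// without blocking) and enqueue any ready operations and expired timers
// onto ops.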
void kqueue_reactor::run(long usec, op_queue<operation>& ops)
{
  mutex::scoped_lock lock(mutex_);

  // Determine how long to block while waiting for events.
  timespec timeout_buf = { 0, 0 };
  timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf;

  lock.unlock();

  // Block on the kqueue descriptor.
  struct kevent events[128];
  int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);

#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
  // Trace the waiting events.
  for (int i = 0; i < num_events; ++i)
  {
    void* ptr = reinterpret_cast<void*>(events[i].udata);
    if (ptr != &interrupter_)
    {
      unsigned event_mask = 0;
      switch (events[i].filter)
      {
      case EVFILT_READ:
        event_mask |= BOOST_ASIO_HANDLER_REACTOR_READ_EVENT;
        break;
      case EVFILT_WRITE:
        event_mask |= BOOST_ASIO_HANDLER_REACTOR_WRITE_EVENT;
        break;
      }
      if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0)
        event_mask |= BOOST_ASIO_HANDLER_REACTOR_ERROR_EVENT;
      BOOST_ASIO_HANDLER_REACTOR_EVENTS((context(),
            reinterpret_cast<uintmax_t>(ptr), event_mask));
    }
  }
#endif // defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)

  // Dispatch the waiting events.
  for (int i = 0; i < num_events; ++i)
  {
    void* ptr = reinterpret_cast<void*>(events[i].udata);
    if (ptr == &interrupter_)
    {
      interrupter_.reset();
    }
    else
    {
      descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
      mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

      if (events[i].filter == EVFILT_WRITE
          && descriptor_data->num_kevents_ == 2
          && descriptor_data->op_queue_[write_op].empty())
      {
        // Some descriptor types, like serial ports, don't seem to support
        // EV_CLEAR with EVFILT_WRITE. Since we have no pending write
        // operations we'll remove the EVFILT_WRITE registration here so that
        // we don't end up in a tight spin.
        struct kevent delete_events[1];
        BOOST_ASIO_KQUEUE_EV_SET(&delete_events[0],
            descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
        ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0);
        descriptor_data->num_kevents_ = 1;
      }

      // Exception operations must be processed first to ensure that any
      // out-of-band data is read before normal data.
#if defined(__NetBSD__)
      static const unsigned int filter[max_ops] =
#else
      static const int filter[max_ops] =
#endif
        { EVFILT_READ, EVFILT_WRITE, EVFILT_READ };
      for (int j = max_ops - 1; j >= 0; --j)
      {
        if (events[i].filter == filter[j])
        {
          if (j != except_op || events[i].flags & EV_OOBAND)
          {
            while (reactor_op* op = descriptor_data->op_queue_[j].front())
            {
              if (events[i].flags & EV_ERROR)
              {
                op->ec_ = boost::system::error_code(
                    static_cast<int>(events[i].data),
                    boost::asio::error::get_system_category());
                descriptor_data->op_queue_[j].pop();
                ops.push(op);
              }
              if (op->perform())
              {
                descriptor_data->op_queue_[j].pop();
                ops.push(op);
              }
              else
                break;
            }
          }
        }
      }
    }
  }

  lock.lock();
  timer_queues_.get_ready_timers(ops);
}

void kqueue_reactor::interrupt()
{
  interrupter_.interrupt();
}

int kqueue_reactor::do_kqueue_create()
{
  int fd = ::kqueue();
  if (fd == -1)
  {
    boost::system::error_code ec(errno,
        boost::asio::error::get_system_category());
    boost::asio::detail::throw_error(ec, "kqueue");
  }
  return fd;
}

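// Descriptor state objects are allocated from and returned to a pool that is
// guarded by registered_descriptors_mutex_.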
kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
{
  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
  return registered_descriptors_.alloc(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
        REACTOR_IO, scheduler_.concurrency_hint()));
}

void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
{
  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
  registered_descriptors_.free(s);
}

void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(mutex_);
  timer_queues_.insert(&queue);
}

void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(mutex_);
  timer_queues_.erase(&queue);
}

timespec* kqueue_reactor::get_timeout(long usec, timespec& ts)
{
  // By default we will wait no longer than 5 minutes. This will ensure that
  // any changes to the system clock are detected after no longer than this.
  const long max_usec = 5 * 60 * 1000 * 1000;
  usec = timer_queues_.wait_duration_usec(
      (usec < 0 || max_usec < usec) ? max_usec : usec);
  ts.tv_sec = usec / 1000000;
  ts.tv_nsec = (usec % 1000000) * 1000;
  return &ts;
}

} // namespace detail
} // namespace asio
} // namespace boost

#undef BOOST_ASIO_KQUEUE_EV_SET

#include <boost/asio/detail/pop_options.hpp>

#endif // defined(BOOST_ASIO_HAS_KQUEUE)

#endif // BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP