// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED

#include <boost/assert.hpp>
#include <boost/checked_delete.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/integer_traits.hpp>
#include <boost/static_assert.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/type_traits/is_copy_constructible.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace lockfree {
namespace detail {

typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > stack_signature;

}

/** The stack class provides a multi-writer/multi-reader stack; pushing and popping are lock-free,
 *  but construction and destruction have to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
 *    by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
 *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
 *    to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile-time.<br>
 *    This option implies \c fixed_sized<true>.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *  - T must have a copy constructor
 * */
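// Example (illustrative sketch, assuming only this header and the standard library):
// the policies above are passed as extra template arguments.
//
//   #include <boost/lockfree/stack.hpp>
//
//   // compile-time capacity; implies a fixed-sized, array-based freelist:
//   boost::lockfree::stack<int, boost::lockfree::capacity<1024> > fixed_stack;
//
//   // run-time sized stack; the freelist starts with 128 preallocated nodes
//   // and may grow by allocating from the OS:
//   boost::lockfree::stack<int> growing_stack(128);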
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1, class A2>
#else
template <typename T, typename ...Options>
#endif
class stack
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT(boost::is_copy_constructible<T>::value);

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
#else
    typedef typename detail::stack_signature::bind<Options...>::type bound_args;
#endif

    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;

    struct node
    {
        node(T const & val):
            v(val)
        {}

        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
        handle_t next;
        const T v;
    };

    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;

    // check compile-time capacity
    BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
                                   mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
                                   mpl::true_
                                  >::type::value));

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif

    BOOST_DELETED_FUNCTION(stack(stack const&))
    BOOST_DELETED_FUNCTION(stack& operator= (stack const&))

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if the implementation is lock-free.
     *
     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
     *          On most platforms, the whole implementation is lock-free if this is true. Using c++0x-style atomics,
     *          there is no way to provide a completely accurate answer, because one would need to test
     *          every internal node, which is impossible if further nodes will be allocated from the operating system.
     *
     * */
    bool is_lock_free (void) const
    {
        return tos.is_lock_free() && pool.is_lock_free();
    }

    /** Construct a fixed-sized stack
     *
     * \pre Must specify a capacity<> argument
     * */
    stack(void):
        pool(node_allocator(), capacity)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    /** Construct a fixed-sized stack with a custom allocator
     *
     * \pre Must specify a capacity<> argument
     * */
    template <typename U>
    explicit stack(typename boost::allocator_rebind<node_allocator, U>::type const & alloc):
        pool(alloc, capacity)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    /** Construct a fixed-sized stack with a custom allocator
     *
     * \pre Must specify a capacity<> argument
     * */
    explicit stack(allocator const & alloc):
        pool(alloc, capacity)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    /** Construct a variable-sized stack
     *
     * Allocate n nodes initially for the freelist
     *
     * \pre Must \b not specify a capacity<> argument
     * */
    explicit stack(size_type n):
        pool(node_allocator(), n)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!has_capacity);
        initialize();
    }

    /** Construct a variable-sized stack with a custom allocator
     *
     * Allocate n nodes initially for the freelist
     *
     * \pre Must \b not specify a capacity<> argument
     * */
    template <typename U>
    stack(size_type n, typename boost::allocator_rebind<node_allocator, U>::type const & alloc):
        pool(alloc, n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }

    /** Allocate n nodes for freelist
     *
     * \pre only valid if no capacity<> argument given
     * \note thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve(size_type n)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!has_capacity);
        pool.template reserve<true>(n);
    }

    /** Allocate n nodes for freelist
     *
     * \pre only valid if no capacity<> argument given
     * \note not thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve_unsafe(size_type n)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!has_capacity);
        pool.template reserve<false>(n);
    }
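    // Example (illustrative sketch): constructing a run-time sized stack and
    // growing its freelist up front; 'job' is a hypothetical element type.
    //
    //   boost::lockfree::stack<job> stk(64);   // 64 nodes preallocated
    //   stk.reserve(1024);                     // grow the freelist before the hot path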

    /** Destroys the stack and frees all nodes from the freelist.
     *
     * \note not thread-safe
     *
     * */
    ~stack(void)
    {
        detail::consume_noop consume_functor;
        (void)consume_all(consume_functor);
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    void initialize(void)
    {
        tos.store(tagged_node_handle(pool.null_handle(), 0));
    }

    // Atomically splice the node chain [new_top_node .. end_node] on top of the stack.
    void link_nodes_atomic(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        for (;;) {
            tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
            end_node->next = pool.get_handle(old_tos);

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }
    }

    // Same as link_nodes_atomic, but without synchronization (single-threaded use only).
    void link_nodes_unsafe(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);

        tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
        end_node->next = pool.get_handle(old_tos);

        tos.store(new_tos, memory_order_relaxed);
    }

    // Construct nodes for the range [begin, end) and link them into a chain.
    // Returns (top, end) of the chain; 'ret' is set to the first element that
    // could not be allocated.
    template <bool Threadsafe, bool Bounded, typename ConstIterator>
    tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
    {
        ConstIterator it = begin;
        node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
        if (end_node == NULL) {
            ret = begin;
            return make_tuple<node*, node*>(NULL, NULL);
        }

        node * new_top_node = end_node;
        end_node->next = NULL;

        BOOST_TRY {
            /* link nodes */
            for (; it != end; ++it) {
                node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
                if (newnode == NULL)
                    break;
                newnode->next = new_top_node;
                new_top_node = newnode;
            }
        } BOOST_CATCH (...) {
            for (node * current_node = new_top_node; current_node != NULL;) {
                node * next = current_node->next;
                pool.template destruct<Threadsafe>(current_node);
                current_node = next;
            }
            BOOST_RETHROW;
        }
        BOOST_CATCH_END

        ret = it;
        return make_tuple(new_top_node, end_node);
    }
#endif

public:
    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool push(T const & v)
    {
        return do_push<false>(v);
    }

    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * */
    bool bounded_push(T const & v)
    {
        return do_push<true>(v);
    }
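    // Example (illustrative sketch): push() may allocate from the OS when the
    // freelist is exhausted, while bounded_push() fails instead of allocating.
    //
    //   boost::lockfree::stack<int> stk(16);
    //   stk.push(1);                 // may allocate a new node if the freelist is empty
    //   if (!stk.bounded_push(2)) {
    //       // freelist exhausted; element 2 was not pushed
    //   }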

#ifndef BOOST_DOXYGEN_INVOKED
private:
    template <bool Bounded>
    bool do_push(T const & v)
    {
        node * newnode = pool.template construct<true, Bounded>(v);
        if (newnode == 0)
            return false;

        link_nodes_atomic(newnode, newnode);
        return true;
    }

    template <bool Bounded, typename ConstIterator>
    ConstIterator do_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
        if (new_top_node)
            link_nodes_atomic(new_top_node, end_node);

        return ret;
    }

public:
#endif

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return do_push<false, ConstIterator>(begin, end);
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
    {
        return do_push<true, ConstIterator>(begin, end);
    }
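    // Example (illustrative sketch): pushing a whole range and retrying the
    // part that did not fit.
    //
    //   std::vector<int> values(4, 7);
    //   std::vector<int>::const_iterator it = stk.push(values.begin(), values.end());
    //   // [values.begin(), it) has been pushed as one atomically linked chain;
    //   // [it, values.end()) could not be allocated and may be retried later.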


    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & v)
    {
        node * newnode = pool.template construct<false, false>(v);
        if (newnode == 0)
            return false;

        link_nodes_unsafe(newnode, newnode);
        return true;
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
        if (new_top_node)
            link_nodes_unsafe(new_top_node, end_node);

        return ret;
    }


    /** Pops object from stack.
     *
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true if the pop operation is successful, false if the stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    bool pop(T & ret)
    {
        return pop<T>(ret);
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true if the pop operation is successful, false if the stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template <typename U>
    bool pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        detail::consume_via_copy<U> consumer(ret);

        return consume_one(consumer);
    }
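    // Example (illustrative sketch): popping into the element type or into any
    // type the element converts to.
    //
    //   boost::lockfree::stack<int> stk(16);
    //   stk.push(42);
    //
    //   int i;
    //   if (stk.pop(i)) { /* i == 42 */ }
    //
    //   long l;
    //   if (stk.pop(l)) { /* works because int is convertible to long */ }
    //   else            { /* stack was empty */ }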


    /** Pops object from stack.
     *
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true if the pop operation is successful, false if the stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop(T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true if the pop operation is successful, false if the stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    template <typename U>
    bool unsynchronized_pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        node * old_tos_pointer = pool.get_pointer(old_tos);

        if (!pool.get_pointer(old_tos))
            return false;

        node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
        tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());

        tos.store(new_tos, memory_order_relaxed);
        detail::copy_payload(old_tos_pointer->v, ret);
        pool.template destruct<false>(old_tos);
        return true;
    }
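    // Example (illustrative sketch): draining a stack from a single thread,
    // e.g. during shutdown, without paying for atomic read-modify-write operations.
    //
    //   int value;
    //   while (stk.unsynchronized_pop(value))
    //       handle(value);   // 'handle' is a hypothetical consumer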

    /** consumes one element via a functor
     *
     *  pops one element from the stack and applies the functor to this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }
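    // Example (illustrative sketch, assuming C++11 lambdas): consuming one
    // element with a callable instead of copying it out via pop().
    //
    //   stk.consume_one([] (int value) {
    //       std::cout << value << '\n';
    //   });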

    /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }

    /** consumes all elements via a functor
     *
     *  sequentially pops all elements from the stack and applies the functor to each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all(Functor & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
    template <typename Functor>
    size_t consume_all(Functor const & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }
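    // Example (illustrative sketch): draining the stack with repeated lock-free
    // pops; elements pushed concurrently while this runs may also be consumed.
    //
    //   size_t n = stk.consume_all([] (int value) {
    //       process(value);   // 'process' is a hypothetical consumer
    //   });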

    /** consumes all elements via a functor
     *
     *  atomically pops all elements from the stack and applies the functor to each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        // detach the whole stack with a single successful compare-exchange
        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        // walk the detached chain, consuming and destroying each node
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }
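    // Example (illustrative sketch): consume_all_atomic() detaches the whole
    // stack with one compare-exchange, so elements pushed after that point are
    // not visited by this call; elements are visited in LIFO order.
    //
    //   size_t n = stk.consume_all_atomic([] (int value) {
    //       process(value);   // 'process' is a hypothetical consumer
    //   });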

    /** consumes all elements via a functor
     *
     *  atomically pops all elements from the stack and applies the functor to each object in reversed order
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        // detach the whole stack with a single successful compare-exchange
        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        // reverse the detached chain in place, so that the oldest element comes first
        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        // walk the reversed chain, consuming and destroying each node
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }
    /**
     * \return true, if the stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty(void) const
    {
        return pool.get_pointer(tos.load()) == NULL;
    }
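    // Example (illustrative sketch): empty() is only a snapshot, so treat it as
    // a hint and rely on the return value of pop():
    //
    //   if (!stk.empty()) {
    //       int value;
    //       if (stk.pop(value))   // can still fail if another thread popped first
    //           process(value);   // 'process' is a hypothetical consumer
    //   }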

private:
#ifndef BOOST_DOXYGEN_INVOKED
    detail::atomic<tagged_node_handle> tos;

    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding[padding_size];

    pool_t pool;
#endif
};

} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */