// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED

#include <boost/assert.hpp>
#include <boost/checked_delete.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/integer_traits.hpp>
#include <boost/static_assert.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/type_traits/is_copy_constructible.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace lockfree {
namespace detail {

typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > stack_signature;

}

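// Illustrative usage sketch (added for exposition, not part of the upstream Boost sources):
// the policies documented below are passed as template arguments, e.g.
//
//     // run-time sized stack; the freelist may grow by allocating from the OS:
//     boost::lockfree::stack<int> s(128);
//
//     // compile-time sized stack; capacity<> implies fixed_sized<true>:
//     boost::lockfree::stack<int, boost::lockfree::capacity<1024> > fixed_stack;
//
//     // fixed-sized stack with an explicitly chosen allocator:
//     boost::lockfree::stack<int,
//                            boost::lockfree::fixed_sized<true>,
//                            boost::lockfree::allocator<std::allocator<void> > > sized_stack(2048);
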
/** The stack class provides a multi-writer/multi-reader stack, pushing and popping are lock-free;
 *  construction and destruction have to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
 *    by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
 *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
 *    to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 *    This option implies \c fixed_sized<true>.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *  - T must have a copy constructor
 * */
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1, class A2>
#else
template <typename T, typename ...Options>
#endif
class stack
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT(boost::is_copy_constructible<T>::value);

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
#else
    typedef typename detail::stack_signature::bind<Options...>::type bound_args;
#endif

    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;

    struct node
    {
        node(T const & val):
            v(val)
        {}

        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
        handle_t next;
        const T v;
    };

    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;

    // check compile-time capacity
    BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
                                   mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
                                   mpl::true_
                                  >::type::value));

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif

    BOOST_DELETED_FUNCTION(stack(stack const&))
    BOOST_DELETED_FUNCTION(stack& operator= (stack const&))

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if implementation is lock-free.
     *
     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
     *          On most platforms, the whole implementation is lock-free if this is true. Using C++11-style atomics,
     *          there is no way to provide a completely accurate answer, because one would need to test
     *          every internal node, which is impossible if further nodes will be allocated from the operating system.
     *
     * */
    bool is_lock_free (void) const
    {
        return tos.is_lock_free() && pool.is_lock_free();
    }
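
    // Usage sketch (illustrative, not part of the upstream sources): lock-freedom depends on the
    // platform's atomic support and can only be queried at run time, e.g.
    //
    //     boost::lockfree::stack<int> s(128);
    //     if (!s.is_lock_free())
    //         std::cerr << "stack falls back to locking atomics on this platform\n";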

    /** Construct a fixed-sized stack
     *
     *  \pre Must specify a capacity<> argument
     * */
    stack(void):
        pool(node_allocator(), capacity)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    /** Construct a fixed-sized stack with a custom allocator
     *
     *  \pre Must specify a capacity<> argument
     * */
    template <typename U>
    explicit stack(typename boost::allocator_rebind<node_allocator, U>::type const & alloc):
        pool(alloc, capacity)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    /** Construct a fixed-sized stack with a custom allocator
     *
     *  \pre Must specify a capacity<> argument
     * */
    explicit stack(allocator const & alloc):
        pool(alloc, capacity)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    /** Construct a variable-sized stack
     *
     *  Allocate n nodes initially for the freelist
     *
     *  \pre Must \b not specify a capacity<> argument
     * */
    explicit stack(size_type n):
        pool(node_allocator(), n)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!has_capacity);
        initialize();
    }

    /** Construct a variable-sized stack with a custom allocator
     *
     *  Allocate n nodes initially for the freelist
     *
     *  \pre Must \b not specify a capacity<> argument
     * */
    template <typename U>
    stack(size_type n, typename boost::allocator_rebind<node_allocator, U>::type const & alloc):
        pool(alloc, n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }
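
    // Illustrative sketch (not part of the upstream sources): without a capacity<> argument the
    // freelist is sized at run time and can be grown later, e.g.
    //
    //     boost::lockfree::stack<int> s(512);   // pre-allocate 512 freelist nodes
    //     s.reserve(512);                       // allocate 512 further nodes (thread-safe variant)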

    /** Allocate n nodes for freelist
     *
     *  \pre only valid if no capacity<> argument given
     *  \note thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve(size_type n)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!has_capacity);
        pool.template reserve<true>(n);
    }

    /** Allocate n nodes for freelist
     *
     *  \pre only valid if no capacity<> argument given
     *  \note not thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve_unsafe(size_type n)
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT(!has_capacity);
        pool.template reserve<false>(n);
    }

    /** Destroys the stack, freeing all nodes from the freelist.
     *
     *  \note not thread-safe
     *
     * */
    ~stack(void)
    {
        T dummy;
        while(unsynchronized_pop(dummy))
        {}
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    void initialize(void)
    {
        tos.store(tagged_node_handle(pool.null_handle(), 0));
    }

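    // Descriptive note (added for clarity): the helpers below splice a pre-built chain of nodes in
    // front of the current top of stack. link_nodes_atomic retries a compare_exchange_weak on the
    // tagged top-of-stack pointer until it succeeds (a Treiber-stack push); the pop paths later
    // advance the tag via get_next_tag() to guard against the ABA problem.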
    void link_nodes_atomic(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        for (;;) {
            tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
            end_node->next = pool.get_handle(old_tos);

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }
    }

    void link_nodes_unsafe(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);

        tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
        end_node->next = pool.get_handle(old_tos);

        tos.store(new_tos, memory_order_relaxed);
    }

    template <bool Threadsafe, bool Bounded, typename ConstIterator>
    tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
    {
        ConstIterator it = begin;
        node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
        if (end_node == NULL) {
            ret = begin;
            return make_tuple<node*, node*>(NULL, NULL);
        }

        node * new_top_node = end_node;
        end_node->next = NULL;

        BOOST_TRY {
            /* link nodes */
            for (; it != end; ++it) {
                node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
                if (newnode == NULL)
                    break;
                newnode->next = new_top_node;
                new_top_node = newnode;
            }
        } BOOST_CATCH (...) {
            for (node * current_node = new_top_node; current_node != NULL;) {
                node * next = current_node->next;
                pool.template destruct<Threadsafe>(current_node);
                current_node = next;
            }
            BOOST_RETHROW;
        }
        BOOST_CATCH_END

        ret = it;
        return make_tuple(new_top_node, end_node);
    }
#endif

public:
    /** Pushes object \c v to the stack.
     *
     *  \post object will be pushed to the stack, if internal node can be allocated
     *  \returns true, if the push operation is successful.
     *
     *  \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *        from the OS. This may not be lock-free.
     *  \throws if memory allocator throws
     * */
    bool push(T const & v)
    {
        return do_push<false>(v);
    }

    /** Pushes object \c v to the stack.
     *
     *  \post object will be pushed to the stack, if internal node can be allocated
     *  \returns true, if the push operation is successful.
     *
     *  \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     * */
    bool bounded_push(T const & v)
    {
        return do_push<true>(v);
    }
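
    // Usage sketch (illustrative, not part of the upstream sources): push() may allocate a new
    // node from the OS when the freelist is exhausted, while bounded_push() only ever uses
    // pre-allocated nodes and fails instead of allocating, e.g.
    //
    //     boost::lockfree::stack<int> s(64);
    //     s.push(1);                // may allocate; returns false only if allocation fails
    //     if (!s.bounded_push(2)) { // never allocates; false once the 64-node freelist is used up
    //         /* handle full stack */
    //     }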

#ifndef BOOST_DOXYGEN_INVOKED
private:
    template <bool Bounded>
    bool do_push(T const & v)
    {
        node * newnode = pool.template construct<true, Bounded>(v);
        if (newnode == 0)
            return false;

        link_nodes_atomic(newnode, newnode);
        return true;
    }

    template <bool Bounded, typename ConstIterator>
    ConstIterator do_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
        if (new_top_node)
            link_nodes_atomic(new_top_node, end_node);

        return ret;
    }

public:
#endif


    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     *  \return iterator to the first element that has not been pushed
     *
     *  \note Operation is applied atomically
     *  \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *        from the OS. This may not be lock-free.
     *  \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return do_push<false, ConstIterator>(begin, end);
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     *  \return iterator to the first element that has not been pushed
     *
     *  \note Operation is applied atomically
     *  \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     *  \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
    {
        return do_push<true, ConstIterator>(begin, end);
    }
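
    // Usage sketch (illustrative, not part of the upstream sources): given a stack `s` as declared
    // above, the range overloads splice the whole chain with a single atomic update and report how
    // far they got, e.g.
    //
    //     std::vector<int> values(100, 42);
    //     std::vector<int>::const_iterator first_not_pushed =
    //         s.bounded_push(values.begin(), values.end());
    //     // elements in [first_not_pushed, values.end()) could not be placed on the stack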


    /** Pushes object \c v to the stack.
     *
     *  \post object will be pushed to the stack, if internal node can be allocated
     *  \returns true, if the push operation is successful.
     *
     *  \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *        from the OS. This may not be lock-free.
     *  \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & v)
    {
        node * newnode = pool.template construct<false, false>(v);
        if (newnode == 0)
            return false;

        link_nodes_unsafe(newnode, newnode);
        return true;
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     *  \return iterator to the first element that has not been pushed
     *
     *  \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *        from the OS. This may not be lock-free.
     *  \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
        if (new_top_node)
            link_nodes_unsafe(new_top_node, end_node);

        return ret;
    }


    /** Pops object from stack.
     *
     *  \post if pop operation is successful, object will be copied to ret.
     *  \returns true, if the pop operation is successful, false if stack was empty.
     *
     *  \note Thread-safe and non-blocking
     *
     * */
    bool pop(T & ret)
    {
        return pop<T>(ret);
    }

    /** Pops object from stack.
     *
     *  \pre type T must be convertible to U
     *  \post if pop operation is successful, object will be copied to ret.
     *  \returns true, if the pop operation is successful, false if stack was empty.
     *
     *  \note Thread-safe and non-blocking
     *
     * */
    template <typename U>
    bool pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        detail::consume_via_copy<U> consumer(ret);

        return consume_one(consumer);
    }
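
    // Usage sketch (illustrative, not part of the upstream sources): pop() copies the element into
    // its argument and reports whether the stack held one; the templated overload may pop into any
    // type that T converts to. process() below is a placeholder for user code, e.g.
    //
    //     boost::lockfree::stack<int> s(16);
    //     s.push(7);
    //     long out;
    //     while (s.pop(out))   // int is convertible to long
    //         process(out);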


    /** Pops object from stack.
     *
     *  \post if pop operation is successful, object will be copied to ret.
     *  \returns true, if the pop operation is successful, false if stack was empty.
     *
     *  \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop(T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }

    /** Pops object from stack.
     *
     *  \pre type T must be convertible to U
     *  \post if pop operation is successful, object will be copied to ret.
     *  \returns true, if the pop operation is successful, false if stack was empty.
     *
     *  \note Not thread-safe, but non-blocking
     *
     * */
    template <typename U>
    bool unsynchronized_pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        node * old_tos_pointer = pool.get_pointer(old_tos);

        if (!pool.get_pointer(old_tos))
            return false;

        node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
        tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());

        tos.store(new_tos, memory_order_relaxed);
        detail::copy_payload(old_tos_pointer->v, ret);
        pool.template destruct<false>(old_tos);
        return true;
    }

    /** consumes one element via a functor
     *
     *  pops one element from the stack and applies the functor on this object
     *
     *  \returns true, if one element was consumed
     *
     *  \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }

    /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }
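
    // Usage sketch (illustrative, not part of the upstream sources): consume_one() hands the
    // element straight to a callable instead of copying it into an out-parameter, e.g.
    //
    //     boost::lockfree::stack<int> s(16);
    //     s.push(3);
    //     s.consume_one([](int v) {      // requires C++11 lambdas; any functor works as well
    //         std::printf("%d\n", v);
    //     });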

    /** consumes all elements via a functor
     *
     *  sequentially pops all elements from the stack and applies the functor on each object
     *
     *  \returns number of elements that are consumed
     *
     *  \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all(Functor & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
    template <typename Functor>
    size_t consume_all(Functor const & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }
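
    // Usage sketch (illustrative, not part of the upstream sources): consume_all() simply loops
    // consume_one() until the stack is empty and returns how many elements were seen. Assuming an
    // accumulator `sum` declared by the caller, e.g.
    //
    //     std::size_t n = s.consume_all([&sum](int v) { sum += v; });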

    /** consumes all elements via a functor
     *
     *  atomically pops all elements from the stack and applies the functor on each object
     *
     *  \returns number of elements that are consumed
     *
     *  \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }
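
    // Note (added for clarity): unlike consume_all(), which pops elements one CAS at a time and can
    // interleave with concurrent pushes, consume_all_atomic() first detaches the entire chain from
    // tos with a single successful compare_exchange_weak and only then walks and destroys the
    // detached nodes, so the whole batch is removed atomically (and visited in LIFO order).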

    /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }

    /** consumes all elements via a functor
     *
     *  atomically pops all elements from the stack and applies the functor on each object in reversed order
     *
     *  \returns number of elements that are consumed
     *
     *  \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }
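
    // Note (added for clarity): the _reversed variant detaches the chain in the same way but first
    // re-links it back-to-front, so the functor observes the elements in the order in which they
    // were pushed (FIFO) rather than in stack (LIFO) order.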

    /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }

    /**
     * \return true, if stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty(void) const
    {
        return pool.get_pointer(tos.load()) == NULL;
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    detail::atomic<tagged_node_handle> tos;

    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding[padding_size];

    pool_t pool;
#endif
};

} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */