// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED

#include <boost/assert.hpp>
#include <boost/checked_delete.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/integer_traits.hpp>
#include <boost/static_assert.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/type_traits/is_copy_constructible.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace lockfree {
namespace detail {

typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > stack_signature;

}

/** The stack class provides a multi-writer/multi-reader stack; pushing and popping are lock-free,
 *  but construction and destruction have to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lock-free behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and are addressed
 *    by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
 *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
 *    to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 *    This option implies \c fixed_sized<true>.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *  - T must have a copy constructor
 * */
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1, class A2>
#else
template <typename T, typename ...Options>
#endif
class stack
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT(boost::is_copy_constructible<T>::value);

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
#else
    typedef typename detail::stack_signature::bind<Options...>::type bound_args;
#endif

    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;

    struct node
    {
        node(T const & val):
            v(val)
        {}

        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
        handle_t next;
        const T v;
    };

    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;

    // check compile-time capacity
    BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
                                   mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
                                   mpl::true_
                                  >::type::value));

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif

    BOOST_DELETED_FUNCTION(stack(stack const&))
    BOOST_DELETED_FUNCTION(stack& operator= (stack const&))

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if implementation is lock-free.
     *
     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
     *          On most platforms, the whole implementation is lock-free if this is true. Using C++11-style atomics,
     *          there is no way to provide a completely accurate answer, because one would need to test
     *          every internal node, which is impossible if further nodes will be allocated from the operating system.
     *
     * */
    bool is_lock_free (void) const
    {
        return tos.is_lock_free() && pool.is_lock_free();
    }

    //! Construct stack
    // @{
    stack(void):
        pool(node_allocator(), capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    template <typename U>
    explicit stack(typename node_allocator::template rebind<U>::other const & alloc):
        pool(alloc, capacity)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    explicit stack(allocator const & alloc):
        pool(alloc, capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }
    // @}
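
    // Illustrative usage sketch (not part of the original header): how the policy
    // options documented above select between the constructor overloads. The element
    // type and the sizes below are assumptions.
    //
    //     boost::lockfree::stack<int, boost::lockfree::capacity<128> > s1;          // compile-time capacity, default-constructible
    //     boost::lockfree::stack<int> s2(128);                                      // node-based, 128 freelist nodes reserved, may grow
    //     boost::lockfree::stack<int, boost::lockfree::fixed_sized<true> > s3(128); // run-time capacity, never grows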

    //! Construct stack, allocate n nodes for the freelist.
    // @{
    explicit stack(size_type n):
        pool(node_allocator(), n)
    {
        BOOST_ASSERT(!has_capacity);
        initialize();
    }

    template <typename U>
    stack(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
        pool(alloc, n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }
    // @}

    /** Allocate n nodes for freelist
     *
     * \pre  only valid if no capacity<> argument given
     * \note thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve(size_type n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        pool.template reserve<true>(n);
    }

    /** Allocate n nodes for freelist
     *
     * \pre  only valid if no capacity<> argument given
     * \note not thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve_unsafe(size_type n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        pool.template reserve<false>(n);
    }

    /** Destroys stack, frees all nodes from freelist.
     *
     * \note not thread-safe
     *
     * */
    ~stack(void)
    {
        T dummy;
        while(unsynchronized_pop(dummy))
        {}
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    void initialize(void)
    {
        tos.store(tagged_node_handle(pool.null_handle(), 0));
    }

    void link_nodes_atomic(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        for (;;) {
            tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
            end_node->next = pool.get_handle(old_tos);

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }
    }

    void link_nodes_unsafe(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);

        tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
        end_node->next = pool.get_pointer(old_tos);

        tos.store(new_tos, memory_order_relaxed);
    }

    template <bool Threadsafe, bool Bounded, typename ConstIterator>
    tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
    {
        ConstIterator it = begin;
        node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
        if (end_node == NULL) {
            ret = begin;
            return make_tuple<node*, node*>(NULL, NULL);
        }

        node * new_top_node = end_node;
        end_node->next = NULL;

        BOOST_TRY {
            /* link nodes */
            for (; it != end; ++it) {
                node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
                if (newnode == NULL)
                    break;
                newnode->next = new_top_node;
                new_top_node = newnode;
            }
        } BOOST_CATCH (...) {
            for (node * current_node = new_top_node; current_node != NULL;) {
                node * next = current_node->next;
                pool.template destruct<Threadsafe>(current_node);
                current_node = next;
            }
            BOOST_RETHROW;
        }
        BOOST_CATCH_END

        ret = it;
        return make_tuple(new_top_node, end_node);
    }
#endif

public:
    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool push(T const & v)
    {
        return do_push<false>(v);
    }

    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * */
    bool bounded_push(T const & v)
    {
        return do_push<true>(v);
    }
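
    // Illustrative sketch (not part of the header): push() may allocate a new node
    // from the OS when the freelist is exhausted, while bounded_push() fails instead.
    // The stack `s` and the pushed values are assumptions.
    //
    //     boost::lockfree::stack<int> s(16);
    //     bool ok1 = s.push(1);          // may allocate, therefore may block and may not be lock-free
    //     bool ok2 = s.bounded_push(2);  // never allocates; returns false if the freelist is empty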

#ifndef BOOST_DOXYGEN_INVOKED
private:
    template <bool Bounded>
    bool do_push(T const & v)
    {
        node * newnode = pool.template construct<true, Bounded>(v);
        if (newnode == 0)
            return false;

        link_nodes_atomic(newnode, newnode);
        return true;
    }

    template <bool Bounded, typename ConstIterator>
    ConstIterator do_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
        if (new_top_node)
            link_nodes_atomic(new_top_node, end_node);

        return ret;
    }

public:
#endif

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return do_push<false, ConstIterator>(begin, end);
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
    {
        return do_push<true, ConstIterator>(begin, end);
    }
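
    // Illustrative sketch (assumed application code): the range overloads return an
    // iterator to the first element that could not be pushed, so a caller can retry
    // the remaining elements later.
    //
    //     std::vector<int> data(100, 42);
    //     std::vector<int>::const_iterator it = s.bounded_push(data.begin(), data.end());
    //     // [data.begin(), it) was pushed; [it, data.end()) did not fit into the freelist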


    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & v)
    {
        node * newnode = pool.template construct<false, false>(v);
        if (newnode == 0)
            return false;

        link_nodes_unsafe(newnode, newnode);
        return true;
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *       from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
        if (new_top_node)
            link_nodes_unsafe(new_top_node, end_node);

        return ret;
    }


    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    bool pop(T & ret)
    {
        return pop<T>(ret);
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template <typename U>
    bool pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        detail::consume_via_copy<U> consumer(ret);

        return consume_one(consumer);
    }
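
    // Illustrative sketch: pop() copies the popped value into its argument and reports
    // via the return value whether the stack was non-empty. The target may be any type
    // T is convertible to (long is used here as an assumption for T = int).
    //
    //     long out;
    //     while (s.pop(out)) {
    //         /* process out */
    //     }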


    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop(T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    template <typename U>
    bool unsynchronized_pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        node * old_tos_pointer = pool.get_pointer(old_tos);

        if (!pool.get_pointer(old_tos))
            return false;

        node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
        tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());

        tos.store(new_tos, memory_order_relaxed);
        detail::copy_payload(old_tos_pointer->v, ret);
        pool.template destruct<false>(old_tos);
        return true;
    }

    /** consumes one element via a functor
     *
     * pops one element from the stack and applies the functor to this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }
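
    // Illustrative sketch: consume_one() pops a single element and passes it to the
    // functor; with C++11 a lambda can be used (the lambda body is an assumption).
    //
    //     bool consumed = s.consume_one([](int value) {
    //         std::printf("%d\n", value);
    //     });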

    /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }

    /** consumes all elements via a functor
     *
     * sequentially pops all elements from the stack and applies the functor to each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all(Functor & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
    template <typename Functor>
    size_t consume_all(Functor const & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /** consumes all elements via a functor
     *
     * atomically pops all elements from the stack and applies the functor to each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(typename pool_t::index_t(NULL), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }
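
    // Illustrative note: consume_all() pops elements one by one, so elements pushed
    // concurrently during the call may also be consumed. consume_all_atomic() first
    // detaches the whole list from the top-of-stack with a single compare-exchange
    // and then walks it, so it consumes exactly the elements that were on the stack
    // at that instant, in pop order (newest first).
    //
    //     std::size_t n = s.consume_all_atomic([](int value) { /* ... */ });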

    /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(typename pool_t::index_t(NULL), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }

    /** consumes all elements via a functor
     *
     * atomically pops all elements from the stack and applies the functor to each object in reversed order
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }
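
    // Illustrative note: the reversed variant detaches the list in the same way, then
    // reverses the detached chain in place before applying the functor, so elements
    // are visited oldest-first (FIFO order with respect to the pushes).
    //
    //     std::size_t n = s.consume_all_atomic_reversed([](int value) { /* ... */ });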

    /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for(;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }

    /**
     * \return true, if stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty(void) const
    {
        return pool.get_pointer(tos.load()) == NULL;
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    detail::atomic<tagged_node_handle> tos;

    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding[padding_size];

    pool_t pool;
#endif
};

} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */