//  Copyright (C) 2008-2013 Tim Blechmann
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED
#include <boost/assert.hpp>
#include <boost/checked_delete.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/integer_traits.hpp>
#include <boost/static_assert.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/type_traits/is_copy_constructible.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/lockfree_forward.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost    {
namespace lockfree {
namespace detail   {
typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > stack_signature;

} /* namespace detail */
/** The stack class provides a multi-writer/multi-reader stack, for which pushing and popping are lock-free;
 *  construction/destruction has to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lock-free behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and are addressed
 *    by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the
 *    index type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the
 *    best way to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 *    This option implies \c fixed_sized<true>.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *  - T must have a copy constructor
 * */
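// A minimal usage sketch (illustrative addition, not part of the original
// header); the element type and capacity below are arbitrary:
//
//   boost::lockfree::stack<int, boost::lockfree::capacity<128> > s; // compile-time sized
//   s.push(1);
//   int value;
//   while (s.pop(value)) {
//       /* consume value */
//   }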
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1, class A2>
#else
template <typename T, typename ...Options>
#endif
class stack
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT(boost::is_copy_constructible<T>::value);

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
#else
    typedef typename detail::stack_signature::bind<Options...>::type bound_args;
#endif
    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;
    struct node
    {
        node(T const & val):
            v(val)
        {}

        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
        handle_t next;
        const T v;
    };
    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;
    // check compile-time capacity
    BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
                                   mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
                                   mpl::true_
                                  >::type::value));
    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif
    BOOST_DELETED_FUNCTION(stack(stack const&))
    BOOST_DELETED_FUNCTION(stack& operator= (stack const&))

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;
    /**
     * \return true, if the implementation is lock-free.
     *
     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
     *          On most platforms the whole implementation is lock-free if this is true. Using C++11-style atomics,
     *          there is no way to provide a completely accurate answer, because one would have to test every
     *          internal node, which is impossible if further nodes will be allocated from the operating system.
     * */
    bool is_lock_free (void) const
    {
        return tos.is_lock_free() && pool.is_lock_free();
    }
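    // Illustrative check (a sketch, not part of the original header): clients
    // can verify at startup that this platform yields a lock-free instantiation.
    //
    //   boost::lockfree::stack<int> s(1024);
    //   if (!s.is_lock_free()) {
    //       /* fall back to a mutex-based container, log a warning, ... */
    //   }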
    //! Construct stack
    // @{
    stack(void):
        pool(node_allocator(), capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }
    template <typename U>
    explicit stack(typename node_allocator::template rebind<U>::other const & alloc):
        pool(alloc, capacity)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }
    explicit stack(allocator const & alloc):
        pool(alloc, capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }
    // @}
    //! Construct stack, allocate n nodes for the freelist.
    // @{
    explicit stack(size_type n):
        pool(node_allocator(), n)
    {
        BOOST_ASSERT(!has_capacity);
        initialize();
    }
    template <typename U>
    stack(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
        pool(alloc, n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }
    // @}
    /** Allocate n nodes for the freelist
     *
     * \pre  only valid if no capacity<> argument given
     * \note thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve(size_type n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        pool.template reserve<true>(n);
    }
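    // Hypothetical usage (a sketch, not part of the original header): grow the
    // freelist of a dynamically-sized stack ahead of time so that later pushes
    // need not allocate from the OS.
    //
    //   boost::lockfree::stack<int> s(128); // 128 nodes preallocated
    //   s.reserve(512);                     // allocate 512 additional nodes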
    /** Allocate n nodes for the freelist
     *
     * \pre  only valid if no capacity<> argument given
     * \note not thread-safe, may block if memory allocator blocks
     *
     * */
    void reserve_unsafe(size_type n)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        pool.template reserve<false>(n);
    }
    /** Destroys stack, freeing all nodes from the freelist.
     *
     * \note not thread-safe
     *
     * */
    ~stack(void)
    {
        T dummy;
        while(unsynchronized_pop(dummy))
        {}
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    void initialize(void)
    {
        tos.store(tagged_node_handle(pool.null_handle(), 0));
    }
    void link_nodes_atomic(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        for (;;) {
            tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
            end_node->next = pool.get_handle(old_tos);

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }
    }
    void link_nodes_unsafe(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);

        tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
        end_node->next = pool.get_handle(old_tos);

        tos.store(new_tos, memory_order_relaxed);
    }
    template <bool Threadsafe, bool Bounded, typename ConstIterator>
    tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
    {
        ConstIterator it = begin;
        node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
        if (end_node == NULL) {
            ret = begin;
            return make_tuple<node*, node*>(NULL, NULL);
        }

        node * new_top_node = end_node;
        end_node->next = NULL;

        BOOST_TRY {
            /* link the remaining nodes in front of end_node */
            for (; it != end; ++it) {
                node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
                if (newnode == NULL)
                    break;
                newnode->next = new_top_node;
                new_top_node = newnode;
            }
        } BOOST_CATCH (...) {
            for (node * current_node = new_top_node; current_node != NULL;) {
                node * next = pool.get_pointer(current_node->next);
                pool.template destruct<Threadsafe>(current_node);
                current_node = next;
            }
            BOOST_RETHROW;
        } BOOST_CATCH_END

        ret = it;
        return make_tuple(new_top_node, end_node);
    }

#endif

public:
    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be
     *                    allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool push(T const & v)
    {
        return do_push<false>(v);
    }
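    // Illustrative use (a sketch, not part of the original header): push()
    // reports allocation failure via its return value, which matters mainly
    // for fixed-sized configurations where the freelist cannot grow.
    //
    //   boost::lockfree::stack<int, boost::lockfree::capacity<2> > s;
    //   s.push(1);
    //   s.push(2);
    //   bool ok = s.push(3); // ok == false: capacity exhausted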
    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * */
    bool bounded_push(T const & v)
    {
        return do_push<true>(v);
    }
#ifndef BOOST_DOXYGEN_INVOKED
private:
    template <bool Bounded>
    bool do_push(T const & v)
    {
        node * newnode = pool.template construct<true, Bounded>(v);
        if (newnode == NULL)
            return false;

        link_nodes_atomic(newnode, newnode);
        return true;
    }
    template <bool Bounded, typename ConstIterator>
    ConstIterator do_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
        if (new_top_node)
            link_nodes_atomic(new_top_node, end_node);

        return ret;
    }

public:
    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, new nodes will be
     *                    allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return do_push<false, ConstIterator>(begin, end);
    }
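    // Sketch of pushing a range (illustrative; assumes a stack s of int as
    // in the earlier examples):
    //
    //   int data[3] = {1, 2, 3};
    //   const int * rest = s.push(data, data + 3);
    //   // rest == data + 3 if every element was pushed; otherwise it points
    //   // to the first element that could not be pushed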
    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the push operation will fail.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
    {
        return do_push<true, ConstIterator>(begin, end);
    }
    /** Pushes object v to the stack.
     *
     * \post object will be pushed to the stack, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
     *                        be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & v)
    {
        node * newnode = pool.template construct<false, false>(v);
        if (newnode == NULL)
            return false;

        link_nodes_unsafe(newnode, newnode);
        return true;
    }
    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, new nodes will
     *                        be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
    {
        node * new_top_node;
        node * end_node;
        ConstIterator ret;

        tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
        if (new_top_node)
            link_nodes_unsafe(new_top_node, end_node);

        return ret;
    }
    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    bool pop(T & ret)
    {
        return pop<T>(ret);
    }
    /** Pops object from stack.
     *
     * \pre  type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template <typename U>
    bool pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        detail::consume_via_copy<U> consumer(ret);

        return consume_one(consumer);
    }
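    // Sketch (illustrative): since T only has to be convertible to U, an int
    // stack can pop straight into a wider type.
    //
    //   boost::lockfree::stack<int> s(16);
    //   s.push(42);
    //   long out;
    //   s.pop(out); // out == 42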
    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop(T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }
    /** Pops object from stack.
     *
     * \pre  type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    template <typename U>
    bool unsynchronized_pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        node * old_tos_pointer = pool.get_pointer(old_tos);

        if (!old_tos_pointer)
            return false;

        node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
        tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());

        tos.store(new_tos, memory_order_relaxed);
        detail::copy_payload(old_tos_pointer->v, ret);
        pool.template destruct<false>(old_tos);
        return true;
    }
    /** consumes one element via a functor
     *
     *  pops one element from the stack and applies the functor to this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }
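    // Sketch of consuming via a functor (illustrative; a C++11 lambda rvalue
    // binds to the Functor const & overload below):
    //
    //   s.consume_one([](int v) {
    //       std::printf("%d\n", v);
    //   });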
    /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return false;

            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                f(old_tos_pointer->v);
                pool.template destruct<true>(old_tos);
                return true;
            }
        }
    }
    /** consumes all elements via a functor
     *
     *  sequentially pops all elements from the stack and applies the functor to each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all(Functor & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }
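    // Sketch (illustrative): drain the stack, counting the elements consumed.
    //
    //   std::size_t n = s.consume_all([](int v) {
    //       /* process v */
    //   });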
    /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
    template <typename Functor>
    size_t consume_all(Functor const & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }
    /** consumes all elements via a functor
     *
     *  atomically pops all elements from the stack and applies the functor to each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for (;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }
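    // Note on ordering (illustrative sketch): the atomically taken snapshot is
    // traversed from the top of the stack, so the functor sees elements
    // newest-first:
    //
    //   s.push(1); s.push(2); s.push(3);
    //   s.consume_all_atomic([](int v) { /* sees 3, then 2, then 1 */ });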
    /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        for (;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_to_consume);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            pool.template destruct<true>(nodes_to_consume);
            nodes_to_consume = next;
        }

        return element_count;
    }
    /** consumes all elements via a functor
     *
     *  atomically pops all elements from the stack and applies the functor to each object in reversed order
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for (;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for (;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }
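    // Contrast with consume_all_atomic (illustrative sketch): the snapshot is
    // reversed before the functor runs, so elements appear oldest-first:
    //
    //   s.push(1); s.push(2); s.push(3);
    //   s.consume_all_atomic_reversed([](int v) { /* sees 1, then 2, then 3 */ });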
    /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs)
    template <typename Functor>
    size_t consume_all_atomic_reversed(Functor const & f)
    {
        size_t element_count = 0;
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);

        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            if (!old_tos_pointer)
                return 0;

            tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());

            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

        node * last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for (;;) {
            node * node_pointer = pool.get_pointer(nodes_to_consume);
            node * next_node = pool.get_pointer(node_pointer->next);

            node_pointer->next = pool.get_handle(last_node_pointer);
            last_node_pointer = node_pointer;

            if (!next_node) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
            nodes_to_consume = next;
        }

        for (;;) {
            node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
            f(node_pointer->v);
            element_count += 1;

            node * next_node = pool.get_pointer(node_pointer->next);

            if (!next_node) {
                pool.template destruct<true>(nodes_in_reversed_order);
                break;
            }

            tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
            pool.template destruct<true>(nodes_in_reversed_order);
            nodes_in_reversed_order = next;
        }

        return element_count;
    }
    /**
     * \return true, if stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty(void) const
    {
        return pool.get_pointer(tos.load()) == NULL;
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    detail::atomic<tagged_node_handle> tos;

    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding[padding_size];

    pool_t pool;
#endif
};
} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */