1 // lock-free single-producer/single-consumer ringbuffer
2 // this algorithm is implemented in various projects (linux kernel)
4 // Copyright (C) 2009-2013 Tim Blechmann
6 // Distributed under the Boost Software License, Version 1.0. (See
7 // accompanying file LICENSE_1_0.txt or copy at
8 // http://www.boost.org/LICENSE_1_0.txt)
10 #ifndef BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
11 #define BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
16 #include <boost/aligned_storage.hpp>
17 #include <boost/assert.hpp>
18 #include <boost/static_assert.hpp>
19 #include <boost/utility.hpp>
20 #include <boost/utility/enable_if.hpp>
21 #include <boost/config.hpp> // for BOOST_LIKELY
23 #include <boost/type_traits/has_trivial_destructor.hpp>
24 #include <boost/type_traits/is_convertible.hpp>
26 #include <boost/lockfree/detail/atomic.hpp>
27 #include <boost/lockfree/detail/copy_payload.hpp>
28 #include <boost/lockfree/detail/parameter.hpp>
29 #include <boost/lockfree/detail/prefix.hpp>
31 #include <boost/lockfree/lockfree_forward.hpp>
33 #ifdef BOOST_HAS_PRAGMA_ONCE
// Boost.Parameter signature for the ringbuffer option list:
// both the capacity<> and the allocator<> template options are optional.
typedef parameter::parameters<boost::parameter::optional<tag::capacity>,
                              boost::parameter::optional<tag::allocator>
                             > ringbuffer_signature;
48 #ifndef BOOST_DOXYGEN_INVOKED
50 typedef std::size_t size_t;
51 static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t);
52 atomic<size_t> write_index_;
53 char padding1[padding_size]; /* force read_index and write_index to different cache lines */
54 atomic<size_t> read_index_;
56 BOOST_DELETED_FUNCTION(ringbuffer_base(ringbuffer_base const&))
57 BOOST_DELETED_FUNCTION(ringbuffer_base& operator= (ringbuffer_base const&))
60 ringbuffer_base(void):
61 write_index_(0), read_index_(0)
64 static size_t next_index(size_t arg, size_t max_size)
67 while (BOOST_UNLIKELY(ret >= max_size))
72 static size_t read_available(size_t write_index, size_t read_index, size_t max_size)
74 if (write_index >= read_index)
75 return write_index - read_index;
77 const size_t ret = write_index + max_size - read_index;
81 static size_t write_available(size_t write_index, size_t read_index, size_t max_size)
83 size_t ret = read_index - write_index - 1;
84 if (write_index >= read_index)
89 size_t read_available(size_t max_size) const
91 size_t write_index = write_index_.load(memory_order_acquire);
92 const size_t read_index = read_index_.load(memory_order_relaxed);
93 return read_available(write_index, read_index, max_size);
96 size_t write_available(size_t max_size) const
98 size_t write_index = write_index_.load(memory_order_relaxed);
99 const size_t read_index = read_index_.load(memory_order_acquire);
100 return write_available(write_index, read_index, max_size);
103 bool push(T const & t, T * buffer, size_t max_size)
105 const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
106 const size_t next = next_index(write_index, max_size);
108 if (next == read_index_.load(memory_order_acquire))
109 return false; /* ringbuffer is full */
111 new (buffer + write_index) T(t); // copy-construct
113 write_index_.store(next, memory_order_release);
118 size_t push(const T * input_buffer, size_t input_count, T * internal_buffer, size_t max_size)
120 return push(input_buffer, input_buffer + input_count, internal_buffer, max_size) - input_buffer;
123 template <typename ConstIterator>
124 ConstIterator push(ConstIterator begin, ConstIterator end, T * internal_buffer, size_t max_size)
126 // FIXME: avoid std::distance
128 const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
129 const size_t read_index = read_index_.load(memory_order_acquire);
130 const size_t avail = write_available(write_index, read_index, max_size);
135 size_t input_count = std::distance(begin, end);
136 input_count = (std::min)(input_count, avail);
138 size_t new_write_index = write_index + input_count;
140 const ConstIterator last = boost::next(begin, input_count);
142 if (write_index + input_count > max_size) {
143 /* copy data in two sections */
144 const size_t count0 = max_size - write_index;
145 const ConstIterator midpoint = boost::next(begin, count0);
147 std::uninitialized_copy(begin, midpoint, internal_buffer + write_index);
148 std::uninitialized_copy(midpoint, last, internal_buffer);
149 new_write_index -= max_size;
151 std::uninitialized_copy(begin, last, internal_buffer + write_index);
153 if (new_write_index == max_size)
157 write_index_.store(new_write_index, memory_order_release);
161 template <typename Functor>
162 bool consume_one(Functor & functor, T * buffer, size_t max_size)
164 const size_t write_index = write_index_.load(memory_order_acquire);
165 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
166 if ( empty(write_index, read_index) )
169 T & object_to_consume = buffer[read_index];
170 functor( object_to_consume );
171 object_to_consume.~T();
173 size_t next = next_index(read_index, max_size);
174 read_index_.store(next, memory_order_release);
178 template <typename Functor>
179 bool consume_one(Functor const & functor, T * buffer, size_t max_size)
181 const size_t write_index = write_index_.load(memory_order_acquire);
182 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
183 if ( empty(write_index, read_index) )
186 T & object_to_consume = buffer[read_index];
187 functor( object_to_consume );
188 object_to_consume.~T();
190 size_t next = next_index(read_index, max_size);
191 read_index_.store(next, memory_order_release);
195 template <typename Functor>
196 size_t consume_all (Functor const & functor, T * internal_buffer, size_t max_size)
198 const size_t write_index = write_index_.load(memory_order_acquire);
199 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
201 const size_t avail = read_available(write_index, read_index, max_size);
206 const size_t output_count = avail;
208 size_t new_read_index = read_index + output_count;
210 if (read_index + output_count > max_size) {
211 /* copy data in two sections */
212 const size_t count0 = max_size - read_index;
213 const size_t count1 = output_count - count0;
215 run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
216 run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);
218 new_read_index -= max_size;
220 run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);
222 if (new_read_index == max_size)
226 read_index_.store(new_read_index, memory_order_release);
230 template <typename Functor>
231 size_t consume_all (Functor & functor, T * internal_buffer, size_t max_size)
233 const size_t write_index = write_index_.load(memory_order_acquire);
234 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
236 const size_t avail = read_available(write_index, read_index, max_size);
241 const size_t output_count = avail;
243 size_t new_read_index = read_index + output_count;
245 if (read_index + output_count > max_size) {
246 /* copy data in two sections */
247 const size_t count0 = max_size - read_index;
248 const size_t count1 = output_count - count0;
250 run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
251 run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);
253 new_read_index -= max_size;
255 run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);
257 if (new_read_index == max_size)
261 read_index_.store(new_read_index, memory_order_release);
265 size_t pop (T * output_buffer, size_t output_count, T * internal_buffer, size_t max_size)
267 const size_t write_index = write_index_.load(memory_order_acquire);
268 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
270 const size_t avail = read_available(write_index, read_index, max_size);
275 output_count = (std::min)(output_count, avail);
277 size_t new_read_index = read_index + output_count;
279 if (read_index + output_count > max_size) {
280 /* copy data in two sections */
281 const size_t count0 = max_size - read_index;
282 const size_t count1 = output_count - count0;
284 copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, output_buffer);
285 copy_and_delete(internal_buffer, internal_buffer + count1, output_buffer + count0);
287 new_read_index -= max_size;
289 copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer);
290 if (new_read_index == max_size)
294 read_index_.store(new_read_index, memory_order_release);
298 template <typename OutputIterator>
299 size_t pop_to_output_iterator (OutputIterator it, T * internal_buffer, size_t max_size)
301 const size_t write_index = write_index_.load(memory_order_acquire);
302 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
304 const size_t avail = read_available(write_index, read_index, max_size);
308 size_t new_read_index = read_index + avail;
310 if (read_index + avail > max_size) {
311 /* copy data in two sections */
312 const size_t count0 = max_size - read_index;
313 const size_t count1 = avail - count0;
315 it = copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, it);
316 copy_and_delete(internal_buffer, internal_buffer + count1, it);
318 new_read_index -= max_size;
320 copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + avail, it);
321 if (new_read_index == max_size)
325 read_index_.store(new_read_index, memory_order_release);
329 const T& front(const T * internal_buffer) const
331 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
332 return *(internal_buffer + read_index);
335 T& front(T * internal_buffer)
337 const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
338 return *(internal_buffer + read_index);
344 /** reset the ringbuffer
346 * \note Not thread-safe
350 if ( !boost::has_trivial_destructor<T>::value ) {
351 // make sure to call all destructors!
354 while (pop(dummy_element))
357 write_index_.store(0, memory_order_relaxed);
358 read_index_.store(0, memory_order_release);
362 /** Check if the ringbuffer is empty
364 * \return true, if the ringbuffer is empty, false otherwise
365 * \note Due to the concurrent nature of the ringbuffer the result may be inaccurate.
369 return empty(write_index_.load(memory_order_relaxed), read_index_.load(memory_order_relaxed));
373 * \return true, if implementation is lock-free.
376 bool is_lock_free(void) const
378 return write_index_.is_lock_free() && read_index_.is_lock_free();
382 bool empty(size_t write_index, size_t read_index)
384 return write_index == read_index;
387 template< class OutputIterator >
388 OutputIterator copy_and_delete( T * first, T * last, OutputIterator out )
390 if (boost::has_trivial_destructor<T>::value) {
391 return std::copy(first, last, out); // will use memcpy if possible
393 for (; first != last; ++first, ++out) {
401 template< class Functor >
402 void run_functor_and_delete( T * first, T * last, Functor & functor )
404 for (; first != last; ++first) {
410 template< class Functor >
411 void run_functor_and_delete( T * first, T * last, Functor const & functor )
413 for (; first != last; ++first) {
420 template <typename T, std::size_t MaxSize>
421 class compile_time_sized_ringbuffer:
422 public ringbuffer_base<T>
424 typedef std::size_t size_type;
425 static const std::size_t max_size = MaxSize + 1;
427 typedef typename boost::aligned_storage<max_size * sizeof(T),
428 boost::alignment_of<T>::value
429 >::type storage_type;
431 storage_type storage_;
435 return static_cast<T*>(storage_.address());
438 const T * data() const
440 return static_cast<const T*>(storage_.address());
444 size_type max_number_of_elements() const
450 bool push(T const & t)
452 return ringbuffer_base<T>::push(t, data(), max_size);
455 template <typename Functor>
456 bool consume_one(Functor & f)
458 return ringbuffer_base<T>::consume_one(f, data(), max_size);
461 template <typename Functor>
462 bool consume_one(Functor const & f)
464 return ringbuffer_base<T>::consume_one(f, data(), max_size);
467 template <typename Functor>
468 size_type consume_all(Functor & f)
470 return ringbuffer_base<T>::consume_all(f, data(), max_size);
473 template <typename Functor>
474 size_type consume_all(Functor const & f)
476 return ringbuffer_base<T>::consume_all(f, data(), max_size);
479 size_type push(T const * t, size_type size)
481 return ringbuffer_base<T>::push(t, size, data(), max_size);
484 template <size_type size>
485 size_type push(T const (&t)[size])
487 return push(t, size);
490 template <typename ConstIterator>
491 ConstIterator push(ConstIterator begin, ConstIterator end)
493 return ringbuffer_base<T>::push(begin, end, data(), max_size);
496 size_type pop(T * ret, size_type size)
498 return ringbuffer_base<T>::pop(ret, size, data(), max_size);
501 template <typename OutputIterator>
502 size_type pop_to_output_iterator(OutputIterator it)
504 return ringbuffer_base<T>::pop_to_output_iterator(it, data(), max_size);
507 const T& front(void) const
509 return ringbuffer_base<T>::front(data());
514 return ringbuffer_base<T>::front(data());
518 template <typename T, typename Alloc>
519 class runtime_sized_ringbuffer:
520 public ringbuffer_base<T>,
523 typedef std::size_t size_type;
524 size_type max_elements_;
525 typedef typename Alloc::pointer pointer;
529 size_type max_number_of_elements() const
531 return max_elements_;
535 explicit runtime_sized_ringbuffer(size_type max_elements):
536 max_elements_(max_elements + 1)
538 array_ = Alloc::allocate(max_elements_);
541 template <typename U>
542 runtime_sized_ringbuffer(typename Alloc::template rebind<U>::other const & alloc, size_type max_elements):
543 Alloc(alloc), max_elements_(max_elements + 1)
545 array_ = Alloc::allocate(max_elements_);
548 runtime_sized_ringbuffer(Alloc const & alloc, size_type max_elements):
549 Alloc(alloc), max_elements_(max_elements + 1)
551 array_ = Alloc::allocate(max_elements_);
554 ~runtime_sized_ringbuffer(void)
556 // destroy all remaining items
558 while (pop(&out, 1)) {}
560 Alloc::deallocate(array_, max_elements_);
563 bool push(T const & t)
565 return ringbuffer_base<T>::push(t, &*array_, max_elements_);
568 template <typename Functor>
569 bool consume_one(Functor & f)
571 return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
574 template <typename Functor>
575 bool consume_one(Functor const & f)
577 return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
580 template <typename Functor>
581 size_type consume_all(Functor & f)
583 return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
586 template <typename Functor>
587 size_type consume_all(Functor const & f)
589 return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
592 size_type push(T const * t, size_type size)
594 return ringbuffer_base<T>::push(t, size, &*array_, max_elements_);
597 template <size_type size>
598 size_type push(T const (&t)[size])
600 return push(t, size);
603 template <typename ConstIterator>
604 ConstIterator push(ConstIterator begin, ConstIterator end)
606 return ringbuffer_base<T>::push(begin, end, &*array_, max_elements_);
609 size_type pop(T * ret, size_type size)
611 return ringbuffer_base<T>::pop(ret, size, &*array_, max_elements_);
614 template <typename OutputIterator>
615 size_type pop_to_output_iterator(OutputIterator it)
617 return ringbuffer_base<T>::pop_to_output_iterator(it, &*array_, max_elements_);
620 const T& front(void) const
622 return ringbuffer_base<T>::front(&*array_);
627 return ringbuffer_base<T>::front(&*array_);
631 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
632 template <typename T, typename A0, typename A1>
634 template <typename T, typename ...Options>
636 struct make_ringbuffer
638 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
639 typedef typename ringbuffer_signature::bind<A0, A1>::type bound_args;
641 typedef typename ringbuffer_signature::bind<Options...>::type bound_args;
644 typedef extract_capacity<bound_args> extract_capacity_t;
646 static const bool runtime_sized = !extract_capacity_t::has_capacity;
647 static const size_t capacity = extract_capacity_t::capacity;
649 typedef extract_allocator<bound_args, T> extract_allocator_t;
650 typedef typename extract_allocator_t::type allocator;
652 // allocator argument is only sane, for run-time sized ringbuffers
653 BOOST_STATIC_ASSERT((mpl::if_<mpl::bool_<!runtime_sized>,
654 mpl::bool_<!extract_allocator_t::has_allocator>,
658 typedef typename mpl::if_c<runtime_sized,
659 runtime_sized_ringbuffer<T, allocator>,
660 compile_time_sized_ringbuffer<T, capacity>
661 >::type ringbuffer_type;
665 } /* namespace detail */
668 /** The spsc_queue class provides a single-writer/single-reader fifo queue, pushing and popping is wait-free.
671 * - \c boost::lockfree::capacity<>, optional <br>
672 * If this template argument is passed to the options, the size of the ringbuffer is set at compile-time.
674 * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<T>> <br>
675 * Specifies the allocator that is used to allocate the ringbuffer. This option is only valid, if the ringbuffer is configured
676 * to be sized at run-time
679 * - T must have a default constructor
680 * - T must be copyable
682 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
683 template <typename T, class A0, class A1>
685 template <typename T, typename ...Options>
688 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
689 public detail::make_ringbuffer<T, A0, A1>::ringbuffer_type
691 public detail::make_ringbuffer<T, Options...>::ringbuffer_type
696 #ifndef BOOST_DOXYGEN_INVOKED
698 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
699 typedef typename detail::make_ringbuffer<T, A0, A1>::ringbuffer_type base_type;
700 static const bool runtime_sized = detail::make_ringbuffer<T, A0, A1>::runtime_sized;
701 typedef typename detail::make_ringbuffer<T, A0, A1>::allocator allocator_arg;
703 typedef typename detail::make_ringbuffer<T, Options...>::ringbuffer_type base_type;
704 static const bool runtime_sized = detail::make_ringbuffer<T, Options...>::runtime_sized;
705 typedef typename detail::make_ringbuffer<T, Options...>::allocator allocator_arg;
709 struct implementation_defined
711 typedef allocator_arg allocator;
712 typedef std::size_t size_type;
717 typedef T value_type;
718 typedef typename implementation_defined::allocator allocator;
719 typedef typename implementation_defined::size_type size_type;
721 /** Constructs a spsc_queue
723 * \pre spsc_queue must be configured to be sized at compile-time
728 BOOST_ASSERT(!runtime_sized);
731 template <typename U>
732 explicit spsc_queue(typename allocator::template rebind<U>::other const &)
734 // just for API compatibility: we don't actually need an allocator
735 BOOST_STATIC_ASSERT(!runtime_sized);
738 explicit spsc_queue(allocator const &)
740 // just for API compatibility: we don't actually need an allocator
741 BOOST_ASSERT(!runtime_sized);
746 /** Constructs a spsc_queue for element_count elements
748 * \pre spsc_queue must be configured to be sized at run-time
751 explicit spsc_queue(size_type element_count):
752 base_type(element_count)
754 BOOST_ASSERT(runtime_sized);
757 template <typename U>
758 spsc_queue(size_type element_count, typename allocator::template rebind<U>::other const & alloc):
759 base_type(alloc, element_count)
761 BOOST_STATIC_ASSERT(runtime_sized);
764 spsc_queue(size_type element_count, allocator_arg const & alloc):
765 base_type(alloc, element_count)
767 BOOST_ASSERT(runtime_sized);
771 /** Pushes object t to the ringbuffer.
773 * \pre only one thread is allowed to push data to the spsc_queue
774 * \post object will be pushed to the spsc_queue, unless it is full.
775 * \return true, if the push operation is successful.
777 * \note Thread-safe and wait-free
779 bool push(T const & t)
781 return base_type::push(t);
784 /** Pops one object from ringbuffer.
786 * \pre only one thread is allowed to pop data to the spsc_queue
787 * \post if ringbuffer is not empty, object will be discarded.
788 * \return true, if the pop operation is successful, false if ringbuffer was empty.
790 * \note Thread-safe and wait-free
794 detail::consume_noop consume_functor;
795 return consume_one( consume_functor );
798 /** Pops one object from ringbuffer.
800 * \pre only one thread is allowed to pop data to the spsc_queue
801 * \post if ringbuffer is not empty, object will be copied to ret.
802 * \return true, if the pop operation is successful, false if ringbuffer was empty.
804 * \note Thread-safe and wait-free
806 template <typename U>
807 typename boost::enable_if<typename is_convertible<T, U>::type, bool>::type
810 detail::consume_via_copy<U> consume_functor(ret);
811 return consume_one( consume_functor );
814 /** Pushes as many objects from the array t as there is space.
816 * \pre only one thread is allowed to push data to the spsc_queue
817 * \return number of pushed items
819 * \note Thread-safe and wait-free
821 size_type push(T const * t, size_type size)
823 return base_type::push(t, size);
826 /** Pushes as many objects from the array t as there is space available.
828 * \pre only one thread is allowed to push data to the spsc_queue
829 * \return number of pushed items
831 * \note Thread-safe and wait-free
833 template <size_type size>
834 size_type push(T const (&t)[size])
836 return push(t, size);
839 /** Pushes as many objects from the range [begin, end) as there is space .
841 * \pre only one thread is allowed to push data to the spsc_queue
842 * \return iterator to the first element, which has not been pushed
844 * \note Thread-safe and wait-free
846 template <typename ConstIterator>
847 ConstIterator push(ConstIterator begin, ConstIterator end)
849 return base_type::push(begin, end);
852 /** Pops a maximum of size objects from ringbuffer.
854 * \pre only one thread is allowed to pop data to the spsc_queue
855 * \return number of popped items
857 * \note Thread-safe and wait-free
859 size_type pop(T * ret, size_type size)
861 return base_type::pop(ret, size);
864 /** Pops a maximum of size objects from spsc_queue.
866 * \pre only one thread is allowed to pop data to the spsc_queue
867 * \return number of popped items
869 * \note Thread-safe and wait-free
871 template <size_type size>
872 size_type pop(T (&ret)[size])
874 return pop(ret, size);
877 /** Pops objects to the output iterator it
879 * \pre only one thread is allowed to pop data to the spsc_queue
880 * \return number of popped items
882 * \note Thread-safe and wait-free
884 template <typename OutputIterator>
885 typename boost::disable_if<typename is_convertible<T, OutputIterator>::type, size_type>::type
886 pop(OutputIterator it)
888 return base_type::pop_to_output_iterator(it);
891 /** consumes one element via a functor
893 * pops one element from the queue and applies the functor on this object
895 * \returns true, if one element was consumed
897 * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
899 template <typename Functor>
900 bool consume_one(Functor & f)
902 return base_type::consume_one(f);
905 /// \copydoc boost::lockfree::spsc_queue::consume_one(Functor & rhs)
906 template <typename Functor>
907 bool consume_one(Functor const & f)
909 return base_type::consume_one(f);
912 /** consumes all elements via a functor
914 * sequentially pops all elements from the queue and applies the functor on each object
916 * \returns number of elements that are consumed
918 * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
920 template <typename Functor>
921 size_type consume_all(Functor & f)
923 return base_type::consume_all(f);
926 /// \copydoc boost::lockfree::spsc_queue::consume_all(Functor & rhs)
927 template <typename Functor>
928 size_type consume_all(Functor const & f)
930 return base_type::consume_all(f);
933 /** get number of elements that are available for read
935 * \return number of available elements that can be popped from the spsc_queue
937 * \note Thread-safe and wait-free, should only be called from the consumer thread
939 size_type read_available() const
941 return base_type::read_available(base_type::max_number_of_elements());
944 /** get write space to write elements
946 * \return number of elements that can be pushed to the spsc_queue
948 * \note Thread-safe and wait-free, should only be called from the producer thread
950 size_type write_available() const
952 return base_type::write_available(base_type::max_number_of_elements());
955 /** get reference to element in the front of the queue
957 * Availability of front element can be checked using read_available().
959 * \pre only a consuming thread is allowed to check front element
960 * \pre read_available() > 0. If ringbuffer is empty, it's undefined behaviour to invoke this method.
961 * \return reference to the first element in the queue
963 * \note Thread-safe and wait-free
965 const T& front() const
967 BOOST_ASSERT(read_available() > 0);
968 return base_type::front();
971 /// \copydoc boost::lockfree::spsc_queue::front() const
974 BOOST_ASSERT(read_available() > 0);
975 return base_type::front();
978 /** reset the ringbuffer
980 * \note Not thread-safe
984 if ( !boost::has_trivial_destructor<T>::value ) {
985 // make sure to call all destructors!
988 while (pop(dummy_element))
991 base_type::write_index_.store(0, memory_order_relaxed);
992 base_type::read_index_.store(0, memory_order_release);
997 } /* namespace lockfree */
998 } /* namespace boost */
1001 #endif /* BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED */