// Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_FIBERS_ALGO_DETAIL_CHASE_LEV_QUEUE_H
#define BOOST_FIBERS_ALGO_DETAIL_CHASE_LEV_QUEUE_H

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <vector>

#include <boost/assert.hpp>
#include <boost/config.hpp>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/context.hpp>
// David Chase and Yossi Lev. Dynamic circular work-stealing deque.
// In SPAA ’05: Proceedings of the seventeenth annual ACM symposium
// on Parallelism in algorithms and architectures, pages 21–28,
// New York, NY, USA, 2005. ACM.
//
// Nhat Minh Lê, Antoniu Pop, Albert Cohen, and Francesco Zappa Nardelli. 2013.
// Correct and efficient work-stealing for weak memory models.
// In Proceedings of the 18th ACM SIGPLAN symposium on Principles and practice
// of parallel programming (PPoPP '13). ACM, New York, NY, USA, 69-80.
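//
// The queue is a work-stealing deque: the owning thread pushes and pops
// contexts at the bottom end, while other threads steal from the top end.
// The memory orderings used below follow the C11/C++11 formulation given
// in the second paper.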
namespace boost {
namespace fibers {
namespace algo {
namespace detail {

class chase_lev_queue {
private:
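    // Growable ring buffer holding the queued context pointers. The capacity
    // must be a power of two, because get()/put() map an index to a slot by
    // masking with (size() - 1).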
    class circular_buffer {
    private:
        typedef typename std::aligned_storage< sizeof( context *), alignof( context *) >::type storage_t;

        int64_t size_;
        context ** items;
        chase_lev_queue * queue_;

    public:
        circular_buffer( int64_t size, chase_lev_queue * queue) noexcept :
            size_{ size },
            items{ reinterpret_cast< context ** >( new storage_t[size_] ) },
            queue_{ queue } {
        }

        ~circular_buffer() {
            delete [] reinterpret_cast< storage_t * >( items);
        }

        int64_t size() const noexcept {
            return size_;
        }

        context * get( int64_t idx) noexcept {
            BOOST_ASSERT( 0 <= idx);
            return * (items + (idx & (size() - 1)));
        }

        void put( int64_t idx, context * ctx) noexcept {
            BOOST_ASSERT( 0 <= idx);
            * (items + (idx & (size() - 1))) = ctx;
        }

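        // Double the capacity and copy the live range [top, bottom) into a new
        // buffer. The old buffer is retired into the owning queue's old_buffers_
        // instead of being deleted immediately, because a concurrently stealing
        // thread may still be reading from it; retired buffers are freed in
        // ~chase_lev_queue().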
        circular_buffer * grow( int64_t top, int64_t bottom) {
            BOOST_ASSERT( 0 <= top);
            BOOST_ASSERT( 0 <= bottom);
            circular_buffer * buffer = new circular_buffer{ size() * 2, queue_ };
            queue_->old_buffers_.push_back( this);
            for ( int64_t i = top; i != bottom; ++i) {
                buffer->put( i, get( i) );
            }
            return buffer;
        }
    };

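    // top_ is the end thieves steal from, bottom_ the end the owning thread
    // pushes to and pops from; buffer_ is the current storage and old_buffers_
    // keeps retired buffers alive until the queue is destroyed.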
    std::atomic< int64_t > top_{ 0 };
    std::atomic< int64_t > bottom_{ 0 };
    std::atomic< circular_buffer * > buffer_;
    std::vector< circular_buffer * > old_buffers_;

public:
    chase_lev_queue() :
        buffer_{ new circular_buffer{ 1024, this } } {
        old_buffers_.reserve( 10);
    }

    ~chase_lev_queue() {
        delete buffer_.load( std::memory_order_seq_cst);
        for ( circular_buffer * buffer : old_buffers_) {
            delete buffer;
        }
    }

    chase_lev_queue( chase_lev_queue const&) = delete;
    chase_lev_queue( chase_lev_queue &&) = delete;

    chase_lev_queue & operator=( chase_lev_queue const&) = delete;
    chase_lev_queue & operator=( chase_lev_queue &&) = delete;

    bool empty() const noexcept {
        int64_t bottom = bottom_.load( std::memory_order_relaxed);
        int64_t top = top_.load( std::memory_order_relaxed);
        return bottom <= top;
    }

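    // Push a context onto the bottom end; intended to be called only by the
    // owning thread. Grows the buffer when it is full. The release fence
    // orders the store of the element before the store that publishes the new
    // bottom index, so a thief that observes the index also observes the element.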
    void push( context * ctx) {
        int64_t bottom = bottom_.load( std::memory_order_relaxed);
        int64_t top = top_.load( std::memory_order_acquire);
        circular_buffer * buffer = buffer_.load( std::memory_order_relaxed);
        if ( (bottom - top) > buffer->size() - 1) {
            // queue is full
            buffer = buffer->grow( top, bottom);
            buffer_.store( buffer, std::memory_order_release);
        }
        buffer->put( bottom, ctx);
        std::atomic_thread_fence( std::memory_order_release);
        bottom_.store( bottom + 1, std::memory_order_relaxed);
    }

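    // Pop a context from the bottom end; intended to be called only by the
    // owning thread. bottom_ is decremented speculatively and the seq_cst
    // fence orders that store before the load of top_. When only a single
    // element is left, a CAS on top_ decides the race against concurrent
    // thieves; losing it yields nullptr.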
    context * pop() {
        int64_t bottom = bottom_.load( std::memory_order_relaxed) - 1;
        circular_buffer * buffer = buffer_.load( std::memory_order_relaxed);
        bottom_.store( bottom, std::memory_order_relaxed);
        std::atomic_thread_fence( std::memory_order_seq_cst);
        int64_t top = top_.load( std::memory_order_relaxed);
        context * ctx = nullptr;
        if ( top <= bottom) {
            // queue is not empty
            ctx = buffer->get( bottom);
            // last element
            if ( top == bottom) {
                if ( ! top_.compare_exchange_strong( top, top + 1,
                                                     std::memory_order_seq_cst, std::memory_order_relaxed) ) {
                    // lost the race against a thief
                    ctx = nullptr;
                }
                // restore bottom regardless of the outcome of the race
                bottom_.store( bottom + 1, std::memory_order_relaxed);
            }
        } else {
            // queue is empty
            bottom_.store( bottom + 1, std::memory_order_relaxed);
        }
        return ctx;
    }

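    // Steal a context from the top end; may be called from any thread. The
    // element is claimed with a CAS on top_; if another thief or the owner
    // wins the race, nullptr is returned and the caller may simply retry.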
    context * steal() {
        int64_t top = top_.load( std::memory_order_acquire);
        std::atomic_thread_fence( std::memory_order_seq_cst);
        int64_t bottom = bottom_.load( std::memory_order_acquire);
        context * ctx = nullptr;
        if ( top < bottom) {
            // queue is not empty
            circular_buffer * buffer = buffer_.load( std::memory_order_consume);
            ctx = buffer->get( top);
            if ( ! top_.compare_exchange_strong( top, top + 1,
                                                 std::memory_order_seq_cst, std::memory_order_relaxed) ) {
                return nullptr;
            }
        }
        return ctx;
    }
};
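
// A minimal usage sketch: one scheduler thread owns the queue and uses
// push()/pop(), other scheduler threads call steal() on it. The names
// `my_queue`, `victim` and `ctx` below are hypothetical and only illustrate
// the intended call pattern.
//
//   chase_lev_queue my_queue;            // owned by this scheduler thread
//   my_queue.push( ctx);                 // publish a ready context (owner only)
//   context * c = my_queue.pop();        // take back the most recent work (owner only)
//   if ( nullptr == c) {
//       c = victim.steal();              // try to steal the oldest entry of another
//   }                                    // thread's queue; may return nullptr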

}}}}

#endif // #define BOOST_FIBERS_ALGO_DETAIL_CHASE_LEV_QUEUE_H