// lock-free freelist
//
// Copyright (C) 2008-2016 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED

#include <cstring>
#include <limits>
#include <memory>
#include <new>       // placement new
#include <stdexcept> // std::runtime_error

#include <boost/array.hpp>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/mpl/if.hpp>          // mpl::if_c, used by the selector metafunctions below
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/throw_exception.hpp> // boost::throw_exception

#include <boost/align/align_up.hpp>
#include <boost/align/aligned_allocator_adaptor.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4100) // unreferenced formal parameter
#pragma warning(disable: 4127) // conditional expression is constant
#endif

namespace boost {
namespace lockfree {
namespace detail {

template <typename T,
          typename Alloc = std::allocator<T>
         >
class freelist_stack:
    Alloc
{
    struct freelist_node
    {
        tagged_ptr<freelist_node> next;
    };

    typedef tagged_ptr<freelist_node> tagged_node_ptr;

public:
    typedef T * index_t;
    typedef tagged_ptr<T> tagged_node_handle;

    template <typename Allocator>
    freelist_stack (Allocator const & alloc, std::size_t n = 0):
        Alloc(alloc),
        pool_(tagged_node_ptr(NULL))
    {
        for (std::size_t i = 0; i != n; ++i) {
            T * node = Alloc::allocate(1);
            std::memset(node, 0, sizeof(T));
#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(node);
#else
            deallocate<false>(node);
#endif
        }
    }

    template <bool ThreadSafe>
    void reserve (std::size_t count)
    {
        for (std::size_t i = 0; i != count; ++i) {
            T * node = Alloc::allocate(1);
            std::memset(node, 0, sizeof(T));
            deallocate<ThreadSafe>(node);
        }
    }

    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T();
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg);
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg1, arg2);
        return node;
    }

    template <bool ThreadSafe>
    void destruct (tagged_node_handle const & tagged_ptr)
    {
        T * n = tagged_ptr.get_ptr();
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    // walk the freelist and return every pooled node to the allocator;
    // nodes handed out via construct() must already have been destructed
    ~freelist_stack(void)
    {
        tagged_node_ptr current = pool_.load();

        while (current) {
            freelist_node * current_ptr = current.get_ptr();
            if (current_ptr)
                current = current_ptr->next;
            Alloc::deallocate((T*)current_ptr, 1);
        }
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    T * get_handle(T * pointer) const
    {
        return pointer;
    }

    T * get_handle(tagged_node_handle const & handle) const
    {
        return get_pointer(handle);
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return tptr.get_ptr();
    }

    T * get_pointer(T * pointer) const
    {
        return pointer;
    }

    T * null_handle(void) const
    {
        return NULL;
    }

protected: // allow use from subclasses
    template <bool ThreadSafe, bool Bounded>
    T * allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl<Bounded>();
        else
            return allocate_impl_unsafe<Bounded>();
    }

private:
    template <bool Bounded>
    T * allocate_impl (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);

        for(;;) {
            if (!old_pool.get_ptr()) {
                // pool exhausted: fall back to the allocator, unless bounded
                if (!Bounded) {
                    T *ptr = Alloc::allocate(1);
                    std::memset(ptr, 0, sizeof(T));
                    return ptr;
                }
                else
                    return 0;
            }

            // pop the head node; bumping the tag guards against ABA
            freelist_node * new_pool_ptr = old_pool->next.get_ptr();
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool)) {
                void * ptr = old_pool.get_ptr();
                return reinterpret_cast<T*>(ptr);
            }
        }
    }

    template <bool Bounded>
    T * allocate_impl_unsafe (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);

        if (!old_pool.get_ptr()) {
            if (!Bounded) {
                T *ptr = Alloc::allocate(1);
                std::memset(ptr, 0, sizeof(T));
                return ptr;
            }
            else
                return 0;
        }

        freelist_node * new_pool_ptr = old_pool->next.get_ptr();
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        void * ptr = old_pool.get_ptr();
        return reinterpret_cast<T*>(ptr);
    }

protected:
    template <bool ThreadSafe>
    void deallocate (T * n)
    {
        if (ThreadSafe)
            deallocate_impl(n);
        else
            deallocate_impl_unsafe(n);
    }

private:
    void deallocate_impl (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        // push the node back as the new head, keeping the current tag
        for(;;) {
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
            new_pool->next.set_ptr(old_pool.get_ptr());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    void deallocate_impl_unsafe (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
        new_pool->next.set_ptr(old_pool.get_ptr());

        pool_.store(new_pool, memory_order_relaxed);
    }

    atomic<tagged_node_ptr> pool_;
};
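
// Usage sketch (illustrative only, not part of this header): freelist_stack
// hands out nodes of T and falls back to the allocator when the pool is empty
// (unless Bounded is true). The type `my_node` and the variable `fl` are
// hypothetical.
//
//   struct my_node { int value; my_node(int v = 0): value(v) {} };
//
//   boost::lockfree::detail::freelist_stack<my_node> fl(std::allocator<my_node>(), 16);
//   my_node * n = fl.construct<true, false>(42); // thread-safe, unbounded
//   if (n)
//       fl.destruct<true>(n);                    // runs ~my_node(), node returns to the pool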

class
BOOST_ALIGNMENT( 4 ) // workaround for bugs in MSVC
tagged_index
{
public:
    typedef boost::uint16_t tag_t;
    typedef boost::uint16_t index_t;

    /** uninitialized constructor */
    tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_index(tagged_index const & rhs):
        index(rhs.index), tag(rhs.tag)
    {}
#else
    tagged_index(tagged_index const & rhs) = default;
#endif

    explicit tagged_index(index_t i, tag_t t = 0):
        index(i), tag(t)
    {}

    /** index access */
    /* @{ */
    index_t get_index() const
    {
        return index;
    }

    void set_index(index_t i)
    {
        index = i;
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const
    {
        return tag;
    }

    tag_t get_next_tag() const
    {
        tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();
        return next;
    }

    void set_tag(tag_t t)
    {
        tag = t;
    }
    /* @} */

    bool operator==(tagged_index const & rhs) const
    {
        return (index == rhs.index) && (tag == rhs.tag);
    }

    bool operator!=(tagged_index const & rhs) const
    {
        return !operator==(rhs);
    }

protected:
    index_t index;
    tag_t tag;
};
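
// Illustration (not part of this header): tagged_index packs a 16-bit slot
// index and a 16-bit modification tag into a single word small enough for a
// plain atomic compare-and-swap; the tag is bumped on every successful pop so
// that a recycled index is not mistaken for the old one (ABA prevention).
//
//   tagged_index a(5, 65535);
//   assert(a.get_next_tag() == 0);       // the tag wraps around at 65535
//   assert(a == tagged_index(5, 65535)); // equal index and equal tag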

template <typename T,
          std::size_t size>
struct compiletime_sized_freelist_storage
{
    // array-based freelists only support a 16bit address space.
    BOOST_STATIC_ASSERT(size < 65536);

    boost::array<char, size * sizeof(T) + 64> data;

    // unused ... only for API purposes
    template <typename Allocator>
    compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */)
    {
        data.fill(0);
    }

    T * nodes(void) const
    {
        char * data_pointer = const_cast<char*>(data.data());
        return reinterpret_cast<T*>( boost::alignment::align_up( data_pointer, BOOST_LOCKFREE_CACHELINE_BYTES ) );
    }

    std::size_t node_count(void) const
    {
        return size;
    }
};

template <typename T,
          typename Alloc = std::allocator<T> >
struct runtime_sized_freelist_storage:
    boost::alignment::aligned_allocator_adaptor<Alloc, BOOST_LOCKFREE_CACHELINE_BYTES >
{
    typedef boost::alignment::aligned_allocator_adaptor<Alloc, BOOST_LOCKFREE_CACHELINE_BYTES > allocator_type;
    T * nodes_;
    std::size_t node_count_;

    template <typename Allocator>
    runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):
        allocator_type(alloc), node_count_(count)
    {
        if (count > 65535)
            boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects"));
        nodes_ = allocator_type::allocate(count);
        std::memset(nodes_, 0, sizeof(T) * count);
    }

    ~runtime_sized_freelist_storage(void)
    {
        allocator_type::deallocate(nodes_, node_count_);
    }

    T * nodes(void) const
    {
        return nodes_;
    }

    std::size_t node_count(void) const
    {
        return node_count_;
    }
};
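
// Illustration (not part of this header): both storage policies expose the
// nodes()/node_count() interface consumed by fixed_size_freelist below. The
// compile-time variant reserves size * sizeof(T) + 64 bytes inline and aligns
// the node array to a cache line; the runtime variant allocates through the
// cache-line-aligned allocator adaptor. The variable names are hypothetical.
//
//   compiletime_sized_freelist_storage<int, 128> inline_storage(
//       std::allocator<int>(), 0);   // the count argument is unused here
//   runtime_sized_freelist_storage<int> heap_storage(
//       std::allocator<int>(), 128); // throws for counts above 65535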


template <typename T,
          typename NodeStorage = runtime_sized_freelist_storage<T>
         >
class fixed_size_freelist:
    NodeStorage
{
    struct freelist_node
    {
        tagged_index next;
    };

    // link every slot into the freelist; each free slot stores the index of
    // its successor in-place
    void initialize(void)
    {
        T * nodes = NodeStorage::nodes();
        for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {
            tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);
            next_index->set_index(null_handle());

#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(nodes + i);
#else
            deallocate<false>(static_cast<index_t>(i));
#endif
        }
    }

public:
    typedef tagged_index tagged_node_handle;
    typedef tagged_index::index_t index_t;

    template <typename Allocator>
    fixed_size_freelist (Allocator const & alloc, std::size_t count):
        NodeStorage(alloc, count),
        pool_(tagged_index(static_cast<index_t>(count), 0))
    {
        initialize();
    }

    fixed_size_freelist (void):
        pool_(tagged_index(NodeStorage::node_count(), 0))
    {
        initialize();
    }

    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T();
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg);
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg1, arg2);
        return node;
    }

    template <bool ThreadSafe>
    void destruct (tagged_node_handle tagged_index)
    {
        index_t index = tagged_index.get_index();
        T * n = NodeStorage::nodes() + index;
        (void)n; // silence msvc warning
        n->~T();
        deallocate<ThreadSafe>(index);
    }

    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(static_cast<index_t>(n - NodeStorage::nodes()));
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    index_t null_handle(void) const
    {
        return static_cast<index_t>(NodeStorage::node_count());
    }

    index_t get_handle(T * pointer) const
    {
        if (pointer == NULL)
            return null_handle();
        else
            return static_cast<index_t>(pointer - NodeStorage::nodes());
    }

    index_t get_handle(tagged_node_handle const & handle) const
    {
        return handle.get_index();
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return get_pointer(tptr.get_index());
    }

    T * get_pointer(index_t index) const
    {
        if (index == null_handle())
            return 0;
        else
            return NodeStorage::nodes() + index;
    }

    T * get_pointer(T * ptr) const
    {
        return ptr;
    }

protected: // allow use from subclasses
    template <bool ThreadSafe>
    index_t allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl();
        else
            return allocate_impl_unsafe();
    }

private:
    index_t allocate_impl (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            index_t index = old_pool.get_index();
            if (index == null_handle())
                return index;

            // pop the head slot; bumping the tag guards against ABA
            T * old_node = NodeStorage::nodes() + index;
            tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

            tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return old_pool.get_index();
        }
    }

    index_t allocate_impl_unsafe (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        index_t index = old_pool.get_index();
        if (index == null_handle())
            return index;

        T * old_node = NodeStorage::nodes() + index;
        tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

        tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        return old_pool.get_index();
    }

    template <bool ThreadSafe>
    void deallocate (index_t index)
    {
        if (ThreadSafe)
            deallocate_impl(index);
        else
            deallocate_impl_unsafe(index);
    }

    void deallocate_impl (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            tagged_index new_pool (index, old_pool.get_tag());
            new_pool_node->next.set_index(old_pool.get_index());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    void deallocate_impl_unsafe (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        tagged_index new_pool (index, old_pool.get_tag());
        new_pool_node->next.set_index(old_pool.get_index());

        pool_.store(new_pool);
    }

    atomic<tagged_index> pool_;
};
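
// Usage sketch (illustrative only, not part of this header): fixed_size_freelist
// threads its freelist through the node storage itself, keeping a 16-bit
// successor index in each free slot; node_count() serves as the null handle.
// `my_node` is the hypothetical type from the freelist_stack sketch above.
//
//   typedef fixed_size_freelist<my_node,
//       compiletime_sized_freelist_storage<my_node, 64> > pool_t;
//
//   pool_t pool(std::allocator<my_node>(), 64);
//   my_node * n = pool.construct<true, true>(); // NULL once all 64 slots are in use
//   if (n)
//       pool.destruct<true>(n);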

template <typename T,
          typename Alloc,
          bool IsCompileTimeSized,
          bool IsFixedSize,
          std::size_t Capacity
         >
struct select_freelist
{
    typedef typename mpl::if_c<IsCompileTimeSized,
                               compiletime_sized_freelist_storage<T, Capacity>,
                               runtime_sized_freelist_storage<T, Alloc>
                              >::type fixed_sized_storage_type;

    typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,
                               fixed_size_freelist<T, fixed_sized_storage_type>,
                               freelist_stack<T, Alloc>
                              >::type type;
};
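
// Illustration (not part of this header): select_freelist is the compile-time
// switch the container implementations use to pick a freelist. Hypothetical
// instantiations:
//
//   // compile-time capacity of 128: fixed_size_freelist over inline storage
//   typedef select_freelist<int, std::allocator<int>,
//                           true,  // IsCompileTimeSized
//                           true,  // IsFixedSize
//                           128>::type static_freelist_t;
//
//   // neither flag set: node-based freelist_stack<int, std::allocator<int> >
//   typedef select_freelist<int, std::allocator<int>,
//                           false, false, 128>::type dynamic_freelist_t;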

template <typename T, bool IsNodeBased>
struct select_tagged_handle
{
    typedef typename mpl::if_c<IsNodeBased,
                               tagged_ptr<T>,
                               tagged_index
                              >::type tagged_handle_type;

    typedef typename mpl::if_c<IsNodeBased,
                               T*,
                               typename tagged_index::index_t
                              >::type handle_type;
};
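
// Illustration (not part of this header): containers pick the handle type that
// matches the freelist selected above. Node-based freelists traffic in real
// pointers, array-based ones in 16-bit indices:
//
//   select_tagged_handle<int, true>::handle_type  p; // int*
//   select_tagged_handle<int, false>::handle_type i; // tagged_index::index_t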


} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */

#if defined(_MSC_VER)
#pragma warning(pop)
#endif


#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */