// Copyright (C) 2000, 2001 Stephen Cleary
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org for updates, documentation, and revision history.

#ifndef BOOST_POOL_HPP
#define BOOST_POOL_HPP

#include <boost/config.hpp> // for workarounds

// std::less, std::less_equal, std::greater
#include <functional>
// new[], delete[], std::nothrow
#include <new>
// std::size_t, std::ptrdiff_t
#include <cstddef>
// std::malloc, std::free
#include <cstdlib>
// std::invalid_argument
#include <stdexcept>
// std::max
#include <algorithm>

#include <boost/pool/poolfwd.hpp>

// boost::integer::static_lcm
#include <boost/integer/common_factor_ct.hpp>
// boost::simple_segregated_storage
#include <boost/pool/simple_segregated_storage.hpp>
// boost::alignment_of
#include <boost/type_traits/alignment_of.hpp>
// BOOST_ASSERT
#include <boost/assert.hpp>

#ifdef BOOST_POOL_INSTRUMENT
#include <iostream>
#include <iomanip>
#endif
#ifdef BOOST_POOL_VALGRIND
#include <set>
#include <valgrind/memcheck.h>
#endif

#ifdef BOOST_NO_STDC_NAMESPACE
 namespace std { using ::malloc; using ::free; }
#endif

// There are a few places in this file where the expression "this->m" is used.
// This expression is used to force instantiation-time name lookup, which I am
// informed is required for strict Standard compliance. It's only necessary
// if "m" is a member of a base class that is dependent on a template
// parameter.
// Thanks to Jens Maurer for pointing this out!

/*!
  \file
  \brief Provides class \ref pool: a fast memory allocator that guarantees proper alignment of all allocated chunks,
  and which extends and generalizes the framework provided by the simple segregated storage solution.
  Also provides two UserAllocator classes which can be used in conjunction with \ref pool.
*/

/*!
  \mainpage Boost.Pool Memory Allocation Scheme

  \section intro_sec Introduction

  Pool allocation is a memory allocation scheme that is very fast, but limited in its usage.

  This Doxygen-style documentation is complementary to the
  full Quickbook-generated html and pdf documentation at www.boost.org.

  This page is generated from the file pool.hpp.

*/

#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4127) // Conditional expression is constant
#endif

namespace boost
{

//! \brief Allocator used as the default template parameter for
//! a <a href="boost_pool/pool/pooling.html#boost_pool.pool.pooling.user_allocator">UserAllocator</a>
//! template parameter.  Uses new and delete.
struct default_user_allocator_new_delete
{
  typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
  typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.

  static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
  { //! Attempts to allocate the requested number of bytes from the system. Returns 0 if out-of-memory.
    return new (std::nothrow) char[bytes];
  }
  static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
  { //! Attempts to de-allocate block.
    //! \pre Block must have been previously returned from a call to UserAllocator::malloc.
    delete [] block;
  }
};

//! \brief <a href="boost_pool/pool/pooling.html#boost_pool.pool.pooling.user_allocator">UserAllocator</a>
//! used as template parameter for \ref pool and \ref object_pool.
//! Uses malloc and free internally.
struct default_user_allocator_malloc_free
{
  typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
  typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.

  static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
  { return static_cast<char *>((std::malloc)(bytes)); }
  static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
  { (std::free)(block); }
};
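
// A minimal sketch of a user-defined UserAllocator, assuming a client wants to
// route pool storage through its own memory source.  Any type exposing this
// interface (size_type, difference_type, and static malloc/free) can be passed
// as the UserAllocator template parameter of pool; the name used here is
// purely illustrative:
//
//   struct example_user_allocator
//   {
//     typedef std::size_t size_type;
//     typedef std::ptrdiff_t difference_type;
//
//     static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
//     { return static_cast<char *>((std::malloc)(bytes)); } // or any other source
//     static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
//     { (std::free)(block); }
//   };
//
//   typedef boost::pool<example_user_allocator> example_pool;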

namespace details
{  //! Implementation only.

template <typename SizeType>
class PODptr
{ //! PODptr is a class that pretends to be a "pointer" to different class types
  //! that don't really exist.  It provides member functions to access the "data"
  //! of the "object" it points to.  Since these "class" types are of variable
  //! size, and contain some information at the *end* of their memory
  //! (for alignment reasons),
  //! PODptr must contain the size of this "class" as well as the pointer to this "object".
  /*! \details A PODptr holds the location and size of a memory block allocated from the system.
  Each memory block is split logically into three sections:

  <b>Chunk area</b>. This section may vary in size from block to block. PODptr does not care what the size of the chunks is,
  but it does care about (and keeps track of) the total size of the chunk area.

  <b>Next pointer</b>. This section is always the same size for a given SizeType. It holds a pointer
  to the location of the next memory block in the memory block list, or 0 if there is no such block.

  <b>Next size</b>. This section is always the same size for a given SizeType. It holds the size of the
  next memory block in the memory block list.

The PODptr class just provides cleaner ways of dealing with raw memory blocks.

A PODptr object is either valid or invalid. An invalid PODptr is analogous to a null pointer.
The default constructor for PODptr will result in an invalid object.
Calling the member function invalidate will result in that object becoming invalid.
The member function valid can be used to test for validity.
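
A sketch of the layout of one memory block follows (the next-pointer section is
padded to lcm(sizeof(size_type), sizeof(void *)) bytes, so the sections are not drawn to scale):

<pre>
+-----------------------------------+--------------+-----------+
| chunk area (element_size() bytes) | next pointer | next size |
+-----------------------------------+--------------+-----------+
^ begin()                           ^ end()
</pre>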
*/
  public:
    typedef SizeType size_type;

  private:
    char * ptr;
    size_type sz;

    char * ptr_next_size() const
    {
      return (ptr + sz - sizeof(size_type));
    }
    char * ptr_next_ptr() const
    {
      return (ptr_next_size() -
          integer::static_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

  public:
    PODptr(char * const nptr, const size_type nsize)
    : ptr(nptr), sz(nsize)
    {
      //! A PODptr may be created to point to a memory block by passing
      //! the address and size of that memory block into the constructor.
      //! A PODptr constructed in this way is valid.
    }
    PODptr()
    : ptr(0), sz(0)
    { //! default constructor for PODptr will result in an invalid object.
    }

    bool valid() const
    { //! A PODptr object is either valid or invalid.
      //! An invalid PODptr is analogous to a null pointer.
      //! \returns true if PODptr is valid, false if invalid.
      return (begin() != 0);
    }
    void invalidate()
    { //! Make object invalid.
      begin() = 0;
    }
    char * & begin()
    { //! Each PODptr keeps the address and size of its memory block.
      //! \returns The address of its memory block.
      return ptr;
    }
    char * begin() const
    { //! Each PODptr keeps the address and size of its memory block.
      //! \return The address of its memory block.
      return ptr;
    }
    char * end() const
    { //! \returns begin() plus element_size (a 'past the end' value).
      return ptr_next_ptr();
    }
    size_type total_size() const
    { //! Each PODptr keeps the address and size of its memory block.
      //! The address may be read or written through the member function begin.
      //! The size of the memory block may only be read.
      //! \returns the total size of the memory block.
      return sz;
    }
    size_type element_size() const
    { //! \returns the size of the chunk area of the memory block.
      return static_cast<size_type>(sz - sizeof(size_type) -
          integer::static_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

    size_type & next_size() const
    { //! \returns a reference to the size of the next memory block.
      return *(static_cast<size_type *>(static_cast<void*>((ptr_next_size()))));
    }
    char * & next_ptr() const
    { //! \returns a reference to the pointer to the next memory block.
      return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr())));
    }

    PODptr next() const
    { //! \returns next PODptr.
      return PODptr<size_type>(next_ptr(), next_size());
    }
    void next(const PODptr & arg) const
    { //! Sets next PODptr.
      next_ptr() = arg.begin();
      next_size() = arg.total_size();
    }
}; // class PODptr
} // namespace details

#ifndef BOOST_POOL_VALGRIND
/*!
  \brief A fast memory allocator that guarantees proper alignment of all allocated chunks.

  \details Whenever an object of type pool needs memory from the system,
  it will request it from its UserAllocator template parameter.
  The amount requested is determined using a doubling algorithm;
  that is, each time more system memory is allocated,
  the amount of system memory requested is doubled.

  Users may control the doubling algorithm by using the following extensions:

  Users may pass an additional constructor parameter to pool.
  This parameter is of type size_type,
  and is the number of chunks to request from the system
  the first time that object needs to allocate system memory.
  The default is 32. This parameter may not be 0.

  Users may also pass an optional third parameter to pool's
  constructor.  This parameter is of type size_type,
  and sets a maximum number of chunks to allocate in any single block.  When this
  parameter takes the default value of 0, then there is no upper
  limit on block size.

  Finally, if the doubling algorithm results in no memory
  being allocated, the pool will backtrack just once, halving
  the number of chunks requested and trying again.

  <b>UserAllocator type</b> - the method that the Pool will use to allocate memory from the system.

  There are essentially two ways to use class pool: the client can call \ref malloc() and \ref free() to allocate
  and free single chunks of memory; this is the most efficient way to use a pool, but it does not allow for
  the efficient allocation of arrays of chunks.  Alternatively, the client may call \ref ordered_malloc() and \ref
  ordered_free(), in which case the free list is maintained in an ordered state, and efficient allocation of arrays
  of chunks is possible.  However, this latter option can suffer from poor performance when large numbers of
  allocations are performed.

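  \par Example
  A minimal sketch of typical use (the chunk size shown is illustrative, and the
  malloc/free calls are parenthesized only to sidestep any like-named macros):
  \code
  boost::pool<> p(sizeof(int));       // each chunk can hold one int
  void * const chunk = (p.malloc)();  // allocate one chunk; 0 on failure
  if (chunk)
    (p.free)(chunk);                  // hand the chunk back to the pool
  // any chunks still outstanding are reclaimed when p is destroyed
  \endcode
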
*/
template <typename UserAllocator>
class pool: protected simple_segregated_storage < typename UserAllocator::size_type >
{
  public:
    typedef UserAllocator user_allocator; //!< User allocator.
    typedef typename UserAllocator::size_type size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
    typedef typename UserAllocator::difference_type difference_type; //!< A signed integral type that can represent the difference of any two pointers.

  private:
    BOOST_STATIC_CONSTANT(size_type, min_alloc_size =
        (::boost::integer::static_lcm<sizeof(void *), sizeof(size_type)>::value) );
    BOOST_STATIC_CONSTANT(size_type, min_align =
        (::boost::integer::static_lcm< ::boost::alignment_of<void *>::value, ::boost::alignment_of<size_type>::value>::value) );

    //! \returns 0 if out-of-memory.
    //! Called if malloc/ordered_malloc needs to resize the free list.
    void * malloc_need_resize(); //! Called if malloc needs to resize the free list.
    void * ordered_malloc_need_resize(); //! Called if ordered_malloc needs to resize the free list.

  protected:
    details::PODptr<size_type> list; //!< List structure holding ordered blocks.

    simple_segregated_storage<size_type> & store()
    { //! \returns a reference to the underlying store.
      return *this;
    }
    const simple_segregated_storage<size_type> & store() const
    { //! \returns a const reference to the underlying store.
      return *this;
    }
    const size_type requested_size;
    size_type next_size;
    size_type start_size;
    size_type max_size;

    //! finds which POD in the list 'chunk' was allocated from.
    details::PODptr<size_type> find_POD(void * const chunk) const;

    // is_from() tests a chunk to determine if it belongs in a block.
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    { //! \param chunk the chunk whose origin is to be checked.
      //! \param i the start of the chunk area of the block being tested.
      //! \param sizeof_i the size of the chunk area of that block (not the total size of that block).
      //! \returns true if chunk was allocated from the given block, or
      //! may be returned as the result of a future allocation from it.
      //!
      //! Returns false if chunk was allocated from some other pool,
      //! or may be returned as the result of a future allocation from some other pool.
      //! Otherwise, the return value is meaningless.
      //!
      //! Note that this function may not be used to reliably test random pointer values.

      // We use std::less_equal and std::less to test 'chunk'
      // against the array bounds because standard operators
      // may return unspecified results.
      // This is to ensure portability.  The operators < <= > >= are only
      // defined for pointers to objects that are 1) in the same array, or
      // 2) subobjects of the same object [5.9/2].
      // The functor objects guarantee a total order for any pointer [20.3.3/8]
      std::less_equal<void *> lt_eq;
      std::less<void *> lt;
      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }

    size_type alloc_size() const
    { //! Calculated size of the memory chunks that will be allocated by this Pool.
      //! \returns allocated size.
      // For alignment reasons, this used to be defined to be lcm(requested_size, sizeof(void *), sizeof(size_type)),
      // but is now more parsimonious: just rounding up to the minimum required alignment of our housekeeping data
      // when required.  This works provided all alignments are powers of two.
      size_type s = (std::max)(requested_size, min_alloc_size);
      size_type rem = s % min_align;
      if(rem)
         s += min_align - rem;
      BOOST_ASSERT(s >= min_alloc_size);
      BOOST_ASSERT(s % min_align == 0);
      return s;
    }
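
    // A worked instance of the rounding above, assuming a platform where
    // sizeof(void *) == sizeof(size_type) == 8, so min_alloc_size == 8 and
    // min_align == 8: requested_size = 10 gives s = max(10, 8) = 10,
    // rem = 10 % 8 = 2, and alloc_size() returns 10 + (8 - 2) = 16.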

    static void * & nextof(void * const ptr)
    { //! \returns the dereferenced pointer, i.e. the "next" pointer stored at the start of a free chunk.
      //! (Provided and used for the sake of code readability :)
      return *(static_cast<void **>(ptr));
    }

  public:
    // pre: npartition_size != 0 && nnext_size != 0
    explicit pool(const size_type nrequested_size,
        const size_type nnext_size = 32,
        const size_type nmax_size = 0)
    :
        list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size), max_size(nmax_size)
    { //! Constructs a new empty Pool that can be used to allocate chunks of size RequestedSize.
      //! \param nrequested_size Requested chunk size.
      //! \param nnext_size The number of chunks to request from the system
      //! the first time that object needs to allocate system memory.
      //! The default is 32. This parameter may not be 0.
      //! \param nmax_size The maximum number of chunks to allocate in one block.
    }

    ~pool()
    { //! Destructs the Pool, freeing its list of memory blocks.
      purge_memory();
    }

    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    //  Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    //  Returns true if memory was actually deallocated
    bool purge_memory();

    size_type get_next_size() const
    { //! Number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be 0.
      //! \returns next_size.
      return next_size;
    }
    void set_next_size(const size_type nnext_size)
    { //! Set the number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be set to 0.
      next_size = start_size = nnext_size;
    }
    size_type get_max_size() const
    { //! \returns max_size.
      return max_size;
    }
    void set_max_size(const size_type nmax_size)
    { //! Set max_size.
      max_size = nmax_size;
    }
    size_type get_requested_size() const
    { //! \returns the requested size passed into the constructor.
      //! (This value will not change during the lifetime of a Pool object.)
      return requested_size;
    }

    // Both malloc and ordered_malloc do a quick inlined check first for any
    //  free chunks.  Only if we need to get another memory block do we call
    //  the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
    { //! Allocates a chunk of memory. Searches in the list of memory blocks
      //! for a block that has a free chunk, and returns that free chunk if found.
      //! Otherwise, creates a new memory block, adds its free list to the pool's free list,
      //! and returns a free chunk from that block.
      //! If a new memory block cannot be allocated, returns 0. Amortized O(1).
      // Look for a non-empty storage
      if (!store().empty())
        return (store().malloc)();
      return malloc_need_resize();
    }

    void * ordered_malloc()
    { //! Same as malloc, only merges the free lists, to preserve order. Amortized O(1).
      //! \returns a free chunk from that block.
      //! If a new memory block cannot be allocated, returns 0.
      // Look for a non-empty storage
      if (!store().empty())
        return (store().malloc)();
      return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);
    //! Same as malloc, only allocates enough contiguous chunks to cover n * requested_size bytes. Amortized O(n).
    //! \returns the start of that contiguous run of chunks.
    //! If a new memory block cannot be allocated, returns 0.

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk)
    { //! Deallocates a chunk of memory. Note that chunk may not be 0. O(1).
      //!
      //! chunk must have been previously returned by t.malloc() or t.ordered_malloc().
      (store().free)(chunk);
    }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void ordered_free(void * const chunk)
    { //! Same as above, but is order-preserving.
      //!
      //! Note that chunk may not be 0. O(N) with respect to the size of the free list.
      //! chunk must have been previously returned by t.malloc() or t.ordered_malloc().
      store().ordered_free(chunk);
    }

    // pre: 'chunks' must have been previously
    //        returned by *this.ordered_malloc(n).
    void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunks, const size_type n)
    { //! Assumes that chunks actually refers to a block of chunks
      //! spanning n * requested_size bytes; deallocates each chunk in that block.
      //!
      //! chunks must have been previously returned by t.ordered_malloc(n).
      //! Note that chunks may not be 0. O(n).
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().free_n(chunks, num_chunks, partition_size);
    }
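
    // For instance (illustrative numbers only): with requested_size = 10 and
    // partition_size = 16, freeing a run obtained from ordered_malloc(4) gives
    // total_req_size = 40, so num_chunks = 40 / 16 + 1 = 3 chunks are returned
    // to the free list.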

    // pre: 'chunks' must have been previously
    //        returned by *this.ordered_malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    { //! Assumes that chunks actually refers to a block of chunks spanning n * requested_size bytes;
      //! deallocates each chunk in that block.
      //!
      //! Note that chunks may not be 0. Order-preserving. O(N + n) where N is the size of the free list.
      //! chunks must have been previously returned by t.ordered_malloc(n).

      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().ordered_free_n(chunks, num_chunks, partition_size);
    }

    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    { //! \returns true if chunk was allocated from *this or
      //! may be returned as the result of a future allocation from *this.
      //! Returns false if chunk was allocated from some other pool or
      //! may be returned as the result of a future allocation from some other pool.
      //! Otherwise, the return value is meaningless.
      //! Note that this function may not be used to reliably test random pointer values.
      return (find_POD(chunk).valid());
    }
};

#ifndef BOOST_NO_INCLASS_MEMBER_INITIALIZATION
template <typename UserAllocator>
typename pool<UserAllocator>::size_type const pool<UserAllocator>::min_alloc_size;
template <typename UserAllocator>
typename pool<UserAllocator>::size_type const pool<UserAllocator>::min_align;
#endif

template <typename UserAllocator>
bool pool<UserAllocator>::release_memory()
{ //! pool must be ordered. Frees every memory block that doesn't have any allocated chunks.
  //! \returns true if at least one memory block was freed.

  // ret is the return value: it will be set to true when we actually call
  //  UserAllocator::free(..)
  bool ret = false;

  // This is a current & previous iterator pair over the memory block list
  details::PODptr<size_type> ptr = list;
  details::PODptr<size_type> prev;

  // This is a current & previous iterator pair over the free memory chunk list
  //  Note that "prev_free" in this case does NOT point to the previous memory
  //  chunk in the free list, but rather the last free memory chunk before the
  //  current block.
  void * free_p = this->first;
  void * prev_free_p = 0;

  const size_type partition_size = alloc_size();

  // Search through all the allocated memory blocks
  while (ptr.valid())
  {
    // At this point:
    //  ptr points to a valid memory block
    //  free_p points to either:
    //    0 if there are no more free chunks
    //    the first free chunk in this or some next memory block
    //  prev_free_p points to either:
    //    the last free chunk in some previous memory block
    //    0 if there is no such free chunk
    //  prev is either:
    //    the PODptr whose next() is ptr
    //    !valid() if there is no such PODptr

    // If there are no more free memory chunks, then every remaining
    //  block is allocated out to its fullest capacity, and we can't
    //  release any more memory
    if (free_p == 0)
      break;

    // We have to check all the chunks.  If they are *all* free (i.e., present
    //  in the free list), then we can free the block.
    bool all_chunks_free = true;

    // Iterate 'i' through all chunks in the memory block
    // if free starts in the memory block, be careful to keep it there
    void * saved_free = free_p;
    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
    {
      // If this chunk is not free
      if (i != free_p)
      {
        // We won't be able to free this block
        all_chunks_free = false;

        // free_p might have travelled outside ptr
        free_p = saved_free;
        // Abort searching the chunks; we won't be able to free this
        //  block because a chunk is not free.
        break;
      }

      // We do not increment prev_free_p because we are in the same block
      free_p = nextof(free_p);
    }

    // post: if the memory block has any chunks, free_p points to one of them
    // otherwise, our assertions above are still valid

    const details::PODptr<size_type> next = ptr.next();

    if (!all_chunks_free)
    {
      if (is_from(free_p, ptr.begin(), ptr.element_size()))
      {
        std::less<void *> lt;
        void * const end = ptr.end();
        do
        {
          prev_free_p = free_p;
          free_p = nextof(free_p);
        } while (free_p && lt(free_p, end));
      }
      // This invariant is now restored:
      //  free_p points to the first free chunk in some next memory block, or
      //   0 if there is no such chunk.
      //  prev_free_p points to the last free chunk in this memory block.

      // We are just about to advance ptr.  Maintain the invariant:
      //  prev is the PODptr whose next() is ptr, or !valid()
      //   if there is no such PODptr
      prev = ptr;
    }
    else
    {
      // All chunks from this block are free

      // Remove block from list
      if (prev.valid())
        prev.next(next);
      else
        list = next;

      // Remove all entries in the free list from this block
      if (prev_free_p != 0)
        nextof(prev_free_p) = free_p;
      else
        this->first = free_p;

      // And release memory
      (UserAllocator::free)(ptr.begin());
      ret = true;
    }

    // Increment ptr
    ptr = next;
  }

  next_size = start_size;
  return ret;
}

template <typename UserAllocator>
bool pool<UserAllocator>::purge_memory()
{ //! pool must be ordered.
  //! Frees every memory block.
  //!
  //! This function invalidates any pointers previously returned
  //! by allocation functions of t.
  //! \returns true if at least one memory block was freed.

  details::PODptr<size_type> iter = list;

  if (!iter.valid())
    return false;

  do
  {
    // hold "next" pointer
    const details::PODptr<size_type> next = iter.next();

    // delete the storage
    (UserAllocator::free)(iter.begin());

    // increment iter
    iter = next;
  } while (iter.valid());

  list.invalidate();
  this->first = 0;
  next_size = start_size;

  return true;
}

template <typename UserAllocator>
void * pool<UserAllocator>::malloc_need_resize()
{ //! No memory in any of our storages; make a new storage and
  //!  allocate a chunk from the newly malloc'ed storage.
  //! \returns a pointer to the new chunk, or 0 if out-of-memory.
  size_type partition_size = alloc_size();
  size_type POD_size = static_cast<size_type>(next_size * partition_size +
      integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
  char * ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
  {
    if(next_size > 4)
    {
      next_size >>= 1;
      partition_size = alloc_size();
      POD_size = static_cast<size_type>(next_size * partition_size +
          integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
      ptr = (UserAllocator::malloc)(POD_size);
    }
    if(ptr == 0)
      return 0;
  }
  const details::PODptr<size_type> node(ptr, POD_size);

  BOOST_USING_STD_MIN();
  if(!max_size)
    next_size <<= 1;
  else if( next_size*partition_size/requested_size < max_size)
    next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);
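  // With the defaults (nnext_size = 32, nmax_size = 0), this doubles the
  //  request each time a new block is needed, so successive blocks hold
  //  32, 64, 128, ... chunks.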

  // initialize it,
  store().add_block(node.begin(), node.element_size(), partition_size);

  // insert it into the list,
  node.next(list);
  list = node;

  // and return a chunk from it.
  return (store().malloc)();
}

template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc_need_resize()
{ //! No memory in any of our storages; make a new storage.
  //! \returns a pointer to a new chunk, or 0 if out-of-memory.
  size_type partition_size = alloc_size();
  size_type POD_size = static_cast<size_type>(next_size * partition_size +
      integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
  char * ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
  {
    if(next_size > 4)
    {
      next_size >>= 1;
      partition_size = alloc_size();
      POD_size = static_cast<size_type>(next_size * partition_size +
          integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
      ptr = (UserAllocator::malloc)(POD_size);
    }
    if(ptr == 0)
      return 0;
  }
  const details::PODptr<size_type> node(ptr, POD_size);

  BOOST_USING_STD_MIN();
  if(!max_size)
    next_size <<= 1;
  else if( next_size*partition_size/requested_size < max_size)
    next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);

  // initialize it,
  // (we can use "add_block" here because we know that
  //  the free list is empty, so we don't have to use
  //  the slower ordered version)
  store().add_ordered_block(node.begin(), node.element_size(), partition_size);

  // insert it into the list,
  // handle border case
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }
  // and return a chunk from it.
  return (store().malloc)();
}

template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc(const size_type n)
{ //! Gets the address of a contiguous run of n chunks, allocating new memory if none is already available.
  //! \returns the address of the run if the allocation succeeded.
  //! \returns 0 if there is not enough memory for n chunks.

  const size_type partition_size = alloc_size();
  const size_type total_req_size = n * requested_size;
  const size_type num_chunks = total_req_size / partition_size +
      ((total_req_size % partition_size) ? true : false);

  void * ret = store().malloc_n(num_chunks, partition_size);

#ifdef BOOST_POOL_INSTRUMENT
  std::cout << "Allocating " << n << " chunks from pool of size " << partition_size << std::endl;
#endif
  if ((ret != 0) || (n == 0))
    return ret;

#ifdef BOOST_POOL_INSTRUMENT
  std::cout << "Cache miss, allocating another chunk...\n";
#endif

  // Not enough memory in our storages; make a new storage,
  BOOST_USING_STD_MAX();
  next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
  size_type POD_size = static_cast<size_type>(next_size * partition_size +
      integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
  char * ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
  {
    if(num_chunks < next_size)
    {
      // Try again with just enough memory to do the job, or at least whatever we
      // allocated last time:
      next_size >>= 1;
      next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
      POD_size = static_cast<size_type>(next_size * partition_size +
          integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
      ptr = (UserAllocator::malloc)(POD_size);
    }
    if(ptr == 0)
      return 0;
  }
  const details::PODptr<size_type> node(ptr, POD_size);

  // Split up block so we can use what wasn't requested.
  if (next_size > num_chunks)
    store().add_ordered_block(node.begin() + num_chunks * partition_size,
        node.element_size() - num_chunks * partition_size, partition_size);

  BOOST_USING_STD_MIN();
  if(!max_size)
    next_size <<= 1;
  else if( next_size*partition_size/requested_size < max_size)
    next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);

  // insert it into the list,
  // handle border case.
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end, or if we've found where "node" goes.
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  // and return it.
  return node.begin();
}

template <typename UserAllocator>
details::PODptr<typename pool<UserAllocator>::size_type>
pool<UserAllocator>::find_POD(void * const chunk) const
{ //! Find the memory block that this chunk was allocated from.
  //! \returns the PODptr for that memory block, or an invalid PODptr if no block contains this chunk.
  // Iterate down list to find which storage this chunk is from.
  details::PODptr<size_type> iter = list;
  while (iter.valid())
  {
    if (is_from(chunk, iter.begin(), iter.element_size()))
      return iter;
    iter = iter.next();
  }

  return iter;
}

#else // BOOST_POOL_VALGRIND

template<typename UserAllocator>
class pool
{
public:
  // types
  typedef UserAllocator                           user_allocator;   // User allocator.
  typedef typename UserAllocator::size_type       size_type;        // An unsigned integral type that can represent the size of the largest object to be allocated.
  typedef typename UserAllocator::difference_type difference_type;  // A signed integral type that can represent the difference of any two pointers.

  // construct/copy/destruct
  explicit pool(const size_type s, const size_type = 32, const size_type m = 0) : chunk_size(s), max_alloc_size(m) {}
  ~pool()
  {
    purge_memory();
  }

  bool release_memory()
  {
    bool ret = free_list.empty() ? false : true;
    for(std::set<void*>::iterator pos = free_list.begin(); pos != free_list.end(); ++pos)
    {
      (user_allocator::free)(static_cast<char*>(*pos));
    }
    free_list.clear();
    return ret;
  }
  bool purge_memory()
  {
    bool ret = free_list.empty() && used_list.empty() ? false : true;
    for(std::set<void*>::iterator pos = free_list.begin(); pos != free_list.end(); ++pos)
    {
      (user_allocator::free)(static_cast<char*>(*pos));
    }
    free_list.clear();
    for(std::set<void*>::iterator pos = used_list.begin(); pos != used_list.end(); ++pos)
    {
      (user_allocator::free)(static_cast<char*>(*pos));
    }
    used_list.clear();
    return ret;
  }
  size_type get_next_size() const
  {
    return 1;
  }
  void set_next_size(const size_type){}
  size_type get_max_size() const
  {
    return max_alloc_size;
  }
  void set_max_size(const size_type s)
  {
    max_alloc_size = s;
  }
  size_type get_requested_size() const
  {
    return chunk_size;
  }
  void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
  {
    void* ret;
    if(free_list.empty())
    {
      ret = (user_allocator::malloc)(chunk_size);
      VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size);
    }
    else
    {
      ret = *free_list.begin();
      free_list.erase(free_list.begin());
      VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size);
    }
    used_list.insert(ret);
    return ret;
  }
  void * ordered_malloc()
  {
    return (this->malloc)();
  }
  void * ordered_malloc(size_type n)
  {
    if(max_alloc_size && (n > max_alloc_size))
      return 0;
    void* ret = (user_allocator::malloc)(chunk_size * n);
    used_list.insert(ret);
    return ret;
  }
  void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk)
  {
    BOOST_ASSERT(used_list.count(chunk) == 1);
    BOOST_ASSERT(free_list.count(chunk) == 0);
    used_list.erase(chunk);
    free_list.insert(chunk);
    VALGRIND_MAKE_MEM_NOACCESS(chunk, chunk_size);
  }
  void ordered_free(void *const chunk)
  {
    return (this->free)(chunk);
  }
  void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk, const size_type)
  {
    BOOST_ASSERT(used_list.count(chunk) == 1);
    BOOST_ASSERT(free_list.count(chunk) == 0);
    used_list.erase(chunk);
    (user_allocator::free)(static_cast<char*>(chunk));
  }
  void ordered_free(void *const chunk, const size_type n)
  {
    (this->free)(chunk, n);
  }
  bool is_from(void *const chunk) const
  {
    return used_list.count(chunk) || free_list.count(chunk);
  }

protected:
  size_type chunk_size, max_alloc_size;
  std::set<void*> free_list, used_list;
};

#endif

} // namespace boost

#ifdef BOOST_MSVC
#pragma warning(pop)
#endif

#endif // BOOST_POOL_HPP