//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2013. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_CONTAINER_DETAIL_NODE_POOL_IMPL_HPP
#define BOOST_CONTAINER_DETAIL_NODE_POOL_IMPL_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif

#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/container/detail/config_begin.hpp>
#include <boost/container/detail/workaround.hpp>
#include <boost/container/container_fwd.hpp>

#include <boost/container/detail/math_functions.hpp>
#include <boost/container/detail/mpl.hpp>
#include <boost/container/detail/pool_common.hpp>
#include <boost/container/detail/to_raw_pointer.hpp>
#include <boost/container/detail/type_traits.hpp>

#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/slist.hpp>

#include <boost/core/no_exceptions_support.hpp>
#include <boost/assert.hpp>
#include <cstddef>

namespace boost {
namespace container {
namespace container_detail {

43 | template<class SegmentManagerBase> | |
44 | class private_node_pool_impl | |
45 | { | |
46 | //Non-copyable | |
47 | private_node_pool_impl(); | |
48 | private_node_pool_impl(const private_node_pool_impl &); | |
49 | private_node_pool_impl &operator=(const private_node_pool_impl &); | |
50 | ||
51 | //A node object will hold node_t when it's not allocated | |
52 | public: | |
53 | typedef typename SegmentManagerBase::void_pointer void_pointer; | |
54 | typedef typename node_slist<void_pointer>::slist_hook_t slist_hook_t; | |
55 | typedef typename node_slist<void_pointer>::node_t node_t; | |
56 | typedef typename node_slist<void_pointer>::node_slist_t free_nodes_t; | |
57 | typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain; | |
58 | typedef typename SegmentManagerBase::size_type size_type; | |
59 | ||
60 | private: | |
61 | typedef typename bi::make_slist | |
62 | < node_t, bi::base_hook<slist_hook_t> | |
63 | , bi::linear<true> | |
64 | , bi::constant_time_size<false> >::type blockslist_t; | |
65 | ||
66 | static size_type get_rounded_size(size_type orig_size, size_type round_to) | |
67 | { return ((orig_size-1)/round_to+1)*round_to; } | |
68 | ||
69 | public: | |
70 | ||
71 | //!Segment manager typedef | |
72 | typedef SegmentManagerBase segment_manager_base_type; | |
73 | ||
74 | //!Constructor from a segment manager. Never throws | |
75 | private_node_pool_impl(segment_manager_base_type *segment_mngr_base, size_type node_size, size_type nodes_per_block) | |
76 | : m_nodes_per_block(nodes_per_block) | |
77 | , m_real_node_size(lcm(node_size, size_type(alignment_of<node_t>::value))) | |
78 | //General purpose allocator | |
79 | , mp_segment_mngr_base(segment_mngr_base) | |
80 | , m_blocklist() | |
81 | , m_freelist() | |
82 | //Debug node count | |
83 | , m_allocated(0) | |
84 | {} | |
85 | ||
86 | //!Destructor. Deallocates all allocated blocks. Never throws | |
87 | ~private_node_pool_impl() | |
88 | { this->purge_blocks(); } | |
89 | ||
90 | size_type get_real_num_node() const | |
91 | { return m_nodes_per_block; } | |
92 | ||
93 | //!Returns the segment manager. Never throws | |
94 | segment_manager_base_type* get_segment_manager_base()const | |
95 | { return container_detail::to_raw_pointer(mp_segment_mngr_base); } | |
96 | ||
97 | void *allocate_node() | |
98 | { return this->priv_alloc_node(); } | |
99 | ||
100 | //!Deallocates an array pointed by ptr. Never throws | |
101 | void deallocate_node(void *ptr) | |
102 | { this->priv_dealloc_node(ptr); } | |
103 | ||
104 | //!Allocates a singly linked list of n nodes ending in null pointer. | |
105 | void allocate_nodes(const size_type n, multiallocation_chain &chain) | |
106 | { | |
107 | //Preallocate all needed blocks to fulfill the request | |
108 | size_type cur_nodes = m_freelist.size(); | |
109 | if(cur_nodes < n){ | |
110 | this->priv_alloc_block(((n - cur_nodes) - 1)/m_nodes_per_block + 1); | |
111 | } | |
112 | ||
113 | //We just iterate the needed nodes to get the last we'll erase | |
114 | typedef typename free_nodes_t::iterator free_iterator; | |
115 | free_iterator before_last_new_it = m_freelist.before_begin(); | |
116 | for(size_type j = 0; j != n; ++j){ | |
117 | ++before_last_new_it; | |
118 | } | |
119 | ||
120 | //Cache the first node of the allocated range before erasing | |
121 | free_iterator first_node(m_freelist.begin()); | |
122 | free_iterator last_node (before_last_new_it); | |
123 | ||
124 | //Erase the range. Since we already have the distance, this is O(1) | |
125 | m_freelist.erase_after( m_freelist.before_begin() | |
126 | , ++free_iterator(before_last_new_it) | |
127 | , n); | |
128 | ||
129 | //Now take the last erased node and just splice it in the end | |
130 | //of the intrusive list that will be traversed by the multialloc iterator. | |
131 | chain.incorporate_after(chain.before_begin(), &*first_node, &*last_node, n); | |
132 | m_allocated += n; | |
133 | } | |
134 | ||
135 | void deallocate_nodes(multiallocation_chain &chain) | |
136 | { | |
137 | typedef typename multiallocation_chain::iterator iterator; | |
138 | iterator it(chain.begin()), itend(chain.end()); | |
139 | while(it != itend){ | |
140 | void *pElem = &*it; | |
141 | ++it; | |
142 | this->priv_dealloc_node(pElem); | |
143 | } | |
144 | } | |
145 | ||
146 | //!Deallocates all the free blocks of memory. Never throws | |
147 | void deallocate_free_blocks() | |
148 | { | |
149 | typedef typename free_nodes_t::iterator nodelist_iterator; | |
150 | typename blockslist_t::iterator bit(m_blocklist.before_begin()), | |
151 | it(m_blocklist.begin()), | |
152 | itend(m_blocklist.end()); | |
153 | free_nodes_t backup_list; | |
154 | nodelist_iterator backup_list_last = backup_list.before_begin(); | |
155 | ||
156 | //Execute the algorithm and get an iterator to the last value | |
157 | size_type blocksize = (get_rounded_size) | |
158 | (m_real_node_size*m_nodes_per_block, (size_type) alignment_of<node_t>::value); | |
159 | ||
160 | while(it != itend){ | |
161 | //Collect all the nodes from the block pointed by it | |
162 | //and push them in the list | |
163 | free_nodes_t free_nodes; | |
164 | nodelist_iterator last_it = free_nodes.before_begin(); | |
165 | const void *addr = get_block_from_hook(&*it, blocksize); | |
166 | ||
167 | m_freelist.remove_and_dispose_if | |
168 | (is_between(addr, blocksize), push_in_list(free_nodes, last_it)); | |
169 | ||
170 | //If the number of nodes is equal to m_nodes_per_block | |
171 | //this means that the block can be deallocated | |
172 | if(free_nodes.size() == m_nodes_per_block){ | |
173 | //Unlink the nodes | |
174 | free_nodes.clear(); | |
175 | it = m_blocklist.erase_after(bit); | |
176 | mp_segment_mngr_base->deallocate((void*)addr); | |
177 | } | |
178 | //Otherwise, insert them in the backup list, since the | |
179 | //next "remove_if" does not need to check them again. | |
180 | else{ | |
181 | //Assign the iterator to the last value if necessary | |
182 | if(backup_list.empty() && !m_freelist.empty()){ | |
183 | backup_list_last = last_it; | |
184 | } | |
185 | //Transfer nodes. This is constant time. | |
186 | backup_list.splice_after | |
187 | ( backup_list.before_begin() | |
188 | , free_nodes | |
189 | , free_nodes.before_begin() | |
190 | , last_it | |
191 | , free_nodes.size()); | |
192 | bit = it; | |
193 | ++it; | |
194 | } | |
195 | } | |
196 | //We should have removed all the nodes from the free list | |
197 | BOOST_ASSERT(m_freelist.empty()); | |
198 | ||
199 | //Now pass all the node to the free list again | |
200 | m_freelist.splice_after | |
201 | ( m_freelist.before_begin() | |
202 | , backup_list | |
203 | , backup_list.before_begin() | |
204 | , backup_list_last | |
205 | , backup_list.size()); | |
206 | } | |
207 | ||
208 | size_type num_free_nodes() | |
209 | { return m_freelist.size(); } | |
210 | ||
211 | //!Deallocates all used memory. Precondition: all nodes allocated from this pool should | |
212 | //!already be deallocated. Otherwise, undefined behaviour. Never throws | |
213 | void purge_blocks() | |
214 | { | |
215 | //check for memory leaks | |
216 | BOOST_ASSERT(m_allocated==0); | |
217 | size_type blocksize = (get_rounded_size) | |
218 | (m_real_node_size*m_nodes_per_block, (size_type)alignment_of<node_t>::value); | |
219 | ||
220 | //We iterate though the NodeBlock list to free the memory | |
221 | while(!m_blocklist.empty()){ | |
222 | void *addr = get_block_from_hook(&m_blocklist.front(), blocksize); | |
223 | m_blocklist.pop_front(); | |
224 | mp_segment_mngr_base->deallocate((void*)addr); | |
225 | } | |
226 | //Just clear free node list | |
227 | m_freelist.clear(); | |
228 | } | |
229 | ||
230 | void swap(private_node_pool_impl &other) | |
231 | { | |
232 | BOOST_ASSERT(m_nodes_per_block == other.m_nodes_per_block); | |
233 | BOOST_ASSERT(m_real_node_size == other.m_real_node_size); | |
234 | std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base); | |
235 | m_blocklist.swap(other.m_blocklist); | |
236 | m_freelist.swap(other.m_freelist); | |
237 | std::swap(m_allocated, other.m_allocated); | |
238 | } | |
239 | ||
240 | private: | |
241 | ||
242 | struct push_in_list | |
243 | { | |
244 | push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it) | |
245 | : slist_(l), last_it_(it) | |
246 | {} | |
247 | ||
248 | void operator()(typename free_nodes_t::pointer p) const | |
249 | { | |
250 | slist_.push_front(*p); | |
251 | if(slist_.size() == 1){ //Cache last element | |
252 | ++last_it_ = slist_.begin(); | |
253 | } | |
254 | } | |
255 | ||
256 | private: | |
257 | free_nodes_t &slist_; | |
258 | typename free_nodes_t::iterator &last_it_; | |
259 | }; | |
260 | ||
261 | struct is_between | |
262 | { | |
263 | typedef typename free_nodes_t::value_type argument_type; | |
264 | typedef bool result_type; | |
265 | ||
266 | is_between(const void *addr, std::size_t size) | |
267 | : beg_(static_cast<const char *>(addr)), end_(beg_+size) | |
268 | {} | |
269 | ||
270 | bool operator()(typename free_nodes_t::const_reference v) const | |
271 | { | |
272 | return (beg_ <= reinterpret_cast<const char *>(&v) && | |
273 | end_ > reinterpret_cast<const char *>(&v)); | |
274 | } | |
275 | private: | |
276 | const char * beg_; | |
277 | const char * end_; | |
278 | }; | |
279 | ||
280 | //!Allocates one node, using single segregated storage algorithm. | |
281 | //!Never throws | |
282 | node_t *priv_alloc_node() | |
283 | { | |
284 | //If there are no free nodes we allocate a new block | |
285 | if (m_freelist.empty()) | |
286 | this->priv_alloc_block(1); | |
287 | //We take the first free node | |
288 | node_t *n = (node_t*)&m_freelist.front(); | |
289 | m_freelist.pop_front(); | |
290 | ++m_allocated; | |
291 | return n; | |
292 | } | |
293 | ||
294 | //!Deallocates one node, using single segregated storage algorithm. | |
295 | //!Never throws | |
296 | void priv_dealloc_node(void *pElem) | |
297 | { | |
298 | //We put the node at the beginning of the free node list | |
299 | node_t * to_deallocate = static_cast<node_t*>(pElem); | |
300 | m_freelist.push_front(*to_deallocate); | |
301 | BOOST_ASSERT(m_allocated>0); | |
302 | --m_allocated; | |
303 | } | |
304 | ||
305 | //!Allocates several blocks of nodes. Can throw | |
306 | void priv_alloc_block(size_type num_blocks) | |
307 | { | |
308 | BOOST_ASSERT(num_blocks > 0); | |
309 | size_type blocksize = | |
310 | (get_rounded_size)(m_real_node_size*m_nodes_per_block, (size_type)alignment_of<node_t>::value); | |
311 | ||
312 | BOOST_TRY{ | |
313 | for(size_type i = 0; i != num_blocks; ++i){ | |
314 | //We allocate a new NodeBlock and put it as first | |
315 | //element in the free Node list | |
316 | char *pNode = reinterpret_cast<char*> | |
317 | (mp_segment_mngr_base->allocate(blocksize + sizeof(node_t))); | |
318 | char *pBlock = pNode; | |
319 | m_blocklist.push_front(get_block_hook(pBlock, blocksize)); | |
320 | ||
321 | //We initialize all Nodes in Node Block to insert | |
322 | //them in the free Node list | |
323 | for(size_type j = 0; j < m_nodes_per_block; ++j, pNode += m_real_node_size){ | |
324 | m_freelist.push_front(*new (pNode) node_t); | |
325 | } | |
326 | } | |
327 | } | |
328 | BOOST_CATCH(...){ | |
329 | //to-do: if possible, an efficient way to deallocate allocated blocks | |
330 | BOOST_RETHROW | |
331 | } | |
332 | BOOST_CATCH_END | |
333 | } | |
334 | ||
335 | //!Deprecated, use deallocate_free_blocks | |
336 | void deallocate_free_chunks() | |
337 | { this->deallocate_free_blocks(); } | |
338 | ||
339 | //!Deprecated, use purge_blocks | |
340 | void purge_chunks() | |
341 | { this->purge_blocks(); } | |
342 | ||
343 | private: | |
344 | //!Returns a reference to the block hook placed in the end of the block | |
345 | static node_t & get_block_hook (void *block, size_type blocksize) | |
346 | { | |
347 | return *reinterpret_cast<node_t*>(reinterpret_cast<char*>(block) + blocksize); | |
348 | } | |
349 | ||
350 | //!Returns the starting address of the block reference to the block hook placed in the end of the block | |
351 | void *get_block_from_hook (node_t *hook, size_type blocksize) | |
352 | { | |
353 | return (reinterpret_cast<char*>(hook) - blocksize); | |
354 | } | |
355 | ||
356 | private: | |
357 | typedef typename boost::intrusive::pointer_traits | |
358 | <void_pointer>::template rebind_pointer<segment_manager_base_type>::type segment_mngr_base_ptr_t; | |
359 | ||
360 | const size_type m_nodes_per_block; | |
361 | const size_type m_real_node_size; | |
362 | segment_mngr_base_ptr_t mp_segment_mngr_base; //Segment manager | |
363 | blockslist_t m_blocklist; //Intrusive container of blocks | |
364 | free_nodes_t m_freelist; //Intrusive container of free nods | |
365 | size_type m_allocated; //Used nodes for debugging | |
366 | }; | |

}  //namespace container_detail {
}  //namespace container {
}  //namespace boost {

#include <boost/container/detail/config_end.hpp>

#endif   //#ifndef BOOST_CONTAINER_DETAIL_NODE_POOL_IMPL_HPP