]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | ////////////////////////////////////////////////////////////////////////////// |
2 | // | |
3 | // (C) Copyright Ion Gaztanaga 2015-2016. | |
4 | // Distributed under the Boost Software License, Version 1.0. | |
5 | // (See accompanying file LICENSE_1_0.txt or copy at | |
6 | // http://www.boost.org/LICENSE_1_0.txt) | |
7 | // | |
8 | // See http://www.boost.org/libs/move for documentation. | |
9 | // | |
10 | ////////////////////////////////////////////////////////////////////////////// | |
11 | // | |
12 | // Stable sorting that works in O(N*log(N)) worst time | |
13 | // and uses O(1) extra memory | |
14 | // | |
15 | ////////////////////////////////////////////////////////////////////////////// | |
16 | // | |
17 | // The main idea of the adaptive_sort algorithm was developed by Andrey Astrelin | |
// and explained in the article from the Russian collaborative blog
19 | // Habrahabr (http://habrahabr.ru/post/205290/). The algorithm is based on | |
20 | // ideas from B-C. Huang and M. A. Langston explained in their article | |
21 | // "Fast Stable Merging and Sorting in Constant Extra Space (1989-1992)" | |
22 | // (http://comjnl.oxfordjournals.org/content/35/6/643.full.pdf). | |
23 | // | |
24 | // This implementation by Ion Gaztanaga uses previous ideas with additional changes: | |
25 | // | |
26 | // - Use of GCD-based rotation. | |
27 | // - Non power of two buffer-sizes. | |
28 | // - Tries to find sqrt(len)*2 unique keys, so that the merge sort | |
29 | // phase can form up to sqrt(len)*4 segments if enough keys are found. | |
30 | // - The merge-sort phase can take advantage of external memory to | |
31 | // save some additional combination steps. | |
32 | // - Optimized comparisons when selection-sorting blocks as A and B blocks | |
33 | // are already sorted. | |
34 | // - The combination phase is performed alternating merge to left and merge | |
35 | // to right phases minimizing swaps due to internal buffer repositioning. | |
36 | // - When merging blocks special optimizations are made to avoid moving some | |
37 | // elements twice. | |
38 | // | |
39 | // The adaptive_merge algorithm was developed by Ion Gaztanaga reusing some parts | |
40 | // from the sorting algorithm and implementing an additional block merge algorithm | |
41 | // without moving elements to left or right, which is used when external memory | |
42 | // is available. | |
43 | ////////////////////////////////////////////////////////////////////////////// | |
44 | #ifndef BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP | |
45 | #define BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP | |
46 | ||
47 | #include <boost/move/detail/config_begin.hpp> | |
48 | #include <boost/move/detail/reverse_iterator.hpp> | |
49 | #include <boost/move/algo/move.hpp> | |
50 | #include <boost/move/algo/detail/merge.hpp> | |
51 | #include <boost/move/adl_move_swap.hpp> | |
52 | #include <boost/move/algo/detail/insertion_sort.hpp> | |
53 | #include <boost/move/algo/detail/merge_sort.hpp> | |
54 | #include <boost/move/algo/detail/merge.hpp> | |
55 | #include <boost/assert.hpp> | |
56 | #include <boost/cstdint.hpp> | |
57 | ||
58 | #ifdef BOOST_MOVE_ADAPTIVE_SORT_STATS | |
59 | #define BOOST_MOVE_ADAPTIVE_SORT_PRINT(STR, L) \ | |
60 | print_stats(STR, L)\ | |
61 | // | |
62 | #else | |
63 | #define BOOST_MOVE_ADAPTIVE_SORT_PRINT(STR, L) | |
64 | #endif | |
65 | ||
66 | namespace boost { | |
67 | namespace movelib { | |
68 | ||
69 | namespace detail_adaptive { | |
70 | ||
// Cutoff length below which the algorithms presumably fall back to
// insertion sort — confirm against the users of this constant.
// Must be a power of two (enforced by the static assert below).
static const std::size_t AdaptiveSortInsertionSortThreshold = 16;
//static const std::size_t AdaptiveSortInsertionSortThreshold = 4;
BOOST_STATIC_ASSERT((AdaptiveSortInsertionSortThreshold&(AdaptiveSortInsertionSortThreshold-1)) == 0);

// Unsigned integer type able to hold a pointer value, used for the
// alignment arithmetic in adaptive_xbuf::aligned_trailing.
#if defined BOOST_HAS_INTPTR_T
typedef ::boost::uintptr_t uintptr_t;
#else
typedef std::size_t uintptr_t;
#endif
80 | ||
//Returns the smaller of two values; on ties the second argument wins,
//because a < b is false for equal values.
template<class T>
const T &min_value(const T &a, const T &b)
{
   if(a < b){
      return a;
   }
   return b;
}
86 | ||
//Returns the larger of two values; on ties the second argument wins,
//because a > b is false for equal values.
template<class T>
const T &max_value(const T &a, const T &b)
{
   if(a > b){
      return a;
   }
   return b;
}
92 | ||
//Fixed-capacity scratch buffer over caller-provided raw (uninitialized)
//memory. Elements are created with placement new and destroyed
//explicitly; the class never allocates or deallocates storage itself.
//Used by the adaptive sort/merge algorithms as optional external memory.
template<class T>
class adaptive_xbuf
{
   //Non-copyable
   adaptive_xbuf(const adaptive_xbuf &);
   adaptive_xbuf & operator=(const adaptive_xbuf &);

   public:
   typedef T* iterator;

   //Empty buffer with no storage attached
   adaptive_xbuf()
      : m_ptr(0), m_size(0), m_capacity(0)
   {}

   //Buffer over raw_memory able to hold `capacity` objects of type T
   adaptive_xbuf(T *raw_memory, std::size_t capacity)
      : m_ptr(raw_memory), m_size(0), m_capacity(capacity)
   {}

   //Replaces the contents with [first, first+n): move-assigns over the
   //already-constructed prefix, then destroys the surplus (n <= m_size)
   //or placement-constructs the remainder (n > m_size).
   //Precondition (unchecked): n <= m_capacity.
   template<class RandIt>
   void move_assign(RandIt first, std::size_t n)
   {
      if(n <= m_size){
         boost::move(first, first+n, m_ptr);
         //Destroy trailing elements beyond the new size
         std::size_t size = m_size;
         while(size-- != n){
            m_ptr[size].~T();
         }
         m_size = n;
      }
      else{
         //Move over constructed elements first...
         T *result = boost::move(first, first+m_size, m_ptr);
         //...then construct the rest in uninitialized storage
         boost::uninitialized_move(first+m_size, first+n, result);
         m_size = n;
      }
   }

   //Appends [first, first+n) by move-construction at the end
   template<class RandIt>
   void push_back(RandIt first, std::size_t n)
   {
      BOOST_ASSERT(m_capacity - m_size >= n);
      boost::uninitialized_move(first, first+n, m_ptr+m_size);
      m_size += n;
   }

   //Move-constructs *it as the new last element; returns its address
   template<class RandIt>
   iterator add(RandIt it)
   {
      BOOST_ASSERT(m_size < m_capacity);
      T * p_ret = m_ptr + m_size;
      ::new(p_ret) T(::boost::move(*it));
      ++m_size;
      return p_ret;
   }

   //Move-inserts *it before pos, shifting [pos, end()) one slot right
   template<class RandIt>
   void insert(iterator pos, RandIt it)
   {
      if(pos == (m_ptr + m_size)){
         //Appending at the end needs no shifting
         this->add(it);
      }
      else{
         //Construct a new last element from the current last one...
         this->add(m_ptr+m_size-1);
         //m_size updated
         //...shift the middle one position right, then move the value in
         boost::move_backward(pos, m_ptr+m_size-2, m_ptr+m_size-1);
         *pos = boost::move(*it);
      }
   }

   //Adjusts the logical size without constructing or destroying
   //elements; the caller is responsible for object lifetimes.
   void set_size(std::size_t size)
   {
      m_size = size;
   }

   //Destroys trailing elements so that size() becomes `size`
   //(no-op if already small enough)
   void shrink_to_fit(std::size_t const size)
   {
      if(m_size > size){
         for(std::size_t szt_i = size; szt_i != m_size; ++szt_i){
            m_ptr[szt_i].~T();
         }
         m_size = size;
      }
   }

   //Grows the buffer to `size` elements: the first new element is
   //move-constructed from t, each subsequent one from its predecessor,
   //and finally t is restored from the last element, so t's value is
   //preserved across the call.
   //NOTE(review): asserts m_size < m_capacity rather than
   //size <= m_capacity; callers apparently guarantee the latter — confirm.
   void initialize_until(std::size_t const size, T &t)
   {
      BOOST_ASSERT(m_size < m_capacity);
      if(m_size < size){
         ::new((void*)&m_ptr[m_size]) T(::boost::move(t));
         ++m_size;
         for(; m_size != size; ++m_size){
            ::new((void*)&m_ptr[m_size]) T(::boost::move(m_ptr[m_size-1]));
         }
         t = ::boost::move(m_ptr[m_size-1]);
      }
   }

   //Returns true if, after the first `size` Ts, the remaining capacity
   //can still hold trail_count objects of type U starting at a
   //sizeof(U)-multiple address.
   //NOTE(review): uses sizeof(U) as the alignment quantum, i.e. assumes
   //alignof(U) divides sizeof(U) — true for the usual scalar U types.
   template<class U>
   bool supports_aligned_trailing(std::size_t size, std::size_t trail_count) const
   {
      if(this->data()){
         uintptr_t u_addr_sz = uintptr_t(this->data()+size);
         uintptr_t u_addr_cp = uintptr_t(this->data()+this->capacity());
         //Round the start address up to the next multiple of sizeof(U)
         u_addr_sz = ((u_addr_sz + sizeof(U)-1)/sizeof(U))*sizeof(U);

         return (u_addr_cp >= u_addr_sz) && ((u_addr_cp - u_addr_sz)/sizeof(U) >= trail_count);
      }
      return false;
   }

   //First sizeof(U)-multiple address after the current elements
   template<class U>
   U *aligned_trailing() const
   {
      return this->aligned_trailing<U>(this->size());
   }

   //First sizeof(U)-multiple address after the first `pos` elements
   template<class U>
   U *aligned_trailing(std::size_t pos) const
   {
      uintptr_t u_addr = uintptr_t(this->data()+pos);
      u_addr = ((u_addr + sizeof(U)-1)/sizeof(U))*sizeof(U);
      return (U*)u_addr;
   }

   //Destroys all constructed elements; the storage itself is owned by
   //the caller and is not freed here
   ~adaptive_xbuf()
   {
      this->clear();
   }

   std::size_t capacity() const
   {  return m_capacity;  }

   iterator data() const
   {  return m_ptr;  }

   iterator end() const
   {  return m_ptr+m_size;  }

   std::size_t size() const
   {  return m_size;  }

   bool empty() const
   {  return !m_size;  }

   void clear()
   {
      this->shrink_to_fit(0u);
   }

   private:
   T *m_ptr;          //start of the raw storage (not owned)
   std::size_t m_size;      //number of constructed elements
   std::size_t m_capacity;  //total slots available in m_ptr
};
245 | ||
//Adapter that presents an iterator range [first, last) with the same
//interface as adaptive_xbuf, so a slice of the original sequence can be
//used as scratch space. Op controls how move_assign transfers elements.
//NOTE(review): unlike adaptive_xbuf, elements are assumed to be already
//constructed, so no construction/destruction is performed here.
template<class Iterator, class Op>
class range_xbuf
{
   //Non-copyable
   range_xbuf(const range_xbuf &);
   range_xbuf & operator=(const range_xbuf &);

   public:
   typedef typename iterator_traits<Iterator>::size_type size_type;
   typedef Iterator iterator;

   //Buffer over [first, last); initially empty (m_last == m_first)
   range_xbuf(Iterator first, Iterator last)
      : m_first(first), m_last(first), m_cap(last)
   {}

   //Transfers [first, first+n) into the buffer using Op (move or swap)
   template<class RandIt>
   void move_assign(RandIt first, std::size_t n)
   {
      BOOST_ASSERT(size_type(n) <= size_type(m_cap-m_first));
      m_last = Op()(forward_t(), first, first+n, m_first);
   }

   ~range_xbuf()
   {}

   std::size_t capacity() const
   { return m_cap-m_first; }

   Iterator data() const
   { return m_first; }

   Iterator end() const
   { return m_last; }

   std::size_t size() const
   { return m_last-m_first; }

   bool empty() const
   { return m_first == m_last; }

   //Logical clear: underlying elements are left untouched
   void clear()
   {
      m_last = m_first;
   }

   //Move-assigns *it into the next free slot; returns its position
   template<class RandIt>
   iterator add(RandIt it)
   {
      Iterator pos(m_last);
      *pos = boost::move(*it);
      ++m_last;
      return pos;
   }

   //Adjusts the logical size without touching elements
   void set_size(std::size_t size)
   {
      m_last  = m_first;
      m_last += size;
   }

   private:
   Iterator const m_first;  //start of the adapted range
   Iterator m_last;         //one past the last "used" element
   Iterator const m_cap;    //end of the adapted range (capacity limit)
};
310 | ||
311 | ||
312 | template<class RandIt, class Compare> | |
313 | RandIt skip_until_merge | |
314 | ( RandIt first1, RandIt const last1 | |
315 | , const typename iterator_traits<RandIt>::value_type &next_key, Compare comp) | |
316 | { | |
317 | while(first1 != last1 && !comp(next_key, *first1)){ | |
318 | ++first1; | |
319 | } | |
320 | return first1; | |
321 | } | |
322 | ||
//Merges [r_first1, last1) and [r_first2, last2) into d_first until one
//of the two input ranges is exhausted, transferring each element with
//`op`. When elements compare equal, the one from range1 is taken, so
//the merge is stable for the given comp. On return r_first1/r_first2
//point at the first unconsumed element of each range; returns the
//past-the-end iterator of the produced output.
template<class InputIt1, class InputIt2, class OutputIt, class Compare, class Op>
OutputIt op_partial_merge
   (InputIt1 &r_first1, InputIt1 const last1, InputIt2 &r_first2, InputIt2 const last2, OutputIt d_first, Compare comp, Op op)
{
   InputIt1 cur1(r_first1);
   InputIt2 cur2(r_first2);
   while(cur1 != last1 && cur2 != last2){
      if(comp(*cur2, *cur1)){
         op(cur2++, d_first++);
      }
      else{
         op(cur1++, d_first++);
      }
   }
   r_first1 = cur1;
   r_first2 = cur2;
   return d_first;
}
348 | ||
//Merges [first1, last1) with [first2, last2), writing the merged output
//over the storage of range1 while simultaneously evacuating range1's
//displaced elements into the buffer starting at rfirstb. Each step is a
//three-way transfer (buffer slot <- range1 slot <- winner), which needs
//fewer element moves than separate save-then-merge steps.
//The first element is taken from range2 unconditionally: callers first
//run skip_until_merge, so *first2 is known to precede *first1 on entry
//— confirm for any new caller.
//On return rfirst2/rfirstb are advanced past the consumed elements and
//the new end of the buffered (still unmerged) range1 part is returned.
template<class RandIt1, class RandIt2, class RandItB, class Compare, class Op>
RandItB op_buffered_partial_merge_to_left_placed
   ( RandIt1 first1, RandIt1 const last1
   , RandIt2 &rfirst2, RandIt2 const last2
   , RandItB &rfirstb, Compare comp, Op op )
{
   RandItB firstb = rfirstb;
   RandItB lastb = firstb;
   RandIt2 first2 = rfirst2;

   //Move to buffer while merging
   //Three way moves need less moves when op is swap_op so use it
   //when merging elements from range2 to the destination occupied by range1
   if(first1 != last1 && first2 != last2){
      op(three_way_t(), first2++, first1++, lastb++);

      while(true){
         if(first1 == last1){
            break;
         }
         if(first2 == last2){
            //Range2 exhausted: evacuate the rest of range1 to the buffer
            lastb = op(forward_t(), first1, last1, firstb);
            break;
         }
         //The winner comes either from range2 or from the buffered
         //(already displaced) head of range1
         op(three_way_t(), comp(*first2, *firstb) ? first2++ : firstb++, first1++, lastb++);
      }
   }

   rfirst2 = first2;
   rfirstb = firstb;
   return lastb;
}
381 | ||
382 | /////////////////////////////////////////////////////////////////////////////// | |
383 | // | |
384 | // PARTIAL MERGE BUF | |
385 | // | |
386 | /////////////////////////////////////////////////////////////////////////////// | |
387 | ||
//Merges [first1, last1) with [first2, last2) into the place of range1,
//using buf as scratch space for range1's displaced elements.
//[buf_first1_in_out, buf_last1_in_out) delimits the part of range1 that
//already lives in the buffer from a previous call (empty initially);
//on return the iterators delimit whatever remains buffered.
//Returns the new merge frontier (the first not-yet-final position).
template<class Buf, class RandIt, class Compare, class Op>
RandIt op_partial_merge_with_buf_impl
   ( RandIt first1, RandIt const last1, RandIt first2, RandIt last2
   , Buf &buf, typename Buf::iterator &buf_first1_in_out, typename Buf::iterator &buf_last1_in_out
   , Compare comp, Op op
   )
{
   typedef typename Buf::iterator buf_iterator;

   BOOST_ASSERT(first1 != last1);
   BOOST_ASSERT(first2 != last2);
   buf_iterator buf_first1 = buf_first1_in_out;
   buf_iterator buf_last1 = buf_last1_in_out;

   if(buf_first1 == buf_last1){
      //Skip any element that does not need to be moved
      //NOTE(review): *last1 is used as range2's head — callers pass
      //contiguous ranges (first2 == last1), so *last1 == *first2; confirm
      //for any new caller.
      first1 = skip_until_merge(first1, last1, *last1, comp);
      if(first1 == last1){
         return first1;
      }
      //Evacuate the remaining range1 into the buffer while merging
      //range2 into range1's old storage
      buf_first1 = buf.data();
      buf_last1 = op_buffered_partial_merge_to_left_placed(first1, last1, first2, last2, buf_first1, comp, op);
      BOOST_ASSERT(buf_last1 == (buf.data() + (last1-first1)));
      first1 = last1;
   }
   else{
      //Leftover buffered range1 from a previous call: its length must
      //match the pending destination gap
      BOOST_ASSERT((last1-first1) == (buf_last1 - buf_first1));
   }

   //Now merge from buffer
   first1 = op_partial_merge(buf_first1, buf_last1, first2, last2, first1, comp, op);
   buf_first1_in_out = buf_first1;
   buf_last1_in_out = buf_last1;
   return first1;
}
423 | ||
424 | template<class RandIt, class Buf, class Compare, class Op> | |
425 | RandIt op_partial_merge_with_buf | |
426 | ( RandIt first1, RandIt const last1, RandIt first2, RandIt last2 | |
427 | , Buf &buf | |
428 | , typename Buf::iterator &buf_first1_in_out | |
429 | , typename Buf::iterator &buf_last1_in_out | |
430 | , Compare comp | |
431 | , Op op | |
432 | , bool is_stable) | |
433 | { | |
434 | return is_stable | |
435 | ? op_partial_merge_with_buf_impl | |
436 | (first1, last1, first2, last2, buf, buf_first1_in_out, buf_last1_in_out, comp, op) | |
437 | : op_partial_merge_with_buf_impl | |
438 | (first1, last1, first2, last2, buf, buf_first1_in_out, buf_last1_in_out, antistable<Compare>(comp), op) | |
439 | ; | |
440 | } | |
441 | ||
442 | // key_first - sequence of keys, in same order as blocks. key_comp(key, midkey) means stream A | |
443 | // first - first element to merge. | |
444 | // first[-l_block, 0) - buffer | |
445 | // l_block - length of regular blocks. Blocks are stable sorted by 1st elements and key-coded | |
446 | // l_irreg1 is the irregular block to be merged before n_bef_irreg2 blocks (can be 0) | |
447 | // n_bef_irreg2/n_aft_irreg2 are regular blocks | |
// l_irreg2 is an irregular block, that is to be merged after n_bef_irreg2 blocks and before n_aft_irreg2 blocks
449 | // If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks). | |
//Merges a sequence of key-coded blocks using an external buffer.
//Layout: [l_irreg1 irregular A elements][n_bef_irreg2 regular blocks of
//l_block][l_irreg2 irregular B elements spread over n_aft_irreg2 blocks].
//Each regular block's stream (A or B) is encoded by its key:
//key_comp(key, midkey) true means stream A.
//Consecutive blocks of the same stream are just skipped (already sorted);
//blocks of different streams are merged via op_partial_merge_with_buf,
//which may leave part of range1 parked in xbuf between iterations.
template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Op, class Buf>
void op_merge_blocks_with_buf
   ( RandItKeys key_first
   , const typename iterator_traits<RandItKeys>::value_type &midkey
   , KeyCompare key_comp
   , RandIt const first
   , typename iterator_traits<RandIt>::size_type const l_block
   , typename iterator_traits<RandIt>::size_type const l_irreg1
   , typename iterator_traits<RandIt>::size_type const n_bef_irreg2
   , typename iterator_traits<RandIt>::size_type const n_aft_irreg2
   , typename iterator_traits<RandIt>::size_type const l_irreg2
   , Compare comp
   , Op op
   , Buf & xbuf)
{
   typedef typename Buf::iterator buf_iterator;
   //[buffer, buffer_end) is the part of the current range1 parked in xbuf
   buf_iterator buffer = xbuf.data();
   buf_iterator buffer_end = buffer;
   RandIt first1 = first;
   RandIt last1 = first1 + l_irreg1;
   RandItKeys const key_end (key_first+n_bef_irreg2);

   bool is_range1_A = true; //first l_irreg1 elements are always from range A

   for( ; key_first != key_end; ++key_first, last1 += l_block){
      //If the trailing block is empty, we'll make it equal to the previous if empty
      bool const is_range2_A = key_comp(*key_first, midkey);

      if(is_range1_A == is_range2_A){
         //Same stream: nothing to merge.
         //If buffered, put those elements in place
         RandIt res = op(forward_t(), buffer, buffer_end, first1);
         BOOST_ASSERT(buffer == buffer_end || res == last1); (void)res;
         buffer_end = buffer;
         first1 = last1;
      }
      else {
         //Different streams: merge the next block into the frontier.
         //Note first2 == last1 (ranges are contiguous).
         first1 = op_partial_merge_with_buf(first1, last1, last1, last1 + l_block, xbuf, buffer, buffer_end, comp, op, is_range1_A);
         BOOST_ASSERT(buffer == buffer_end || (buffer_end-buffer) == (last1+l_block-first1));
         //If the buffer was fully drained, range1 was exhausted and the
         //surviving tail belongs to the other stream: flip the flag
         is_range1_A ^= buffer == buffer_end;
      }
   }

   //Now the trailing irregular block, first put buffered elements in place
   RandIt res = op(forward_t(), buffer, buffer_end, first1);
   BOOST_ASSERT(buffer == buffer_end || res == last1); (void)res;

   //NOTE(review): the documented precondition is "l_irreg2==0 implies
   //n_aft_irreg2==0", which would suggest (l_irreg2 || !n_aft_irreg2);
   //this assert additionally forbids calling with both zero — confirm
   //that callers never do so.
   BOOST_ASSERT(l_irreg2 || n_aft_irreg2);
   if(l_irreg2){
      bool const is_range2_A = false; //last l_irreg2 elements always from range B
      if(is_range1_A == is_range2_A){
         //Same stream: skip the remaining regular blocks entirely
         first1 = last1;
         last1 = last1+l_block*n_aft_irreg2;
      }
      else {
         //Different streams: extend range1 over the remaining regular
         //blocks and merge it against the irregular tail
         last1 += l_block*n_aft_irreg2;
      }
      xbuf.clear();
      op_buffered_merge(first1, last1, last1+l_irreg2, comp, op, xbuf);
   }
}
510 | ||
511 | ||
512 | template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Buf> | |
513 | void merge_blocks_with_buf | |
514 | ( RandItKeys key_first | |
515 | , const typename iterator_traits<RandItKeys>::value_type &midkey | |
516 | , KeyCompare key_comp | |
517 | , RandIt const first | |
518 | , typename iterator_traits<RandIt>::size_type const l_block | |
519 | , typename iterator_traits<RandIt>::size_type const l_irreg1 | |
520 | , typename iterator_traits<RandIt>::size_type const n_bef_irreg2 | |
521 | , typename iterator_traits<RandIt>::size_type const n_aft_irreg2 | |
522 | , typename iterator_traits<RandIt>::size_type const l_irreg2 | |
523 | , Compare comp | |
524 | , Buf & xbuf | |
525 | , bool const xbuf_used) | |
526 | { | |
527 | if(xbuf_used){ | |
528 | op_merge_blocks_with_buf | |
529 | (key_first, midkey, key_comp, first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, move_op(), xbuf); | |
530 | } | |
531 | else{ | |
532 | op_merge_blocks_with_buf | |
533 | (key_first, midkey, key_comp, first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, swap_op(), xbuf); | |
534 | } | |
535 | } | |
536 | ||
537 | /////////////////////////////////////////////////////////////////////////////// | |
538 | // | |
539 | // PARTIAL MERGE LEFT | |
540 | // | |
541 | /////////////////////////////////////////////////////////////////////////////// | |
542 | ||
//Repositions the pending range1 for the final merge when the internal
//buffer sits between range1 and range2: skips the leading elements of
//[first1, last1) that are already in place relative to next_key and
//moves the rest to the end of the gap, right before first2.
//Returns the new start of the (relocated) range1.
template<class RandIt, class Compare, class Op>
RandIt op_partial_merge_left_middle_buffer_impl
   (RandIt first1, RandIt const last1, RandIt const first2
   , const typename iterator_traits<RandIt>::value_type &next_key, Compare comp
   , Op op)
{
   first1 = skip_until_merge(first1, last1, next_key, comp);

   //Even if we copy backward, no overlapping occurs so use forward copy
   //that can be faster specially with trivial types
   RandIt const new_first1 = first2 - (last1 - first1);
   BOOST_ASSERT(last1 <= new_first1);
   op(forward_t(), first1, last1, new_first1);
   return new_first1;
}
558 | ||
559 | template<class RandIt, class Compare, class Op> | |
560 | RandIt op_partial_merge_left_middle_buffer | |
561 | ( RandIt first1, RandIt const last1, RandIt const first2 | |
562 | , const typename iterator_traits<RandIt>::value_type &next_key, Compare comp, Op op, bool is_stable) | |
563 | { | |
564 | return is_stable ? op_partial_merge_left_middle_buffer_impl(first1, last1, first2, next_key, comp, op) | |
565 | : op_partial_merge_left_middle_buffer_impl(first1, last1, first2, next_key, antistable<Compare>(comp), op); | |
566 | } | |
567 | ||
568 | // Partially merges two ordered ranges. Partially means that elements are merged | |
// until one of two ranges is exhausted (M elements from ranges 1 and 2).
570 | // [buf_first, ...) -> buffer that can be overwritten | |
571 | // [first1, last1) merge [last1,last2) -> [buf_first, buf_first+M) | |
572 | // Note: distance(buf_first, first1) >= distance(last1, last2), so no overlapping occurs. | |
//Partially merges [first1, last1) and [first2, last2) to the left.
//Two configurations are handled:
// - last1 != first2: the free gap [last1, first2) (same length as
//   range2) lies between the ranges; range1 is evacuated into the slots
//   just before first2 while merging starts into the gap.
// - last1 == first2: the ranges are contiguous and the free space lies
//   before first1 (presumably the buffer maintained by
//   op_merge_blocks_left — confirm against callers).
//Returns the new merge frontier: first2 if range1 was fully consumed,
//otherwise the start of range1's unconsumed remainder.
template<class RandIt, class Compare, class Op>
RandIt op_partial_merge_left_smart_impl
   ( RandIt first1, RandIt last1, RandIt first2, RandIt const last2, Compare comp, Op op)
{
   RandIt dest;
   if(last1 != first2){
      BOOST_ASSERT(0 != (last1-first1));
      BOOST_ASSERT((first2-last1)==(last2-first2));
      //Skip any element that does not need to be moved
      first1 = skip_until_merge(first1, last1, *first2, comp);
      if(first1 == last1)
         return first2;
      //Evacuate range1 into the slots immediately before first2 while
      //merging into the old range1 storage
      RandIt buf_first1 = first2 - (last1-first1);
      dest = last1;
      last1 = op_buffered_partial_merge_to_left_placed(first1, last1, first2, last2, buf_first1, comp, op);
      first1 = buf_first1;
      BOOST_ASSERT((first1-dest) == (last2-first2));
   }
   else{
      //Contiguous ranges: merge into the free space before range1
      dest = first1-(last2-first2);
   }

   //Merge the (possibly relocated) range1 remainder with range2
   op_partial_merge(first1, last1, first2, last2, dest, comp, op);
   return first1 == last1 ? first2 : first1;
}
598 | ||
599 | template<class RandIt, class Compare, class Op> | |
600 | RandIt op_partial_merge_left_smart | |
601 | (RandIt first1, RandIt const last1, RandIt first2, RandIt const last2, Compare comp, Op op, bool is_stable) | |
602 | { | |
603 | return is_stable ? op_partial_merge_left_smart_impl(first1, last1, first2, last2, comp, op) | |
604 | : op_partial_merge_left_smart_impl(first1, last1, first2, last2, antistable<Compare>(comp), op); | |
605 | } | |
606 | ||
607 | // first - first element to merge. | |
608 | // first[-l_block, 0) - buffer | |
609 | // l_block - length of regular blocks. Blocks are stable sorted by 1st elements and key-coded | |
610 | // key_first - sequence of keys, in same order as blocks. key<midkey means stream A | |
611 | // n_bef_irreg2/n_aft_irreg2 are regular blocks | |
// l_irreg2 is an irregular block, that is to be merged after n_bef_irreg2 blocks and before n_aft_irreg2 blocks
613 | // If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks). | |
//Merges key-coded blocks to the left, using the l_block free slots at
//first[-l_block, 0) as the internal buffer. The buffer "travels" right
//as blocks are merged; two states are tracked:
// - normal: [buffer, first1) holds the free slots before range1
// - "buffer middle" (last1 == buffer): range1 got relocated so the free
//   slots sit between range1 and the next block.
//key_comp(key, midkey) true means the corresponding block is stream A.
template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Op>
void op_merge_blocks_left
   ( RandItKeys key_first
   , const typename iterator_traits<RandItKeys>::value_type &midkey
   , KeyCompare key_comp
   , RandIt const first
   , typename iterator_traits<RandIt>::size_type const l_block
   , typename iterator_traits<RandIt>::size_type const l_irreg1
   , typename iterator_traits<RandIt>::size_type const n_bef_irreg2
   , typename iterator_traits<RandIt>::size_type const n_aft_irreg2
   , typename iterator_traits<RandIt>::size_type const l_irreg2
   , Compare comp, Op op)
{
   RandIt buffer = first - l_block;
   RandIt first1 = first;
   RandIt last1 = first1 + l_irreg1;
   RandIt first2 = last1;
   RandItKeys const key_end (key_first+n_bef_irreg2);
   bool is_range1_A = true;    //first l_irreg1 elements are always stream A
   for( ; key_first != key_end; first2 += l_block, ++key_first){
      //If the trailing block is empty, we'll make it equal to the previous if empty
      bool const is_range2_A = key_comp(*key_first, midkey);

      if(is_range1_A == is_range2_A){
         //Same stream: flush any pending range1 into the buffer slots
         //and advance to the next block
         if(last1 != buffer){ //equiv. to if(!is_buffer_middle)
            buffer = op(forward_t(), first1, last1, buffer);
         }
         first1 = first2;
         last1 = first2 + l_block;
      }
      else {
         //Different streams: merge the next block to the left
         RandIt const last2 = first2 + l_block;
         first1 = op_partial_merge_left_smart(first1, last1, first2, last2, comp, op, is_range1_A);

         if(first1 < first2){ //is_buffer_middle for the next iteration
            //Range1's remainder ends before first2: free slots now lie
            //between range1 and the next block
            last1 = first2;
            buffer = last1;
         }
         else{ //!is_buffer_middle for the next iteration
            //Range1 fully consumed: the surviving block becomes range1
            is_range1_A = is_range2_A;
            buffer = first1 - l_block;
            last1 = last2;
         }
      }
   }

   //Now the trailing irregular block
   bool const is_range2_A = false; //Trailing l_irreg2 is always from Range B
   bool const is_buffer_middle = last1 == buffer;

   if(!l_irreg2 || is_range1_A == is_range2_A){ //trailing is always B type
      //If range1 is buffered, write it to its final position
      if(!is_buffer_middle){
         buffer = op(forward_t(), first1, last1, buffer);
      }
      first1 = first2;
   }
   else {
      if(is_buffer_middle){
         //Relocate range1 next to first2 so the buffer precedes it again
         first1 = op_partial_merge_left_middle_buffer(first1, last1, first2, first2[l_block*n_aft_irreg2], comp, op, is_range1_A);
         buffer = first1 - l_block;
      }
   }
   //Final merge of the remaining elements with the irregular tail
   last1 = first2 + l_block*n_aft_irreg2;
   op_merge_left(buffer, first1, last1, last1+l_irreg2, comp, op);
}
680 | ||
681 | /////////////////////////////////////////////////////////////////////////////// | |
682 | // | |
683 | // PARTIAL MERGE BUFFERLESS | |
684 | // | |
685 | /////////////////////////////////////////////////////////////////////////////// | |
686 | ||
687 | // [first1, last1) merge [last1,last2) -> [first1,last2) | |
//In-place merge of [first1, last1) and [last1, last2) using GCD-based
//rotations — O(1) extra memory. Merges until range1 is exhausted or the
//end is reached. If merging stops because range1 ran out first, the
//tail that remains pending now belongs to the other stream, so
//*pis_range1_A is flipped and last1 (start of the pending tail) is
//returned; otherwise the position where merging completed is returned.
template<class RandIt, class Compare>
RandIt partial_merge_bufferless_impl
   (RandIt first1, RandIt last1, RandIt const last2, bool *const pis_range1_A, Compare comp)
{
   if(last1 == last2){
      return first1;
   }
   bool const is_range1_A = *pis_range1_A;
   //Only work if the ranges actually interleave
   if(first1 != last1 && comp(*last1, last1[-1])){
      do{
         //Find how much of range2 precedes *first1 and rotate it
         //in front of the remaining range1
         RandIt const old_last1 = last1;
         last1 = lower_bound(last1, last2, *first1, comp);
         first1 = rotate_gcd(first1, old_last1, last1);//old_last1 == last1 supported
         if(last1 == last2){
            return first1;
         }
         //Advance past the range1 elements that are now in final position
         do{
            ++first1;
         } while(last1 != first1 && !comp(*last1, *first1) );
      } while(first1 != last1);
   }
   //Range1 exhausted: the pending tail switches streams
   *pis_range1_A = !is_range1_A;
   return last1;
}
712 | ||
713 | // [first1, last1) merge [last1,last2) -> [first1,last2) | |
714 | template<class RandIt, class Compare> | |
715 | RandIt partial_merge_bufferless | |
716 | (RandIt first1, RandIt last1, RandIt const last2, bool *const pis_range1_A, Compare comp) | |
717 | { | |
718 | return *pis_range1_A ? partial_merge_bufferless_impl(first1, last1, last2, pis_range1_A, comp) | |
719 | : partial_merge_bufferless_impl(first1, last1, last2, pis_range1_A, antistable<Compare>(comp)); | |
720 | } | |
721 | ||
722 | ||
723 | ||
724 | // l_block - length of regular blocks. First nblocks are stable sorted by 1st elements and key-coded | |
725 | // keys - sequence of keys, in same order as blocks. key<midkey means stream A | |
// n_aft_irreg2 are regular blocks from stream A. l_irreg2 is the length of the last (irregular) block from stream B, that should go before n_aft_irreg2 blocks.
727 | // l_irreg2=0 requires n_aft_irreg2=0 (no irregular blocks). l_irreg2>0, n_aft_irreg2=0 is possible. | |
//Merges key-coded blocks entirely in place (no external buffer), using
//rotation-based partial merges. Layout and key coding are the same as
//in op_merge_blocks_with_buf: key_comp(key, midkey) true means stream A.
template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
void merge_blocks_bufferless
   ( RandItKeys key_first
   , const typename iterator_traits<RandItKeys>::value_type &midkey
   , KeyCompare key_comp
   , RandIt first
   , typename iterator_traits<RandIt>::size_type const l_block
   , typename iterator_traits<RandIt>::size_type const l_irreg1
   , typename iterator_traits<RandIt>::size_type const n_bef_irreg2
   , typename iterator_traits<RandIt>::size_type const n_aft_irreg2
   , typename iterator_traits<RandIt>::size_type const l_irreg2
   , Compare comp)
{
   if(n_bef_irreg2 == 0){
      //No regular blocks before the irregular tail: a single merge does it
      RandIt const last_reg(first+l_irreg1+n_aft_irreg2*l_block);
      merge_bufferless(first, last_reg, last_reg+l_irreg2, comp);
   }
   else{
      RandIt first1 = first;
      RandIt last1 = l_irreg1 ? first + l_irreg1: first + l_block;
      RandItKeys const key_end (key_first+n_bef_irreg2);
      //If there is no irregular prefix, the first block itself seeds
      //range1 and its key is consumed here (note the key_first++)
      bool is_range1_A = l_irreg1 ? true : key_comp(*key_first++, midkey);

      for( ; key_first != key_end; ++key_first){
         bool is_range2_A = key_comp(*key_first, midkey);
         if(is_range1_A == is_range2_A){
            //Same stream: already ordered, just advance the frontier
            first1 = last1;
         }
         else{
            //Different streams: in-place partial merge; may flip
            //is_range1_A if range1 is exhausted first
            first1 = partial_merge_bufferless(first1, last1, last1 + l_block, &is_range1_A, comp);
         }
         last1 += l_block;
      }

      if(l_irreg2){
         //Trailing irregular block is stream B: if the pending range is
         //also B, skip it (presumably already ordered relative to the
         //tail — confirm against the block invariants)
         if(!is_range1_A){
            first1 = last1;
         }
         last1 += l_block*n_aft_irreg2;
         merge_bufferless(first1, last1, last1+l_irreg2, comp);
      }
   }
}
771 | ||
772 | /////////////////////////////////////////////////////////////////////////////// | |
773 | // | |
774 | // BUFFERED MERGE | |
775 | // | |
776 | /////////////////////////////////////////////////////////////////////////////// | |
//Merges the sorted ranges [first, middle) and [middle, last) with the
//help of the external buffer xbuf. Binary searches first trim the
//already-ordered ends, then only the smaller of the two remaining
//subranges is moved to the buffer, so xbuf needs capacity for at most
//min(len1, len2) elements. Does nothing if the ranges are already in
//order (no interleaving at the boundary).
template<class RandIt, class Compare, class Op, class Buf>
void op_buffered_merge
   ( RandIt first, RandIt const middle, RandIt last
   , Compare comp, Op op
   , Buf &xbuf)
{
   if(first != middle && middle != last && comp(*middle, middle[-1])){
      typedef typename iterator_traits<RandIt>::size_type size_type;
      size_type const len1 = size_type(middle-first);
      size_type const len2 = size_type(last-middle);
      if(len1 <= len2){
         //Buffer the left side: skip its prefix that already precedes
         //*middle, park the rest in xbuf, then merge right-placed
         first = upper_bound(first, middle, *middle, comp);
         xbuf.move_assign(first, size_type(middle-first));
         op_merge_with_right_placed
            (xbuf.data(), xbuf.end(), first, middle, last, comp, op);
      }
      else{
         //Buffer the right side: drop its suffix that already follows
         //middle[-1], park the rest in xbuf, then merge left-placed
         last = lower_bound(middle, last, middle[-1], comp);
         xbuf.move_assign(middle, size_type(last-middle));
         op_merge_with_left_placed
            (first, middle, last, xbuf.data(), xbuf.end(), comp, op);
      }
   }
}
801 | ||
//Convenience wrapper over op_buffered_merge that transfers elements
//with plain moves (move_op).
template<class RandIt, class Compare>
void buffered_merge
   ( RandIt first, RandIt const middle, RandIt last
   , Compare comp
   , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> &xbuf)
{
   op_buffered_merge(first, middle, last, comp, move_op(), xbuf);
}
810 | ||
811 | // Complexity: 2*distance(first, last)+max_collected^2/2 | |
812 | // | |
813 | // Tries to collect at most n_keys unique elements from [first, last), | |
814 | // in the begining of the range, and ordered according to comp | |
815 | // | |
816 | // Returns the number of collected keys | |
template<class RandIt, class Compare>
typename iterator_traits<RandIt>::size_type
   collect_unique
      ( RandIt const first, RandIt const last
      , typename iterator_traits<RandIt>::size_type const max_collected, Compare comp
      , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   typedef typename iterator_traits<RandIt>::value_type value_type;
   //h: number of unique keys collected so far.
   //[h0, h0+h) is the current (sorted) run of collected keys, which is
   //slid right through the range as more elements are scanned.
   size_type h = 0;
   if(max_collected){
      ++h;  // first key is always here
      RandIt h0 = first;
      RandIt u = first; ++u;         //u: next candidate to examine
      RandIt search_end = u;         //one-past the end of the key run

      if(xbuf.capacity() >= max_collected){
         //Fast path: keep the sorted keys in the external buffer so that
         //inserting a new key is a buffer insert, not an in-place rotation.
         value_type *const ph0 = xbuf.add(first);
         while(u != last && h < max_collected){
            //Binary search for the candidate among the collected keys
            value_type * const r = lower_bound(ph0, xbuf.end(), *u, comp);
            //If key not found add it to [h, h+h0)
            if(r == xbuf.end() || comp(*u, *r) ){
               //Slide the key run right so it ends just before u
               RandIt const new_h0 = boost::move(search_end, u, h0);
               search_end = u;
               ++search_end;
               ++h;
               xbuf.insert(r, u);
               h0 = new_h0;
            }
            ++u;
         }
         //Make room at the front and copy the sorted keys back from the buffer
         boost::move_backward(first, h0, h0+h);
         boost::move(xbuf.data(), xbuf.end(), first);
      }
      else{
         //In-place path: the key run itself is kept sorted via rotations
         while(u != last && h < max_collected){
            RandIt const r = lower_bound(h0, search_end, *u, comp);
            //If key not found add it to [h, h+h0)
            if(r == search_end || comp(*u, *r) ){
               //Bring the key run adjacent to u, then rotate u into its
               //sorted position inside the run
               RandIt const new_h0 = rotate_gcd(h0, search_end, u);
               search_end = u;
               ++search_end;
               ++h;
               rotate_gcd(r+(new_h0-h0), u, search_end);
               h0 = new_h0;
            }
            ++u;
         }
         //Move the collected keys to the beginning of the range
         rotate_gcd(first, h0, h0+h);
      }
   }
   return h;
}
870 | ||
template<class Unsigned>
Unsigned floor_sqrt(Unsigned const n)
{
   //Integer Newton iteration for floor(sqrt(n)): start at (or above)
   //the root and descend until the sequence stops decreasing.
   //For n <= 1 the loop never runs and n itself is returned.
   Unsigned cur  = n;
   Unsigned next = cur/2 + (cur & 1);
   while(next < cur){
      cur  = next;
      next = (cur + n/cur)/2;
   }
   return cur;
}
882 | ||
template<class Unsigned>
Unsigned ceil_sqrt(Unsigned const n)
{
   //Smallest integer r with r*r >= n.
   //Fix: the previous test `(n % r) != 0` under-reported some values
   //(e.g. n == 8: floor_sqrt(8) == 2 and 8 % 2 == 0, so 2 was returned
   //although ceil(sqrt(8)) == 3) and divided by zero for n == 0.
   //Comparing r*r against n is exact, and safe from overflow because
   //r == floor_sqrt(n) guarantees r*r <= n.
   Unsigned const r = floor_sqrt(n);
   return r + Unsigned(r*r != n);
}
889 | ||
890 | template<class Unsigned> | |
891 | Unsigned floor_merge_multiple(Unsigned const n, Unsigned &base, Unsigned &pow) | |
892 | { | |
893 | Unsigned s = n; | |
894 | Unsigned p = 0; | |
895 | while(s > AdaptiveSortInsertionSortThreshold){ | |
896 | s /= 2; | |
897 | ++p; | |
898 | } | |
899 | base = s; | |
900 | pow = p; | |
901 | return s << p; | |
902 | } | |
903 | ||
template<class Unsigned>
Unsigned ceil_merge_multiple(Unsigned const n, Unsigned &base, Unsigned &pow)
{
   //Smallest value of the form base*2^pow (with base not above the
   //insertion sort threshold) that is >= n. Outputs base and pow.
   Unsigned fm = floor_merge_multiple(n, base, pow);

   if(fm != n){
      //floor result fell short of n: round up by bumping base,
      //or, if base is already at the threshold, halve it (rounding up)
      //and use one more doubling instead.
      if(base < AdaptiveSortInsertionSortThreshold){
         ++base;
      }
      else{
         base = AdaptiveSortInsertionSortThreshold/2 + 1;
         ++pow;
      }
   }
   return base << pow;
}
920 | ||
template<class Unsigned>
Unsigned ceil_sqrt_multiple(Unsigned const n, Unsigned *pbase = 0)
{
   //Rounds ceil_sqrt(n) up to the nearest "merge multiple"
   //(base*2^pow). Optionally reports the base through pbase.
   Unsigned base = 0;
   Unsigned pow  = 0;
   Unsigned const result = ceil_merge_multiple(ceil_sqrt(n), base, pow);
   if(pbase){
      *pbase = base;
   }
   return result;
}
931 | ||
template<class Unsigned>
Unsigned ceil_sqrt_pow2(Unsigned const n)
{
   //Smallest power of two r such that r*r >= n.
   //sq tracks r*r (r << exp == 2^exp * 2^exp); the `sq != 0` test
   //stops the loop should the square wrap around to zero.
   Unsigned r = 1;
   Unsigned exp = 0;
   Unsigned sq = 1u;
   while(sq != 0 && sq < n){
      r *= 2;
      ++exp;
      sq = r << exp;
   }
   return r;
}
945 | ||
//Default ordering predicate used when the caller supplies no comparison.
struct less
{
   //operator() is now const so the functor can be invoked through
   //const instances/references (as generic comparator wrappers commonly
   //require); this is a backward-compatible relaxation.
   template<class T>
   bool operator()(const T &l, const T &r) const
   {  return l < r;   }
};
952 | ||
953 | /////////////////////////////////////////////////////////////////////////////// | |
954 | // | |
955 | // MERGE BLOCKS | |
956 | // | |
957 | /////////////////////////////////////////////////////////////////////////////// | |
958 | ||
959 | //#define ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN | |
960 | ||
961 | #if defined ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN | |
template<class RandIt, class Compare>
void slow_stable_sort
   ( RandIt const first, RandIt const last, Compare comp)
{
   //O(N*log(N)) variant: delegate to the generic in-place stable sort.
   boost::movelib::inplace_stable_sort(first, last, comp);
}
968 | ||
969 | #else //ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN | |
970 | ||
template<class RandIt, class Compare>
void slow_stable_sort
   ( RandIt const first, RandIt const last, Compare comp)
{
   //Constant-extra-memory stable sort: insertion-sort fixed-size chunks,
   //then merge chunk pairs bottom-up with the bufferless merge.
   typedef typename iterator_traits<RandIt>::size_type size_type;
   size_type L = size_type(last - first);
   {  //Use insertion sort to merge first elements
      size_type m = 0;
      while((L - m) > size_type(AdaptiveSortInsertionSortThreshold)){
         insertion_sort(first+m, first+m+size_type(AdaptiveSortInsertionSortThreshold), comp);
         m += AdaptiveSortInsertionSortThreshold;
      }
      //Trailing, possibly shorter, chunk
      insertion_sort(first+m, last, comp);
   }

   //h is the current sorted-run length; it doubles every pass.
   //do_merge stays true while at least two runs remain to be merged.
   size_type h = AdaptiveSortInsertionSortThreshold;
   for(bool do_merge = L > h; do_merge; h*=2){
      do_merge = (L - h) > h;
      size_type p0 = 0;
      if(do_merge){
         size_type const h_2 = 2*h;
         //Merge full pairs of h-sized runs
         while((L-p0) > h_2){
            merge_bufferless(first+p0, first+p0+h, first+p0+h_2, comp);
            p0 += h_2;
         }
      }
      //Merge the trailing run with the (shorter) remainder, if any
      if((L-p0) > h){
         merge_bufferless(first+p0, first+p0+h, last, comp);
      }
   }
}
1002 | ||
1003 | #endif //ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN | |
1004 | ||
//Decides the block length to use when combining runs of total length
//l_data with n_keys collected keys.
//Returns the new l_block and sets use_buf to tell the caller whether
//part of the keys can be dedicated as an internal merge buffer.
template<class Unsigned>
Unsigned lblock_for_combine
   (Unsigned const l_block, Unsigned const n_keys, Unsigned const l_data, bool &use_buf)
{
   BOOST_ASSERT(l_data > 1);

   //We need to guarantee lblock >= l_merged/(n_keys/2) keys for the combination.
   //We have at least 4 keys guaranteed (which are the minimum to merge 2 ranges)
   //If l_block != 0, then n_keys is already enough to merge all blocks in all
   //phases as we've found all needed keys for that buffer and length before.
   //If l_block == 0 then see if half keys can be used as buffer and the rest
   //as keys guaranteeing that n_keys >= (2*l_merged)/lblock =
   if(!l_block){
      //If l_block == 0 then n_keys is power of two
      //(guaranteed by build_params(...))
      BOOST_ASSERT(n_keys >= 4);
      //BOOST_ASSERT(0 == (n_keys &(n_keys-1)));

      //See if half keys are at least 4 and if half keys fulfill
      Unsigned const new_buf  = n_keys/2;
      Unsigned const new_keys = n_keys-new_buf;
      use_buf = new_keys >= 4 && new_keys >= l_data/new_buf;
      if(use_buf){
         //Enough keys remain: use half of them as the internal buffer
         return new_buf;
      }
      else{
         //Not enough keys for a buffer: derive the block size from the
         //key count alone (bufferless combination)
         return l_data/n_keys;
      }
   }
   else{
      //A valid block length was already computed earlier
      use_buf = true;
      return l_block;
   }
}
1040 | ||
1041 | ||
1042 | //Although "cycle" sort is known to have the minimum number of writes to target | |
1043 | //selection sort is more appropriate here as we want to minimize swaps. | |
1044 | template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf> | |
1045 | void selection_sort_blocks | |
1046 | ( RandItKeys keys | |
1047 | , typename iterator_traits<RandIt>::size_type &midkey_idx //inout | |
1048 | , KeyCompare key_comp | |
1049 | , RandIt const first_block | |
1050 | , typename iterator_traits<RandIt>::size_type const l_block | |
1051 | , typename iterator_traits<RandIt>::size_type const n_blocks | |
1052 | , Compare comp | |
1053 | , bool use_first_element | |
1054 | , XBuf & xbuf | |
1055 | ) | |
1056 | { | |
1057 | typedef typename iterator_traits<RandIt>::size_type size_type ; | |
1058 | size_type const back_midkey_idx = midkey_idx; | |
1059 | typedef typename iterator_traits<RandIt>::size_type size_type; | |
1060 | typedef typename iterator_traits<RandIt>::value_type value_type; | |
1061 | ||
1062 | //Nothing to sort if 0 or 1 blocks or all belong to the first ordered half | |
1063 | if(n_blocks < 2 || back_midkey_idx >= n_blocks){ | |
1064 | return; | |
1065 | } | |
1066 | //One-past the position of the first untouched element of the second half | |
1067 | size_type high_watermark = back_midkey_idx+1; | |
1068 | BOOST_ASSERT(high_watermark <= n_blocks); | |
1069 | const bool b_cache_on = xbuf.capacity() >= l_block; | |
1070 | //const bool b_cache_on = false; | |
1071 | const size_type cached_none = size_type(-1); | |
1072 | size_type cached_block = cached_none; | |
1073 | ||
1074 | //Sort by first element if left merging, last element otherwise | |
1075 | size_type const reg_off = use_first_element ? 0u: l_block-1; | |
1076 | ||
1077 | for(size_type block=0; block < n_blocks-1; ++block){ | |
1078 | size_type min_block = block; | |
1079 | //Since we are searching for the minimum value in two sorted halves: | |
1080 | //Optimization 1: If block belongs to first half, don't waste time comparing elements of the first half. | |
1081 | //Optimization 2: It is enough to compare until the first untouched element of the second half. | |
1082 | //Optimization 3: If cache memory is available, instead of swapping blocks (3 writes per element), | |
1083 | // play with the cache to aproximate it to 2 writes per element. | |
1084 | high_watermark = size_type(max_value(block+2, high_watermark)); | |
1085 | BOOST_ASSERT(high_watermark <= n_blocks); | |
1086 | for(size_type next_block = size_type(max_value(block+1, back_midkey_idx)); next_block < high_watermark; ++next_block){ | |
1087 | const value_type &min_v = (b_cache_on && (cached_block == min_block) ? xbuf.data()[reg_off] : first_block[min_block*l_block+reg_off]); | |
1088 | const value_type &v = (b_cache_on && (cached_block == next_block) ? xbuf.data()[reg_off] : first_block[next_block*l_block+reg_off]); | |
1089 | ||
1090 | if( comp(v, min_v) || (!comp(min_v, v) && key_comp(keys[next_block], keys[min_block])) ){ | |
1091 | min_block = next_block; | |
1092 | } | |
1093 | } | |
1094 | ||
1095 | if(min_block != block){ | |
1096 | BOOST_ASSERT(block >= back_midkey_idx || min_block >= back_midkey_idx); | |
1097 | BOOST_ASSERT(min_block < high_watermark); | |
1098 | //Increase high watermark if not the maximum and min_block is just before the high watermark | |
1099 | high_watermark += size_type((min_block + 1) != n_blocks && (min_block + 1) == high_watermark); | |
1100 | BOOST_ASSERT(high_watermark <= n_blocks); | |
1101 | if(!b_cache_on){ | |
1102 | boost::adl_move_swap_ranges(first_block+block*l_block, first_block+(block+1)*l_block, first_block+min_block*l_block); | |
1103 | } | |
1104 | else if(cached_block == cached_none){ | |
1105 | //Cache the biggest block and put the minimum into its final position | |
1106 | xbuf.move_assign(first_block+block*l_block, l_block); | |
1107 | boost::move(first_block+min_block*l_block, first_block+(min_block+1)*l_block, first_block+block*l_block); | |
1108 | cached_block = min_block; | |
1109 | } | |
1110 | else if(cached_block == block){ | |
1111 | //Since block is cached and is not the minimum, just put the minimum directly into its final position and update the cache index | |
1112 | boost::move(first_block+min_block*l_block, first_block+(min_block+1)*l_block, first_block+block*l_block); | |
1113 | cached_block = min_block; | |
1114 | } | |
1115 | else if(cached_block == min_block){ | |
1116 | //Since the minimum is cached, move the block to the back position and flush the cache to its final position | |
1117 | boost::move(first_block+block*l_block, first_block+(block+1)*l_block, first_block+min_block*l_block); | |
1118 | boost::move(xbuf.data(), xbuf.end(), first_block+block*l_block); | |
1119 | cached_block = cached_none; | |
1120 | } | |
1121 | else{ | |
1122 | //Cached block is not any of two blocks to be exchanged, a smarter operation must be performed | |
1123 | BOOST_ASSERT(cached_block != min_block); | |
1124 | BOOST_ASSERT(cached_block != block); | |
1125 | BOOST_ASSERT(cached_block > block); | |
1126 | BOOST_ASSERT(cached_block < high_watermark); | |
1127 | //Instead of moving block to the slot of the minimum (which is typical selection sort), before copying | |
1128 | //data from the minimum slot to its final position: | |
1129 | // -> move it to free slot pointed by cached index, and | |
1130 | // -> move cached index into slot of the minimum. | |
1131 | //Since both cached_block and min_block belong to the still unordered range of blocks, the change | |
1132 | //does not break selection sort and saves one copy. | |
1133 | boost::move(first_block+block*l_block, first_block+(block+1)*l_block, first_block+cached_block*l_block); | |
1134 | boost::move(first_block+min_block*l_block, first_block+(min_block+1)*l_block, first_block+block*l_block); | |
1135 | //Note that this trick requires an additionl fix for keys and midkey index | |
1136 | boost::adl_move_swap(keys[cached_block], keys[min_block]); | |
1137 | if(midkey_idx == cached_block) | |
1138 | midkey_idx = min_block; | |
1139 | else if(midkey_idx == min_block) | |
1140 | midkey_idx = cached_block; | |
1141 | boost::adl_move_swap(cached_block, min_block); | |
1142 | } | |
1143 | //Once min_block and block are exchanged, fix the movement imitation key buffer and midkey index. | |
1144 | boost::adl_move_swap(keys[block], keys[min_block]); | |
1145 | if(midkey_idx == block) | |
1146 | midkey_idx = min_block; | |
1147 | else if(midkey_idx == min_block) | |
1148 | midkey_idx = block; | |
1149 | } | |
1150 | else if(b_cache_on && cached_block == block){ | |
1151 | //The selected block was the minimum, but since it was cached, move it to its final position | |
1152 | boost::move(xbuf.data(), xbuf.end(), first_block+block*l_block); | |
1153 | cached_block = cached_none; | |
1154 | } | |
1155 | } //main for loop | |
1156 | ||
1157 | if(b_cache_on && cached_block != cached_none){ | |
1158 | //The sort has ended with cached data, move it to its final position | |
1159 | boost::move(xbuf.data(), xbuf.end(), first_block+cached_block*l_block); | |
1160 | } | |
1161 | } | |
1162 | ||
1163 | template<class RandIt, class Compare, class XBuf> | |
1164 | void stable_sort( RandIt first, RandIt last, Compare comp, XBuf & xbuf) | |
1165 | { | |
1166 | typedef typename iterator_traits<RandIt>::size_type size_type; | |
1167 | size_type const len = size_type(last - first); | |
1168 | size_type const half_len = len/2 + (len&1); | |
1169 | if(std::size_t(xbuf.capacity() - xbuf.size()) >= half_len) { | |
1170 | merge_sort(first, last, comp, xbuf.data()+xbuf.size()); | |
1171 | } | |
1172 | else{ | |
1173 | slow_stable_sort(first, last, comp); | |
1174 | } | |
1175 | } | |
1176 | ||
template<class RandIt, class Comp, class XBuf>
void initialize_keys( RandIt first, RandIt last
                    , Comp comp
                    , XBuf & xbuf)
{
   //Generic key initialization: put the keys in order by stable-sorting
   //them with the user-provided comparison.
   stable_sort(first, last, comp, xbuf);
}
1184 | ||
1185 | template<class RandIt, class U> | |
1186 | void initialize_keys( RandIt first, RandIt last | |
1187 | , less | |
1188 | , U &) | |
1189 | { | |
1190 | typedef typename iterator_traits<RandIt>::value_type value_type; | |
1191 | std::size_t count = std::size_t(last - first); | |
1192 | for(std::size_t i = 0; i != count; ++i){ | |
1193 | *first = value_type(i); | |
1194 | ++first; | |
1195 | } | |
1196 | } | |
1197 | ||
//Computes the parameters of a combination step (block counts, irregular
//lengths, midkey index), initializes the movement-imitation keys and
//selection-sorts the regular blocks, leaving everything ready for
//merge_blocks_left/right/bufferless.
template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf>
void combine_params
   ( RandItKeys const keys
   , KeyCompare key_comp
   , RandIt const first
   , typename iterator_traits<RandIt>::size_type l_combined
   , typename iterator_traits<RandIt>::size_type const l_prev_merged
   , typename iterator_traits<RandIt>::size_type const l_block
   , XBuf & xbuf
   , Compare comp
   //Output
   , typename iterator_traits<RandIt>::size_type &midkey_idx
   , typename iterator_traits<RandIt>::size_type &l_irreg1
   , typename iterator_traits<RandIt>::size_type &n_bef_irreg2
   , typename iterator_traits<RandIt>::size_type &n_aft_irreg2
   , typename iterator_traits<RandIt>::size_type &l_irreg2
   //Options
   , bool is_merge_left_or_bufferless
   , bool do_initialize_keys = true)
{
   typedef typename iterator_traits<RandIt>::size_type  size_type;
   typedef typename iterator_traits<RandIt>::value_type value_type;

   //Initial parameters for selection sort blocks
   //l_irreg1: leading partial block (remainder of the first merged run)
   //l_irreg2: trailing partial block of the combined range
   l_irreg1 = l_prev_merged%l_block;
   l_irreg2 = (l_combined-l_irreg1)%l_block;
   BOOST_ASSERT(((l_combined-l_irreg1-l_irreg2)%l_block) == 0);
   size_type const n_reg_block = (l_combined-l_irreg1-l_irreg2)/l_block;
   //Key index separating stream A blocks from stream B blocks
   midkey_idx = l_prev_merged/l_block;
   BOOST_ASSERT(n_reg_block>=midkey_idx);

   //Key initialization
   if (do_initialize_keys) {
      initialize_keys(keys, keys+n_reg_block+(midkey_idx==n_reg_block), key_comp, xbuf);
   }
   BOOST_MOVE_ADAPTIVE_SORT_PRINT("   A initkey: ", l_combined + l_block);

   //Selection sort blocks
   selection_sort_blocks(keys, midkey_idx, key_comp, first+l_irreg1, l_block, n_reg_block, comp, is_merge_left_or_bufferless, xbuf);
   BOOST_MOVE_ADAPTIVE_SORT_PRINT("   A selsort: ", l_combined + l_block);

   //Special case for the last elements: count how many regular blocks
   //(scanning backwards) have their key element greater than the first
   //element of the irregular block — those must be merged after it.
   n_aft_irreg2 = 0;
   if(l_irreg2 != 0){
      size_type const reg_off   = is_merge_left_or_bufferless ? 0u: l_block-1;
      size_type const irreg_off = is_merge_left_or_bufferless ? 0u: l_irreg2-1;
      RandIt prev_block_first = first + l_combined - l_irreg2;
      const value_type &incomplete_block_first = prev_block_first[irreg_off];
      while(n_aft_irreg2 != n_reg_block &&
            comp(incomplete_block_first, (prev_block_first-= l_block)[reg_off]) ){
         ++n_aft_irreg2;
      }
   }
   n_bef_irreg2 = n_reg_block-n_aft_irreg2;
}
1253 | ||
1254 | // first - first element to merge. | |
1255 | // first[-l_block, 0) - buffer (if use_buf == true) | |
1256 | // l_block - length of regular blocks. First nblocks are stable sorted by 1st elements and key-coded | |
1257 | // keys - sequence of keys, in same order as blocks. key<midkey means stream A | |
1258 | // n_bef_irreg2/n_aft_irreg2 are regular blocks | |
1259 | // l_irreg2 is a irregular block, that is to be combined after n_bef_irreg2 blocks and before n_aft_irreg2 blocks | |
1260 | // If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks). | |
1261 | template<class RandItKeys, class KeyCompare, class RandIt, class Compare> | |
1262 | void merge_blocks_left | |
1263 | ( RandItKeys const key_first | |
1264 | , const typename iterator_traits<RandItKeys>::value_type &midkey | |
1265 | , KeyCompare key_comp | |
1266 | , RandIt const first | |
1267 | , typename iterator_traits<RandIt>::size_type const l_block | |
1268 | , typename iterator_traits<RandIt>::size_type const l_irreg1 | |
1269 | , typename iterator_traits<RandIt>::size_type const n_bef_irreg2 | |
1270 | , typename iterator_traits<RandIt>::size_type const n_aft_irreg2 | |
1271 | , typename iterator_traits<RandIt>::size_type const l_irreg2 | |
1272 | , Compare comp | |
1273 | , bool const xbuf_used) | |
1274 | { | |
1275 | if(xbuf_used){ | |
1276 | op_merge_blocks_left | |
1277 | (key_first, midkey, key_comp, first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, move_op()); | |
1278 | } | |
1279 | else{ | |
1280 | op_merge_blocks_left | |
1281 | (key_first, midkey, key_comp, first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, swap_op()); | |
1282 | } | |
1283 | } | |
1284 | ||
1285 | ||
1286 | // first - first element to merge. | |
1287 | // [first+l_block*(n_bef_irreg2+n_aft_irreg2)+l_irreg2, first+l_block*(n_bef_irreg2+n_aft_irreg2+1)+l_irreg2) - buffer | |
1288 | // l_block - length of regular blocks. First nblocks are stable sorted by 1st elements and key-coded | |
1289 | // keys - sequence of keys, in same order as blocks. key<midkey means stream A | |
1290 | // n_bef_irreg2/n_aft_irreg2 are regular blocks | |
1291 | // l_irreg2 is a irregular block, that is to be combined after n_bef_irreg2 blocks and before n_aft_irreg2 blocks | |
1292 | // If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks). | |
template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
void merge_blocks_right
   ( RandItKeys const key_first
   , const typename iterator_traits<RandItKeys>::value_type &midkey
   , KeyCompare key_comp
   , RandIt const first
   , typename iterator_traits<RandIt>::size_type const l_block
   , typename iterator_traits<RandIt>::size_type const n_bef_irreg2
   , typename iterator_traits<RandIt>::size_type const n_aft_irreg2
   , typename iterator_traits<RandIt>::size_type const l_irreg2
   , Compare comp
   , bool const xbuf_used)
{
   //Implemented as a mirrored merge_blocks_left: iterate keys and data
   //through reverse iterators and invert both comparisons, so the buffer
   //that lies to the RIGHT of the data plays the role of the left buffer.
   //In the reversed view the irregular block comes first (passed as
   //l_irreg1) and there are no trailing irregular blocks (0, 0).
   merge_blocks_left
      ( make_reverse_iterator(key_first+n_aft_irreg2 + n_bef_irreg2)
      , midkey
      , negate<KeyCompare>(key_comp)
      , make_reverse_iterator(first+(n_bef_irreg2+n_aft_irreg2)*l_block+l_irreg2)
      , l_block
      , l_irreg2
      , n_aft_irreg2 + n_bef_irreg2
      , 0
      , 0
      , inverse<Compare>(comp), xbuf_used);
}
1318 | ||
1319 | ||
1320 | template<class RandIt> | |
1321 | void move_data_backward( RandIt cur_pos | |
1322 | , typename iterator_traits<RandIt>::size_type const l_data | |
1323 | , RandIt new_pos | |
1324 | , bool const xbuf_used) | |
1325 | { | |
1326 | //Move buffer to the total combination right | |
1327 | if(xbuf_used){ | |
1328 | boost::move_backward(cur_pos, cur_pos+l_data, new_pos+l_data); | |
1329 | } | |
1330 | else{ | |
1331 | boost::adl_move_swap_ranges_backward(cur_pos, cur_pos+l_data, new_pos+l_data); | |
1332 | //Rotate does less moves but it seems slower due to cache issues | |
1333 | //rotate_gcd(first-l_block, first+len-l_block, first+len); | |
1334 | } | |
1335 | } | |
1336 | ||
1337 | template<class RandIt> | |
1338 | void move_data_forward( RandIt cur_pos | |
1339 | , typename iterator_traits<RandIt>::size_type const l_data | |
1340 | , RandIt new_pos | |
1341 | , bool const xbuf_used) | |
1342 | { | |
1343 | //Move buffer to the total combination right | |
1344 | if(xbuf_used){ | |
1345 | boost::move(cur_pos, cur_pos+l_data, new_pos); | |
1346 | } | |
1347 | else{ | |
1348 | boost::adl_move_swap_ranges(cur_pos, cur_pos+l_data, new_pos); | |
1349 | //Rotate does less moves but it seems slower due to cache issues | |
1350 | //rotate_gcd(first-l_block, first+len-l_block, first+len); | |
1351 | } | |
1352 | } | |
1353 | ||
template <class Unsigned>
Unsigned calculate_total_combined(Unsigned const len, Unsigned const l_prev_merged, Unsigned *pl_irreg_combined = 0)
{
   //A combination pass joins pairs of already-merged runs of length
   //l_prev_merged into runs of length 2*l_prev_merged. The tail that
   //does not fill a whole pair is "irregular"; when it is no longer than
   //a single run it is already merged and is excluded from this pass.
   //Returns the number of elements the pass will combine and optionally
   //reports the irregular tail length through pl_irreg_combined.
   Unsigned const l_combined = 2*l_prev_merged;
   Unsigned tail  = len % l_combined;
   Unsigned total = len;
   if(tail <= l_prev_merged){
      total -= tail;
      tail = 0;
   }
   if(pl_irreg_combined){
      *pl_irreg_combined = tail;
   }
   return total;
}
1370 | ||
1371 | // keys are on the left of first: | |
1372 | // If use_buf: [first - l_block - n_keys, first - l_block). | |
1373 | // Otherwise: [first - n_keys, first). | |
1374 | // Buffer (if use_buf) is also on the left of first [first - l_block, first). | |
1375 | // Blocks of length l_prev_merged combined. We'll combine them in pairs | |
1376 | // l_prev_merged and n_keys are powers of 2. (2*l_prev_merged/l_block) keys are guaranteed | |
1377 | // Returns the number of combined elements (some trailing elements might be left uncombined) | |
template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf>
void adaptive_sort_combine_blocks
   ( RandItKeys const keys
   , KeyCompare key_comp
   , RandIt const first
   , typename iterator_traits<RandIt>::size_type const len
   , typename iterator_traits<RandIt>::size_type const l_prev_merged
   , typename iterator_traits<RandIt>::size_type const l_block
   , bool const use_buf
   , bool const xbuf_used
   , XBuf & xbuf
   , Compare comp
   , bool merge_left)
{
   (void)xbuf;
   typedef typename iterator_traits<RandIt>::size_type size_type;

   //Each combined range joins two runs of length l_prev_merged; the
   //trailing remainder (if longer than one run) forms an irregular range.
   size_type const l_reg_combined = 2*l_prev_merged;
   size_type l_irreg_combined = 0;
   size_type const l_total_combined = calculate_total_combined(len, l_prev_merged, &l_irreg_combined);
   size_type const n_reg_combined = len/l_reg_combined;
   RandIt combined_first = first;

   (void)l_total_combined;
   BOOST_ASSERT(l_total_combined <= len);

   size_type n_bef_irreg2, n_aft_irreg2, midkey_idx, l_irreg1, l_irreg2;
   //Total ranges to process: the regular ones plus one irregular, if any
   size_type const max_i = n_reg_combined + (l_irreg_combined != 0);

   if(merge_left || !use_buf) {
      //Left-to-right pass: the buffer (when used) precedes the data
      for( size_type combined_i = 0; combined_i != max_i; ++combined_i, combined_first += l_reg_combined) {
         bool const is_last = combined_i==n_reg_combined;
         size_type const l_cur_combined = is_last ? l_irreg_combined : l_reg_combined;

         range_xbuf<RandIt, move_op> rbuf( (use_buf && xbuf_used) ? (combined_first-l_block) : combined_first, combined_first);
         combine_params( keys, key_comp, combined_first, l_cur_combined
                        , l_prev_merged, l_block, rbuf, comp
                        , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true);   //Outputs
         //Now merge blocks
         if(!use_buf){
            merge_blocks_bufferless
               (keys, keys[midkey_idx], key_comp, combined_first, l_block, 0u, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp);
         }
         else{
            merge_blocks_left
               (keys, keys[midkey_idx], key_comp, combined_first, l_block, 0u, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
         }
         //BOOST_MOVE_ADAPTIVE_SORT_PRINT("   After merge_blocks_l: ", len + l_block);
      }
   }
   else{
      //Right-to-left pass: the buffer follows the data, so process the
      //combined ranges from the last one backwards
      combined_first += l_reg_combined*(max_i-1);
      for( size_type combined_i = max_i; combined_i--; combined_first -= l_reg_combined) {
         bool const is_last = combined_i==n_reg_combined;
         size_type const l_cur_combined = is_last ? l_irreg_combined : l_reg_combined;
         RandIt const combined_last(combined_first+l_cur_combined);
         range_xbuf<RandIt, move_op> rbuf(combined_last, xbuf_used ? (combined_last+l_block) : combined_last);
         combine_params( keys, key_comp, combined_first, l_cur_combined
                        , l_prev_merged, l_block, rbuf, comp
                        , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, false);   //Outputs
         //BOOST_MOVE_ADAPTIVE_SORT_PRINT("   After combine_params: ", len + l_block);
         merge_blocks_right
            (keys, keys[midkey_idx], key_comp, combined_first, l_block, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
         //BOOST_MOVE_ADAPTIVE_SORT_PRINT("   After merge_blocks_r: ", len + l_block);
      }
   }
}
1445 | ||
1446 | ||
//Bottom-up merging of sorted runs of length input_combined_size using the
//external buffer, doubling the run length each pass while the buffer can
//hold one run. Returns the final run length reached.
template<class RandIt, class Compare>
typename iterator_traits<RandIt>::size_type
buffered_merge_blocks
      ( RandIt const first, RandIt const last
      , typename iterator_traits<RandIt>::size_type const input_combined_size
      , Compare comp
      , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> &xbuf)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   size_type combined_size = input_combined_size;

   //Keep merging while more than one run remains and the buffer can hold
   //one whole run (buffered_merge stores the shorter side, at most
   //combined_size elements)
   for( size_type const elements_in_blocks = size_type(last - first)
      ; elements_in_blocks > combined_size && size_type(xbuf.capacity()) >= combined_size
      ; combined_size *=2){
      RandIt merge_point = first;
      //Merge full pairs of runs
      while(size_type(last - merge_point) > 2*combined_size) {
         RandIt const second_half = merge_point+combined_size;
         RandIt const next_merge_point = second_half+combined_size;
         buffered_merge(merge_point, second_half, next_merge_point, comp, xbuf);
         merge_point = next_merge_point;
      }
      //Merge the trailing run with the (shorter) remainder, if any
      if(size_type(last-merge_point) > combined_size){
         buffered_merge(merge_point, merge_point+combined_size, last, comp, xbuf);
      }
   }
   return combined_size;
}
1474 | ||
//First step of the merge-sort phase: insertion-sort consecutive chunks of
//length s = min(step, AdaptiveSortInsertionSortThreshold) while shifting
//each chunk s positions to the left through Op (writing into
//[first-s, ...), which the caller provides as buffer space — TODO confirm
//against callers). Returns the chunk length s used.
template<class RandIt, class Compare, class Op>
typename iterator_traits<RandIt>::size_type
op_insertion_sort_step_left
   ( RandIt const first
   , typename iterator_traits<RandIt>::size_type const length
   , typename iterator_traits<RandIt>::size_type const step
   , Compare comp, Op op)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   size_type const s = min_value<size_type>(step, AdaptiveSortInsertionSortThreshold);
   size_type m = 0;

   //Full chunks of length s
   while((length - m) > s){
      insertion_sort_op(first+m, first+m+s, first+m-s, comp, op);
      m += s;
   }
   //Trailing, possibly shorter, chunk
   insertion_sort_op(first+m, first+length, first+m-s, comp, op);
   return s;
}
1494 | ||
1495 | template<class RandIt, class Compare> | |
1496 | typename iterator_traits<RandIt>::size_type | |
1497 | insertion_sort_step | |
1498 | ( RandIt const first | |
1499 | , typename iterator_traits<RandIt>::size_type const length | |
1500 | , typename iterator_traits<RandIt>::size_type const step | |
1501 | , Compare comp) | |
1502 | { | |
1503 | typedef typename iterator_traits<RandIt>::size_type size_type; | |
1504 | size_type const s = min_value<size_type>(step, AdaptiveSortInsertionSortThreshold); | |
1505 | size_type m = 0; | |
1506 | ||
1507 | while((length - m) > s){ | |
1508 | insertion_sort(first+m, first+m+s, comp); | |
1509 | m += s; | |
1510 | } | |
1511 | insertion_sort(first+m, first+length, comp); | |
1512 | return s; | |
1513 | } | |
1514 | ||
//Merge-to-left passes: repeatedly merge pairs of runs of length l_merged,
//doubling l_merged each pass and sliding the whole block region l_merged
//positions to the left each time (the freed space on the left acts as the
//merge buffer). Stops when l_merged reaches l_build_buf or the remaining
//left space cannot host another pass. Returns the final l_merged.
template<class RandIt, class Compare, class Op>
typename iterator_traits<RandIt>::size_type
op_merge_left_step
   ( RandIt first_block
   , typename iterator_traits<RandIt>::size_type const elements_in_blocks
   , typename iterator_traits<RandIt>::size_type l_merged
   , typename iterator_traits<RandIt>::size_type const l_build_buf
   , typename iterator_traits<RandIt>::size_type l_left_space
   , Compare comp
   , Op op)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   for(; l_merged < l_build_buf && l_left_space >= l_merged; l_merged*=2){
      size_type p0=0;
      RandIt pos = first_block;
      //Merge full pairs of runs, writing each result l_merged to the left
      while((elements_in_blocks - p0) > 2*l_merged) {
         op_merge_left(pos-l_merged, pos, pos+l_merged, pos+2*l_merged, comp, op);
         p0 += 2*l_merged;
         pos = first_block+p0;
      }
      if((elements_in_blocks-p0) > l_merged) {
         //A full run plus a shorter remainder: merge them
         op_merge_left(pos-l_merged, pos, pos+l_merged, first_block+elements_in_blocks, comp, op);
      }
      else {
         //Only a single (possibly partial) run left: just shift it left
         op(forward_t(), pos, first_block+elements_in_blocks, pos-l_merged);
      }
      //The whole region has moved l_merged positions to the left
      first_block -= l_merged;
      l_left_space -= l_merged;
   }
   return l_merged;
}
1546 | ||
// Final "merge to right" pass of the build_blocks phase.
//
// Merges adjacent pairs of sorted l_build_buf-length blocks from the last
// pair to the first, writing each merged 2*l_build_buf block l_build_buf
// positions to the right of its inputs. The trailing irregular remainder
// (elements_in_blocks % (2*l_build_buf)) is handled first: if it is no
// larger than one block it is only shifted right, otherwise it is merged.
// NOTE(review): as used by adaptive_sort_build_blocks, this shift appears
// to relocate the internal buffer from the back to the front of the range.
template<class RandIt, class Compare, class Op>
void op_merge_right_step
      ( RandIt first_block
      , typename iterator_traits<RandIt>::size_type const elements_in_blocks
      , typename iterator_traits<RandIt>::size_type const l_build_buf
      , Compare comp
      , Op op)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   //Length of the irregular trailing part and offset where it starts
   size_type restk = elements_in_blocks%(2*l_build_buf);
   size_type p = elements_in_blocks - restk;
   BOOST_ASSERT(0 == (p%(2*l_build_buf)));

   if(restk <= l_build_buf){
      //At most one (possibly partial) block remains: shift it right, no merge needed
      op(backward_t(),first_block+p, first_block+p+restk, first_block+p+restk+l_build_buf);
   }
   else{
      //Irregular pair: merge the full block with the shorter trailing one
      op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+restk, first_block+p+restk+l_build_buf, comp, op);
   }
   //Merge the remaining full pairs from right to left
   while(p>0){
      p -= 2*l_build_buf;
      op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+2*l_build_buf, first_block+p+3*l_build_buf, comp, op);
   }
}
1571 | ||
1572 | ||
1573 | // build blocks of length 2*l_build_buf. l_build_buf is power of two | |
1574 | // input: [0, l_build_buf) elements are buffer, rest unsorted elements | |
1575 | // output: [0, l_build_buf) elements are buffer, blocks 2*l_build_buf and last subblock sorted | |
1576 | // | |
1577 | // First elements are merged from right to left until elements start | |
1578 | // at first. All old elements [first, first + l_build_buf) are placed at the end | |
1579 | // [first+len-l_build_buf, first+len). To achieve this: | |
1580 | // - If we have external memory to merge, we save elements from the buffer | |
1581 | // so that a non-swapping merge is used. Buffer elements are restored | |
1582 | // at the end of the buffer from the external memory. | |
1583 | // | |
1584 | // - When the external memory is not available or it is insufficient | |
1585 | // for a merge operation, left swap merging is used. | |
1586 | // | |
1587 | // Once elements are merged left to right in blocks of l_build_buf, then a single left | |
1588 | // to right merge step is performed to achieve merged blocks of size 2K. | |
1589 | // If external memory is available, usual merge is used, swap merging otherwise. | |
1590 | // | |
1591 | // As a last step, if auxiliary memory is available in-place merge is performed. | |
1592 | // until all is merged or auxiliary memory is not large enough. | |
// Implements the "build_blocks" phase described in the comment above:
// sorts the elements after the internal buffer into blocks of size
// 2*l_build_buf (plus a possibly shorter last block), using the internal
// buffer (and the external xbuf when large enough) as scratch space.
//
// first       -> start of the range; [first, first+l_build_buf) is the buffer
// len         -> total length, buffer included
// l_base      -> base length for the initial insertion-sort step
// l_build_buf -> internal buffer length (l_build_buf/l_base is a power of two)
// xbuf        -> external buffer, may be empty
//
// Returns the merged-block length achieved (see buffered_merge_blocks).
template<class RandIt, class Compare>
typename iterator_traits<RandIt>::size_type
   adaptive_sort_build_blocks
      ( RandIt const first
      , typename iterator_traits<RandIt>::size_type const len
      , typename iterator_traits<RandIt>::size_type const l_base
      , typename iterator_traits<RandIt>::size_type const l_build_buf
      , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
      , Compare comp)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   BOOST_ASSERT(l_build_buf <= len);
   //l_build_buf must be l_base * 2^N
   BOOST_ASSERT(0 == ((l_build_buf / l_base)&(l_build_buf/l_base-1)));

   //Place the start pointer after the buffer
   RandIt first_block = first + l_build_buf;
   size_type const elements_in_blocks = len - l_build_buf;

   //////////////////////////////////
   // Start of merge to left step
   //////////////////////////////////
   size_type l_merged = 0u;

   // if(xbuf.capacity()>=2*l_build_buf){
   if(!l_build_buf){
      //No internal buffer at all: plain insertion-sort step, then in-place merging
      l_merged = insertion_sort_step(first_block, elements_in_blocks, l_base, comp);
      //2*l_build_buf already merged, now try to merge further
      //using classic in-place mergesort if enough auxiliary memory is available
      return buffered_merge_blocks
         (first_block, first_block + elements_in_blocks, l_merged, comp, xbuf);
   }
   else{
      //If there is not enough buffer for the insertion sort step, just avoid the external buffer
      size_type kbuf = min_value<size_type>(l_build_buf, size_type(xbuf.capacity()));
      kbuf = kbuf < l_base ? 0 : kbuf;

      if(kbuf){
         //Backup internal buffer values in external buffer so they can be overwritten
         xbuf.move_assign(first+l_build_buf-kbuf, kbuf);
         l_merged = op_insertion_sort_step_left(first_block, elements_in_blocks, l_base, comp, move_op());

         //Now combine them using the buffer. Elements from buffer can be
         //overwritten since they've been saved to xbuf
         l_merged = op_merge_left_step
            ( first_block - l_merged, elements_in_blocks, l_merged, l_build_buf, kbuf - l_merged, comp, move_op());

         //Restore internal buffer from external buffer unless kbuf was l_build_buf,
         //in that case restoration will happen later
         if(kbuf != l_build_buf){
            boost::move(xbuf.data()+kbuf-l_merged, xbuf.data() + kbuf, first_block-l_merged+elements_in_blocks);
         }
      }
      else{
         //External buffer too small: sort in place and rotate the sorted
         //area into the buffer zone to mimic the merge-left displacement
         l_merged = insertion_sort_step(first_block, elements_in_blocks, l_base, comp);
         rotate_gcd(first_block - l_merged, first_block, first_block+elements_in_blocks);
      }

      //Now combine elements using the buffer. Elements from buffer can't be
      //overwritten since xbuf was not big enough, so merge swapping elements.
      l_merged = op_merge_left_step
         (first_block - l_merged, elements_in_blocks, l_merged, l_build_buf, l_build_buf - l_merged, comp, swap_op());

      BOOST_ASSERT(l_merged == l_build_buf);

      //////////////////////////////////
      // Start of merge to right step
      //////////////////////////////////

      //If kbuf is l_build_buf then we can merge right without swapping
      //Saved data is still in xbuf
      if(kbuf && kbuf == l_build_buf){
         op_merge_right_step(first, elements_in_blocks, l_build_buf, comp, move_op());
         //Restore internal buffer from external buffer if kbuf was l_build_buf.
         //as this operation was previously delayed.
         boost::move(xbuf.data(), xbuf.data() + kbuf, first);
      }
      else{
         op_merge_right_step(first, elements_in_blocks, l_build_buf, comp, swap_op());
      }
      xbuf.clear();
      //2*l_build_buf already merged, now try to merge further
      //using classic in-place mergesort if enough auxiliary memory is available
      return buffered_merge_blocks
         (first_block, first_block + elements_in_blocks, l_build_buf*2, comp, xbuf);
   }
}
1679 | ||
// Implements the "combine_blocks" phase: iteratively combines pairs of
// already merged segments (doubling "l_merged" each round, alternating
// merge-to-left and merge-to-right rounds to minimize internal-buffer
// repositioning) until all l_data elements form a single sorted range.
//
// keys            -> start of the key elements used as markers
// n_keys          -> in: number of keys; out: keys remaining after the phase
// buffer          -> start of the internal buffer area
// l_buf_plus_data -> internal buffer length plus data length
// l_merged        -> segment length produced by build_blocks
// l_intbuf        -> in/out: internal buffer length actually used
// xbuf            -> external buffer (used to backup the internal buffer)
//
// Returns true if the internal buffer ends up placed in
// [buffer+len-l_intbuf, buffer+len). Otherwise, the buffer is left in
// [buffer, buffer+l_intbuf).
template<class RandIt, class Compare>
bool adaptive_sort_combine_all_blocks
   ( RandIt keys
   , typename iterator_traits<RandIt>::size_type &n_keys
   , RandIt const buffer
   , typename iterator_traits<RandIt>::size_type const l_buf_plus_data
   , typename iterator_traits<RandIt>::size_type l_merged
   , typename iterator_traits<RandIt>::size_type &l_intbuf
   , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
   , Compare comp)
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   RandIt const first = buffer + l_intbuf;
   size_type const l_data = l_buf_plus_data - l_intbuf;
   size_type const l_unique = l_intbuf+n_keys;
   //Backup data to external buffer once if possible
   bool const common_xbuf = l_data > l_merged && l_intbuf && l_intbuf <= xbuf.capacity();
   if(common_xbuf){
      xbuf.move_assign(buffer, l_intbuf);
   }

   bool prev_merge_left = true;
   size_type l_prev_total_combined = 0u, l_prev_block = 0;
   bool prev_use_internal_buf = true;

   for( size_type n = 0; l_data > l_merged
      ; l_merged*=2
      , ++n){
      //If l_intbuf is non-zero, use that internal buffer.
      //    Implies l_block == l_intbuf && use_internal_buf == true
      //If l_intbuf is zero, see if half keys can be reused as a reduced emergency buffer,
      //    Implies l_block == n_keys/2 && use_internal_buf == true
      //Otherwise, just give up and use all keys to merge using rotations (use_internal_buf = false)
      bool use_internal_buf = false;
      size_type const l_block = lblock_for_combine(l_intbuf, n_keys, 2*l_merged, use_internal_buf);
      BOOST_ASSERT(!l_intbuf || (l_block == l_intbuf));
      BOOST_ASSERT(n == 0 || (!use_internal_buf || prev_use_internal_buf) );
      BOOST_ASSERT(n == 0 || (!use_internal_buf || l_prev_block == l_block) );

      bool const is_merge_left = (n&1) == 0;
      size_type const l_total_combined = calculate_total_combined(l_data, l_merged);
      //Reposition the internal buffer left by the previous round before
      //starting this one, if the previous round displaced it
      if(n && prev_use_internal_buf && prev_merge_left){
         if(is_merge_left || !use_internal_buf){
            move_data_backward(first-l_prev_block, l_prev_total_combined, first, common_xbuf);
         }
         else{
            //Put the buffer just after l_total_combined
            RandIt const buf_end = first+l_prev_total_combined;
            RandIt const buf_beg = buf_end-l_block;
            if(l_prev_total_combined > l_total_combined){
               size_type const l_diff = l_prev_total_combined - l_total_combined;
               move_data_backward(buf_beg-l_diff, l_diff, buf_end-l_diff, common_xbuf);
            }
            else if(l_prev_total_combined < l_total_combined){
               size_type const l_diff = l_total_combined - l_prev_total_combined;
               move_data_forward(buf_end, l_diff, buf_beg, common_xbuf);
            }
         }
      }
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After move_data : ", l_data + l_intbuf);

      //Combine to form l_merged*2 segments
      if(n_keys){
         adaptive_sort_combine_blocks
            ( keys, comp, !use_internal_buf || is_merge_left ? first : first-l_block
            , l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
      }
      else{
         //No element keys available: use integral keys stored in the external buffer
         size_type *const uint_keys = xbuf.template aligned_trailing<size_type>();
         adaptive_sort_combine_blocks
            ( uint_keys, less(), !use_internal_buf || is_merge_left ? first : first-l_block
            , l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
      }
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After combine_blocks: ", l_data + l_intbuf);
      prev_merge_left = is_merge_left;
      l_prev_total_combined = l_total_combined;
      l_prev_block = l_block;
      prev_use_internal_buf = use_internal_buf;
   }
   BOOST_ASSERT(l_prev_total_combined == l_data);
   bool const buffer_right = prev_use_internal_buf && prev_merge_left;

   l_intbuf = prev_use_internal_buf ? l_prev_block : 0u;
   n_keys = l_unique - l_intbuf;
   //Restore data from the external common buffer if used
   if(common_xbuf){
      if(buffer_right){
         boost::move(xbuf.data(), xbuf.data() + l_intbuf, buffer+l_data);
      }
      else{
         boost::move(xbuf.data(), xbuf.data() + l_intbuf, buffer);
      }
   }
   return buffer_right;
}
1778 | ||
1779 | template<class RandIt, class Compare> | |
1780 | void stable_merge | |
1781 | ( RandIt first, RandIt const middle, RandIt last | |
1782 | , Compare comp | |
1783 | , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> &xbuf) | |
1784 | { | |
1785 | BOOST_ASSERT(xbuf.empty()); | |
1786 | typedef typename iterator_traits<RandIt>::size_type size_type; | |
1787 | size_type const len1 = size_type(middle-first); | |
1788 | size_type const len2 = size_type(last-middle); | |
1789 | size_type const l_min = min_value(len1, len2); | |
1790 | if(xbuf.capacity() >= l_min){ | |
1791 | buffered_merge(first, middle, last, comp, xbuf); | |
1792 | xbuf.clear(); | |
1793 | } | |
1794 | else{ | |
1795 | merge_bufferless(first, middle, last, comp); | |
1796 | } | |
1797 | } | |
1798 | ||
1799 | ||
// Final phase of the adaptive sort: sorts the leading keys and the internal
// buffer (wherever combine_all_blocks left it) and merges them with the
// rest of the already-sorted elements.
//
// buffer_right -> true if the internal buffer lies at [first+len-l_intbuf, first+len)
// first        -> start of the whole range
// l_intbuf     -> internal buffer length
// n_keys       -> number of key elements at the front
// len          -> total length of the range
// xbuf         -> external buffer (cleared on entry)
template<class RandIt, class Compare>
void adaptive_sort_final_merge( bool buffer_right
                              , RandIt const first
                              , typename iterator_traits<RandIt>::size_type const l_intbuf
                              , typename iterator_traits<RandIt>::size_type const n_keys
                              , typename iterator_traits<RandIt>::size_type const len
                              , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
                              , Compare comp)
{
   //BOOST_ASSERT(n_keys || xbuf.size() == l_intbuf);
   xbuf.clear();

   typedef typename iterator_traits<RandIt>::size_type size_type;
   size_type const n_key_plus_buf = l_intbuf+n_keys;
   if(buffer_right){
      //Buffer is at the back: sort it, merge it in (antistable keeps
      //equal buffer elements after equal data elements), then keys
      stable_sort(first+len-l_intbuf, first+len, comp, xbuf);
      stable_merge(first+n_keys, first+len-l_intbuf, first+len, antistable<Compare>(comp), xbuf);
      stable_sort(first, first+n_keys, comp, xbuf);
      stable_merge(first, first+n_keys, first+len, comp, xbuf);
   }
   else{
      //Keys and buffer are contiguous at the front: sort them together
      stable_sort(first, first+n_key_plus_buf, comp, xbuf);
      if(xbuf.capacity() >= n_key_plus_buf){
         //One buffered merge of the whole front
         buffered_merge(first, first+n_key_plus_buf, first+len, comp, xbuf);
      }
      else if(xbuf.capacity() >= min_value<size_type>(l_intbuf, n_keys)){
         //Two-step merge: buffer part first, then keys
         stable_merge(first+n_keys, first+n_key_plus_buf, first+len, comp, xbuf);
         stable_merge(first, first+n_keys, first+len, comp, xbuf);
      }
      else{
         merge_bufferless(first, first+n_key_plus_buf, first+len, comp);
      }
   }
   BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After final_merge : ", len);
}
1835 | ||
// Computes the working parameters of adaptive_sort and collects the unique
// elements they require.
//
// Outputs (by reference): n_keys (key elements), l_intbuf (internal buffer
// length, 0 if keys double as buffer), l_base (initial insertion-sort chunk
// base) and l_build_buf (buffer length for the build_blocks phase).
//
// Returns false when too few unique elements exist (fewer than 4 collected),
// in which case the caller must fall back to a rotation-based sort.
template<class RandIt, class Compare, class Unsigned, class T>
bool adaptive_sort_build_params
   (RandIt first, Unsigned const len, Compare comp
   , Unsigned &n_keys, Unsigned &l_intbuf, Unsigned &l_base, Unsigned &l_build_buf
   , adaptive_xbuf<T> & xbuf
   )
{
   typedef Unsigned size_type;

   //Calculate ideal parameters and try to collect needed unique keys
   l_base = 0u;

   //Try to find a value near sqrt(len) that is 2^N*l_base where
   //l_base <= AdaptiveSortInsertionSortThreshold. This property is important
   //as build_blocks merges to the left iteratively duplicating the
   //merged size and all the buffer must be used just before the final
   //merge to right step. This guarantees "build_blocks" produces
   //segments of size l_build_buf*2, maximizing the classic merge phase.
   l_intbuf = size_type(ceil_sqrt_multiple(len, &l_base));

   //This is the minimum number of keys to implement the ideal algorithm
   //
   //l_intbuf is used as buffer plus the key count
   size_type n_min_ideal_keys = l_intbuf-1u;
   while(n_min_ideal_keys >= (len-l_intbuf-n_min_ideal_keys)/l_intbuf){
      --n_min_ideal_keys;
   }
   ++n_min_ideal_keys;
   BOOST_ASSERT(n_min_ideal_keys < l_intbuf);

   if(xbuf.template supports_aligned_trailing<size_type>(l_intbuf, n_min_ideal_keys)){
      //External memory can hold the buffer plus integral keys: no element keys needed
      n_keys = 0u;
      l_build_buf = l_intbuf;
   }
   else{
      //Try to achieve a l_build_buf of length l_intbuf*2, so that we can merge with that
      //l_intbuf*2 buffer in "build_blocks" and use half of them as buffer and the other half
      //as keys in combine_all_blocks. In that case n_keys >= n_min_ideal_keys but by a small margin.
      //
      //If available memory is 2*sqrt(l), then only sqrt(l) unique keys are needed,
      //(to be used for keys in combine_all_blocks) as the whole l_build_buf
      //will be backed up in the buffer during build_blocks.
      bool const non_unique_buf = xbuf.capacity() >= 2*l_intbuf;
      size_type const to_collect = non_unique_buf ? l_intbuf : l_intbuf*2;
      size_type collected = collect_unique(first, first+len, to_collect, comp, xbuf);

      //If available memory is 2*sqrt(l), then for "build_params"
      //the situation is the same as if 2*l_intbuf were collected.
      if(non_unique_buf && (collected >= n_min_ideal_keys))
         collected += l_intbuf;

      //If collected keys are not enough, try to fix n_keys and l_intbuf. If no fix
      //is possible (due to very low unique keys), then go to a slow sort based on rotations.
      if(collected < (n_min_ideal_keys+l_intbuf)){
         if(collected < 4){ //No combination possible with less than 4 keys
            return false;
         }
         n_keys = l_intbuf;
         while(n_keys&(n_keys-1)){
            n_keys &= n_keys-1; // make it power of 2
         }
         while(n_keys > collected){
            n_keys/=2;
         }
         //AdaptiveSortInsertionSortThreshold is always power of two so the minimum is power of two
         l_base = min_value<Unsigned>(n_keys, AdaptiveSortInsertionSortThreshold);
         l_intbuf = 0;
         l_build_buf = n_keys;
      }
      else if((collected - l_intbuf) >= l_intbuf){
         //l_intbuf*2 elements found. Use all of them in the build phase
         l_build_buf = l_intbuf*2;
         n_keys = l_intbuf;
      }
      else{
         //Only the ideal minimum was found
         l_build_buf = l_intbuf;
         n_keys = n_min_ideal_keys;
      }
      BOOST_ASSERT((n_keys+l_intbuf) >= l_build_buf);
   }

   return true;
}
1919 | ||
1920 | ||
1921 | #define BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF | |
1922 | ||
// "combine_blocks" step of adaptive_merge: merges the leading
// (len1-collected) elements with the trailing len2 elements (the first
// "collected" elements are keys/buffer) choosing, in order of preference,
// an external-buffer merge, an internal-buffer merge, or a bufferless
// (rotation-based) merge depending on the available resources.
//
// n_keys == 0 means integral keys stored in xbuf are used instead of
// element keys collected at the front of the range.
template<class RandIt, class Compare>
inline void adaptive_merge_combine_blocks( RandIt first
                                      , typename iterator_traits<RandIt>::size_type len1
                                      , typename iterator_traits<RandIt>::size_type len2
                                      , typename iterator_traits<RandIt>::size_type collected
                                      , typename iterator_traits<RandIt>::size_type n_keys
                                      , typename iterator_traits<RandIt>::size_type l_block
                                      , bool use_internal_buf
                                      , bool xbuf_used
                                      , Compare comp
                                      , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
                                      )
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   size_type const len = len1+len2;
   size_type const l_combine  = len-collected;
   size_type const l_combine1 = len1-collected;
   size_type n_bef_irreg2, n_aft_irreg2, l_irreg1, l_irreg2, midkey_idx;

   if(n_keys){
      //Element keys were collected at the front of the range
      RandIt const first_data = first+collected;
      RandIt const keys = first;
      combine_params( keys, comp, first_data, l_combine
                    , l_combine1, l_block, xbuf, comp
                    , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true, false);   //Outputs
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A combine: ", len);
      if(xbuf_used){
         //External buffer holds a whole block: non-swapping merge
         BOOST_ASSERT(xbuf.size() >= l_block);
         merge_blocks_with_buf
            (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf, xbuf_used);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg xbf: ", len);
      }
      else if(use_internal_buf){
      #ifdef BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
         //Use the internal buffer (the block preceding the data) as scratch
         range_xbuf<RandIt, swap_op> rbuf(first_data-l_block, first_data);
         merge_blocks_with_buf
            (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, rbuf, xbuf_used);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg buf: ", len);
      #else
         merge_blocks_left
            (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg lft: ", len);
      #endif
      }
      else{
         //No buffer at all: rotation-based merge
         merge_blocks_bufferless
            (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg xbf: ", len);
      }
   }
   else{
      //No element keys: integral keys live in the trailing part of xbuf
      xbuf.shrink_to_fit(l_block);
      if(xbuf.size() < l_block){
         xbuf.initialize_until(l_block, *first);
      }
      size_type *const uint_keys = xbuf.template aligned_trailing<size_type>(l_block);
      combine_params( uint_keys, less(), first, l_combine
                    , l_combine1, l_block, xbuf, comp
                    , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true, true);   //Outputs
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A combine: ", len);
      BOOST_ASSERT(xbuf.size() >= l_block);
      merge_blocks_with_buf
         (uint_keys, uint_keys[midkey_idx], less(), first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf, true);
      xbuf.clear();
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg buf: ", len);
   }

}
1991 | ||
// Final step of adaptive_merge: sorts the collected keys (and the internal
// buffer if one was used) and merges them back with the rest of the range,
// which is already sorted after adaptive_merge_combine_blocks.
template<class RandIt, class Compare>
inline void adaptive_merge_final_merge( RandIt first
                                      , typename iterator_traits<RandIt>::size_type len1
                                      , typename iterator_traits<RandIt>::size_type len2
                                      , typename iterator_traits<RandIt>::size_type collected
                                      , typename iterator_traits<RandIt>::size_type l_intbuf
                                      , typename iterator_traits<RandIt>::size_type l_block
                                      , bool use_internal_buf
                                      , bool xbuf_used
                                      , Compare comp
                                      , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
                                      )
{
   typedef typename iterator_traits<RandIt>::size_type size_type;
   (void)l_block;
   size_type n_keys = collected-l_intbuf;
   size_type len = len1+len2;
   if(use_internal_buf){
      if(xbuf_used){
         xbuf.clear();
         //Nothing to do for the buffer (it was external); only keys remain
         if(n_keys){
            stable_sort(first, first+n_keys, comp, xbuf);
            stable_merge(first, first+n_keys, first+len, comp, xbuf);
            BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key mrg: ", len);
         }
      }
      else{
         #ifdef BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
         //Keys and buffer are contiguous at the front: sort and merge together
         xbuf.clear();
         stable_sort(first, first+collected, comp, xbuf);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b srt: ", len);
         stable_merge(first, first+collected, first+len, comp, xbuf);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b mrg: ", len);
         #else
         //Buffer lies at the back: sort it, rotate it into position,
         //merge it (antistable preserves stability), then handle keys
         xbuf.clear();
         stable_sort(first+len-l_block, first+len, comp, xbuf);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A buf srt: ", len);
         RandIt const pos1 = lower_bound(first+n_keys, first+len-l_block, first[len-1], comp);
         RandIt const pos2 = rotate_gcd(pos1, first+len-l_block, first+len);
         stable_merge(first+n_keys, pos1, pos2, antistable<Compare>(comp), xbuf);
         BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A buf mrg: ", len);
         if(n_keys){
            stable_sort(first, first+n_keys, comp, xbuf);
            BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key srt: ", len);
            stable_merge(first, first+n_keys, first+len, comp, xbuf);
            BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key mrg: ", len);
         }
         #endif
      }
   }
   else{
      //Only keys were collected: sort and merge them in one go
      xbuf.clear();
      stable_sort(first, first+collected, comp, xbuf);
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b srt: ", len);
      stable_merge(first, first+collected, first+len1+len2, comp, xbuf);
      BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b mrg: ", len);
   }
}
2051 | ||
// Computes how many keys adaptive_merge needs for block length "l_block"
// over a range of length "len", and whether an internal buffer is required
// (it is not when the external buffer can hold a whole block). Writes the
// internal buffer length to l_intbuf_inout and returns the key count
// (zero when integral keys fit in the external buffer's trailing storage).
template<class SizeType, class Xbuf>
inline SizeType adaptive_merge_n_keys_intbuf(SizeType l_block, SizeType len, Xbuf & xbuf, SizeType &l_intbuf_inout)
{
   typedef SizeType size_type;
   //No internal buffer needed if the external one can hold a whole block
   size_type const l_intbuf = xbuf.capacity() >= l_block ? size_type(0u) : l_block;
   l_intbuf_inout = l_intbuf;

   //Minimum number of keys for the ideal algorithm:
   //ceil(len/l_block) - 1 (as the first block is used as buffer)
   size_type n_keys = len/l_block+1;
   while(n_keys >= (len-l_intbuf-n_keys)/l_block){
      --n_keys;
   }
   ++n_keys;

   //Integral keys stored in the external buffer make element keys unnecessary
   if(xbuf.template supports_aligned_trailing<size_type>(l_block, n_keys)){
      n_keys = 0u;
   }
   return n_keys;
}
2073 | ||
2074 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2075 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2076 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2077 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2078 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2079 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2080 | /////////////////////////////////////////////////////////////////////////////////////////// | |
2081 | ||
2082 | // Main explanation of the sort algorithm. | |
2083 | // | |
2084 | // csqrtlen = ceil(sqrt(len)); | |
2085 | // | |
// * First, 2*csqrtlen unique elements are extracted from the elements to be
2087 | // sorted and placed in the beginning of the range. | |
2088 | // | |
2089 | // * Step "build_blocks": In this nearly-classic merge step, 2*csqrtlen unique elements | |
2090 | // will be used as auxiliary memory, so trailing len-2*csqrtlen elements are | |
// grouped in blocks of sorted 4*csqrtlen elements. At the end of the step
2092 | // 2*csqrtlen unique elements are again the leading elements of the whole range. | |
2093 | // | |
2094 | // * Step "combine_blocks": pairs of previously formed blocks are merged with a different | |
2095 | // ("smart") algorithm to form blocks of 8*csqrtlen elements. This step is slower than the | |
2096 | // "build_blocks" step and repeated iteratively (forming blocks of 16*csqrtlen, 32*csqrtlen | |
// elements, etc.) until all trailing (len-2*csqrtlen) elements are merged.
2098 | // | |
// In "combine_blocks" len/csqrtlen elements are used as "keys" (markers) to
2100 | // know if elements belong to the first or second block to be merged and another | |
2101 | // leading csqrtlen elements are used as buffer. Explanation of the "combine_blocks" step: | |
2102 | // | |
2103 | // Iteratively until all trailing (len-2*csqrtlen) elements are merged: | |
2104 | // Iteratively for each pair of previously merged block: | |
2105 | // * Blocks are divided groups of csqrtlen elements and | |
2106 | // 2*merged_block/csqrtlen keys are sorted to be used as markers | |
// * Groups are selection-sorted by first or last element (depending on whether they are
2108 | // merged to left or right) and keys are reordered accordingly as an imitation-buffer. | |
// * Elements of each block pair are merged using the csqrtlen buffer taking into account
2110 | // if they belong to the first half or second half (marked by the key). | |
2111 | // | |
2112 | // * In the final merge step leading elements (2*csqrtlen) are sorted and merged with | |
2113 | // rotations with the rest of sorted elements in the "combine_blocks" step. | |
2114 | // | |
2115 | // Corner cases: | |
2116 | // | |
2117 | // * If no 2*csqrtlen elements can be extracted: | |
2118 | // | |
2119 | // * If csqrtlen+len/csqrtlen are extracted, then only csqrtlen elements are used | |
2120 | // as buffer in the "build_blocks" step forming blocks of 2*csqrtlen elements. This | |
2121 | // means that an additional "combine_blocks" step will be needed to merge all elements. | |
2122 | // | |
2123 | // * If no csqrtlen+len/csqrtlen elements can be extracted, but still more than a minimum, | |
2124 | // then reduces the number of elements used as buffer and keys in the "build_blocks" | |
2125 | // and "combine_blocks" steps. If "combine_blocks" has no enough keys due to this reduction | |
2126 | // then uses a rotation based smart merge. | |
2127 | // | |
2128 | // * If the minimum number of keys can't be extracted, a rotation-based sorting is performed. | |
2129 | // | |
2130 | // * If auxiliary memory is more or equal than ceil(len/2), half-copying mergesort is used. | |
2131 | // | |
2132 | // * If auxiliary memory is more than csqrtlen+n_keys*sizeof(std::size_t), | |
2133 | // then only csqrtlen elements need to be extracted and "combine_blocks" will use integral | |
2134 | // keys to combine blocks. | |
2135 | // | |
2136 | // * If auxiliary memory is available, the "build_blocks" will be extended to build bigger blocks | |
2137 | // using classic merge. | |
// Top-level driver of the adaptive sort (see the algorithm explanation
// above): dispatches to insertion sort for tiny ranges, to half-copying
// merge sort when the external buffer is big enough, and otherwise runs
// the three phases: build_params/collect, build_blocks, combine_all_blocks
// and the final merge. Falls back to a rotation-based stable sort when not
// enough unique keys exist.
template<class RandIt, class Compare>
void adaptive_sort_impl
   ( RandIt first
   , typename iterator_traits<RandIt>::size_type const len
   , Compare comp
   , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
   )
{
   typedef typename iterator_traits<RandIt>::size_type size_type;

   //Small sorts go directly to insertion sort
   if(len <= size_type(AdaptiveSortInsertionSortThreshold)){
      insertion_sort(first, first + len, comp);
      return;
   }

   //Enough auxiliary memory for ceil(len/2): half-copying merge sort
   if((len-len/2) <= xbuf.capacity()){
      merge_sort(first, first+len, comp, xbuf.data());
      return;
   }

   //Make sure it is at least four
   BOOST_STATIC_ASSERT(AdaptiveSortInsertionSortThreshold >= 4);

   size_type l_base = 0;
   size_type l_intbuf = 0;
   size_type n_keys = 0;
   size_type l_build_buf = 0;

   //Calculate and extract needed unique elements. If a minimum is not achieved
   //fallback to rotation-based merge
   if(!adaptive_sort_build_params(first, len, comp, n_keys, l_intbuf, l_base, l_build_buf, xbuf)){
      stable_sort(first, first+len, comp, xbuf);
      return;
   }

   //Otherwise, continue the adaptive_sort
   BOOST_MOVE_ADAPTIVE_SORT_PRINT("\n After collect_unique: ", len);
   size_type const n_key_plus_buf = l_intbuf+n_keys;
   //l_build_buf is always power of two if l_intbuf is zero
   BOOST_ASSERT(l_intbuf || (0 == (l_build_buf & (l_build_buf-1))));

   //Classic merge sort until internal buffer and xbuf are exhausted
   size_type const l_merged = adaptive_sort_build_blocks
      (first+n_key_plus_buf-l_build_buf, len-n_key_plus_buf+l_build_buf, l_base, l_build_buf, xbuf, comp);
   BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After build_blocks: ", len);

   //Non-trivial merge
   bool const buffer_right = adaptive_sort_combine_all_blocks
      (first, n_keys, first+n_keys, len-n_keys, l_merged, l_intbuf, xbuf, comp);

   //Sort keys and buffer and merge the whole sequence
   adaptive_sort_final_merge(buffer_right, first, l_intbuf, n_keys, len, xbuf, comp);
}
2192 | ||
2193 | // Main explanation of the merge algorithm. | |
2194 | // | |
2195 | // csqrtlen = ceil(sqrt(len)); | |
2196 | // | |
2197 | // * First, csqrtlen [to be used as buffer] + (len/csqrtlen - 1) [to be used as keys] => to_collect | |
2198 | // unique elements are extracted from elements to be sorted and placed in the beginning of the range. | |
2199 | // | |
// * Step "combine_blocks": the leading (len1-to_collect) elements plus trailing len2 elements
//   are merged with a non-trivial ("smart") algorithm to form an ordered range of the trailing "len-to_collect" elements.
2202 | // | |
2203 | // Explanation of the "combine_blocks" step: | |
2204 | // | |
// * Trailing [first+to_collect, first+len1) elements are divided in groups of csqrtlen elements.
//   Remaining elements that can't form a group are grouped in the front of those elements.
// * Trailing [first+len1, first+len1+len2) elements are divided in groups of csqrtlen elements.
//   Remaining elements that can't form a group are grouped in the back of those elements.
// * Groups are selection-sorted by first or last element (depending on whether they are
//   merged to the left or right) and keys are reordered accordingly as an imitation-buffer.
// * The elements of each block pair are merged using the csqrtlen buffer taking into account
//   if they belong to the first half or second half (marked by the key).
2213 | // | |
2214 | // * In the final merge step leading "to_collect" elements are merged with rotations | |
2215 | // with the rest of merged elements in the "combine_blocks" step. | |
2216 | // | |
2217 | // Corner cases: | |
2218 | // | |
2219 | // * If no "to_collect" elements can be extracted: | |
2220 | // | |
//    * If more than a minimum number of elements is extracted
//      then the number of elements used as buffer and keys in the
//      "combine_blocks" step is reduced. If "combine_blocks" does not have enough keys
//      due to this reduction then a rotation-based smart merge is used.
2225 | // | |
2226 | // * If the minimum number of keys can't be extracted, a rotation-based merge is performed. | |
2227 | // | |
// * If auxiliary memory is greater than or equal to min(len1, len2), a buffered merge is performed.
2229 | // | |
2230 | // * If the len1 or len2 are less than 2*csqrtlen then a rotation-based merge is performed. | |
2231 | // | |
// * If auxiliary memory is more than csqrtlen+n_keys*sizeof(std::size_t),
//   then no csqrtlen elements need to be extracted and "combine_blocks" will use integral
//   keys to combine blocks.
2235 | template<class RandIt, class Compare> | |
2236 | void adaptive_merge_impl | |
2237 | ( RandIt first | |
2238 | , typename iterator_traits<RandIt>::size_type const len1 | |
2239 | , typename iterator_traits<RandIt>::size_type const len2 | |
2240 | , Compare comp | |
2241 | , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf | |
2242 | ) | |
2243 | { | |
2244 | typedef typename iterator_traits<RandIt>::size_type size_type; | |
2245 | ||
2246 | if(xbuf.capacity() >= min_value<size_type>(len1, len2)){ | |
2247 | buffered_merge(first, first+len1, first+(len1+len2), comp, xbuf); | |
2248 | } | |
2249 | else{ | |
2250 | const size_type len = len1+len2; | |
2251 | //Calculate ideal parameters and try to collect needed unique keys | |
2252 | size_type l_block = size_type(ceil_sqrt(len)); | |
2253 | ||
2254 | //One range is not big enough to extract keys and the internal buffer so a | |
2255 | //rotation-based based merge will do just fine | |
2256 | if(len1 <= l_block*2 || len2 <= l_block*2){ | |
2257 | merge_bufferless(first, first+len1, first+len1+len2, comp); | |
2258 | return; | |
2259 | } | |
2260 | ||
2261 | //Detail the number of keys and internal buffer. If xbuf has enough memory, no | |
2262 | //internal buffer is needed so l_intbuf will remain 0. | |
2263 | size_type l_intbuf = 0; | |
2264 | size_type n_keys = adaptive_merge_n_keys_intbuf(l_block, len, xbuf, l_intbuf); | |
2265 | size_type const to_collect = l_intbuf+n_keys; | |
2266 | //Try to extract needed unique values from the first range | |
2267 | size_type const collected = collect_unique(first, first+len1, to_collect, comp, xbuf); | |
2268 | BOOST_MOVE_ADAPTIVE_SORT_PRINT("\n A collect: ", len); | |
2269 | ||
2270 | //Not the minimum number of keys is not available on the first range, so fallback to rotations | |
2271 | if(collected != to_collect && collected < 4){ | |
2272 | merge_bufferless(first, first+len1, first+len1+len2, comp); | |
2273 | return; | |
2274 | } | |
2275 | ||
2276 | //If not enough keys but more than minimum, adjust the internal buffer and key count | |
2277 | bool use_internal_buf = collected == to_collect; | |
2278 | if (!use_internal_buf){ | |
2279 | l_intbuf = 0u; | |
2280 | n_keys = collected; | |
2281 | l_block = lblock_for_combine(l_intbuf, n_keys, len, use_internal_buf); | |
2282 | //If use_internal_buf is false, then then internal buffer will be zero and rotation-based combination will be used | |
2283 | l_intbuf = use_internal_buf ? l_block : 0u; | |
2284 | } | |
2285 | ||
2286 | bool const xbuf_used = collected == to_collect && xbuf.capacity() >= l_block; | |
2287 | //Merge trailing elements using smart merges | |
2288 | adaptive_merge_combine_blocks(first, len1, len2, collected, n_keys, l_block, use_internal_buf, xbuf_used, comp, xbuf); | |
2289 | //Merge buffer and keys with the rest of the values | |
2290 | adaptive_merge_final_merge (first, len1, len2, collected, l_intbuf, l_block, use_internal_buf, xbuf_used, comp, xbuf); | |
2291 | } | |
2292 | } | |
2293 | ||
2294 | } //namespace detail_adaptive { | |
2295 | } //namespace movelib { | |
2296 | } //namespace boost { | |
2297 | ||
2298 | #include <boost/move/detail/config_end.hpp> | |
2299 | ||
2300 | #endif //#define BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP |