// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
        struct bkey *k, *next;

        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);

                printk(KERN_ERR "block %u key %u/%u: ", set,
                       (unsigned) ((u64 *) k - i->d), i->keys);

                if (b->ops->key_dump)
                        b->ops->key_dump(b, k);
                else
                        printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, b->ops->is_extents ?
                             &START_KEY(next) : next) > 0)
                        printk(KERN_ERR "Key skipped backwards\n");
        }
}

void bch_dump_bucket(struct btree_keys *b)
{
        unsigned i;

        console_lock();
        for (i = 0; i <= b->nsets; i++)
                bch_dump_bset(b, b->set[i].data,
                              bset_sector_offset(b, b->set[i].data));
        console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
        unsigned ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (b->ops->is_extents)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;
        const char *err;

        for_each_key(b, k, &iter) {
                if (b->ops->is_extents) {
                        err = "Keys out of order";
                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
                                goto bug;

                        if (bch_ptr_invalid(b, k))
                                continue;

                        err = "Overlapping keys";
                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
                                goto bug;
                } else {
                        if (bch_ptr_bad(b, k))
                                continue;

                        err = "Duplicate keys";
                        if (p && !bkey_cmp(p, k))
                                goto bug;
                }
                p = k;
        }
#if 0
        err = "Key larger than btree node key";
        if (p && bkey_cmp(p, &b->key) > 0)
                goto bug;
#endif
        return;
bug:
        bch_dump_bucket(b);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
        struct bkey *k = iter->data->k, *next = bkey_next(k);

        if (next < iter->data->end &&
            bkey_cmp(k, iter->b->ops->is_extents ?
                     &START_KEY(next) : next) > 0) {
                bch_dump_bucket(iter->b);
                panic("Key skipped backwards\n");
        }
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;

        newsize = roundup_pow_of_two(newsize);

        if (newsize <= KEYLIST_INLINE ||
            roundup_pow_of_two(oldsize) == newsize)
                return 0;

        new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

        if (!new_keys)
                return -ENOMEM;

        if (!old_keys)
                memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

        l->keys_p = new_keys;
        l->top_p = new_keys + oldsize;

        return 0;
}
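
/*
 * Illustrative usage sketch (added; not part of the original file): growing
 * a keylist before appending a key.  bch_keylist_push()/bkey_copy() are the
 * bset.h helpers this pairs with - treat this as a sketch of the calling
 * convention, not authoritative API documentation.
 */
#if 0
static int example_keylist_add(struct keylist *l, struct bkey *k)
{
        /* Grow past the inline buffer (in powers of two) only when needed: */
        if (__bch_keylist_realloc(l, bkey_u64s(k)))
                return -ENOMEM;

        bkey_copy(l->top, k);
        bch_keylist_push(l);
        return 0;
}
#endif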

struct bkey *bch_keylist_pop(struct keylist *l)
{
        struct bkey *k = l->keys;

        if (k == l->top)
                return NULL;

        while (bkey_next(k) != l->top)
                k = bkey_next(k);

        return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
        l->top_p -= bkey_u64s(l->keys);

        memmove(l->keys,
                bkey_next(l->keys),
                bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                              unsigned i)
{
        BUG_ON(i > KEY_PTRS(src));

        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[i];
        SET_KEY_PTRS(dest, 1);
        /* We didn't copy the checksum so clear that bit. */
        SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
        unsigned i, len = 0;

        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;

        if (bkey_cmp(where, k) < 0)
                len = KEY_OFFSET(k) - KEY_OFFSET(where);
        else
                bkey_copy_key(k, where);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
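
/*
 * Worked example, added for illustration: an extent key covers
 * [KEY_START(k), KEY_OFFSET(k)).  Cutting the front of a key spanning
 * [0, 16) at where = 4 leaves len = 16 - 4 = 12, advances each PTR_OFFSET
 * by KEY_SIZE - len = 4 sectors, and shrinks the key to [4, 16);
 * __bch_cut_back() below is the mirror image for the tail end.
 */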

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
        unsigned len = 0;

        if (bkey_cmp(where, k) >= 0)
                return false;

        BUG_ON(KEY_INODE(where) != KEY_INODE(k));

        if (bkey_cmp(where, &START_KEY(k)) > 0)
                len = KEY_OFFSET(where) - KEY_START(k);

        bkey_copy_key(k, where);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS           3
#define BKEY_EXPONENT_BITS      7
#define BKEY_MANTISSA_BITS      (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK      ((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
        unsigned        exponent:BKEY_EXPONENT_BITS;
        unsigned        m:BKEY_MID_BITS;
        unsigned        mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bkey_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE          128

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
        return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
        return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(uint8_t);
}
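
/*
 * Illustrative numbers, assuming 4k pages (added; not from the original
 * source): with page_order = 2 a btree node buffer is 16k, so
 * btree_keys_cachelines() is 16384 / 128 = 128, the auxiliary tree takes
 * 128 * sizeof(struct bkey_float) = 512 bytes, and the prev table 128 bytes.
 */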

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
        struct bset_tree *t = b->set;

        if (bset_prev_bytes(b) < PAGE_SIZE)
                kfree(t->prev);
        else
                free_pages((unsigned long) t->prev,
                           get_order(bset_prev_bytes(b)));

        if (bset_tree_bytes(b) < PAGE_SIZE)
                kfree(t->tree);
        else
                free_pages((unsigned long) t->tree,
                           get_order(bset_tree_bytes(b)));

        free_pages((unsigned long) t->data, b->page_order);

        t->prev = NULL;
        t->tree = NULL;
        t->data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
{
        struct bset_tree *t = b->set;

        BUG_ON(t->data);

        b->page_order = page_order;

        t->data = (void *) __get_free_pages(gfp, b->page_order);
        if (!t->data)
                goto err;

        t->tree = bset_tree_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_tree_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
        if (!t->tree)
                goto err;

        t->prev = bset_prev_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_prev_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
        if (!t->prev)
                goto err;

        return 0;
err:
        bch_btree_keys_free(b);
        return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
                         bool *expensive_debug_checks)
{
        unsigned i;

        b->ops = ops;
        b->expensive_debug_checks = expensive_debug_checks;
        b->nsets = 0;
        b->last_set_unwritten = 0;

        /* XXX: shouldn't be needed */
        for (i = 0; i < MAX_BSETS; i++)
                b->set[i].size = 0;
        /*
         * Second loop starts at 1 because b->set[0].data is the memory we
         * allocated
         */
        for (i = 1; i < MAX_BSETS; i++)
                b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);

/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;

                while (j * 2 < size)
                        j *= 2;
        } else
                j >>= ffz(j) + 1;

        return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
        if (j * 2 < size) {
                j = j * 2;

                while (j * 2 + 1 < size)
                        j = j * 2 + 1;
        } else
                j >>= ffs(j);

        return j;
}
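
/*
 * Worked example, added for illustration: for size == 8 (tree nodes at
 * array indices 1..7), starting from 0 and repeatedly applying
 * inorder_next() visits 4 2 5 1 6 3 7 and then returns 0; inorder_prev()
 * walks the same sequence in reverse.
 */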

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for sizes up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *      extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
        unsigned b = fls(j);
        unsigned shift = fls(size - 1) - b;

        j  ^= 1U << (b - 1);
        j <<= 1;
        j  |= 1;
        j <<= shift;

        if (j > extra)
                j -= (j - extra) >> 1;

        return j;
}
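
/*
 * Worked example, added for illustration: size == 6 means tree indices
 * 1..5 and extra == (6 - 4) << 1 == 4, and __to_inorder() maps
 * j == 1 2 3 4 5 to 4 2 5 1 3 - exactly an inorder walk of the five-node
 * heap rooted at 1 with children (2, 3) and grandchildren (4, 5).
 */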

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
        return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
        unsigned shift;

        if (j > extra)
                j += j - extra;

        shift = ffs(j);

        j >>= shift;
        j  |= roundup_pow_of_two(size) >> shift;

        return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
        return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
        unsigned long done = 0;
        ktime_t start = ktime_get();

        for (unsigned size = 2;
             size < 65536000;
             size++) {
                unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
                unsigned i = 1, j = rounddown_pow_of_two(size - 1);

                if (!(size % 4096))
                        printk(KERN_NOTICE "loop %u, %llu per us\n", size,
                               done / ktime_us_delta(ktime_get(), start));

                while (1) {
                        if (__inorder_to_tree(i, size, extra) != j)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (__to_inorder(j, size, extra) != i)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (j == rounddown_pow_of_two(size) - 1)
                                break;

                        BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

                        j = inorder_next(j, size);
                        i++;
                }

                done += size - 1;
        }
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
                                      unsigned offset)
{
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
                                         unsigned cacheline,
                                         struct bkey *k)
{
        return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
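
/*
 * Worked example for shrd128() below (added for illustration): it returns
 * the low 64 bits of the 128-bit quantity (high:low) shifted right by
 * shift, for shift in 0..63 - e.g. high = 1, low = 0, shift = 4 yields
 * 1ULL << 60.
 */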

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
        low >>= shift;
        /*
         * Shift the high word over in two steps so that shift == 0 doesn't
         * turn into an (undefined) 64-bit shift:
         */
        low |= (high << 1) << (63U - shift);
        return low;
}
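
/*
 * Note on bfloat_mantissa() below (added for clarity): f->exponent is a bit
 * index counted from the bottom of the 128-bit concatenation of the key's
 * high and low words, so exponent >> 6 picks the 64-bit word the mantissa
 * starts in, exponent & 63 the bit offset within it, and p[-1] supplies any
 * bits that spill in from the next word up.
 */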

static inline unsigned bfloat_mantissa(const struct bkey *k,
                                       struct bkey_float *f)
{
        const uint64_t *p = &k->low - (f->exponent >> 6);
        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
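
/*
 * Added summary of make_bfloat() below: l and r bound the range of keys the
 * node at j can end up comparing against, so the mantissa window is
 * positioned to contain the highest bit on which l and r differ.  If the
 * node's key and its predecessor agree on every bit in that window, no
 * mantissa comparison can tell them apart and the node is flagged as failed
 * via exponent = 127.
 */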

static void make_bfloat(struct bset_tree *t, unsigned j)
{
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
        struct bkey *p = tree_to_prev_bkey(t, j);

        struct bkey *l = is_power_of_2(j)
                ? t->data->start
                : tree_to_prev_bkey(t, j >> ffs(j));

        struct bkey *r = is_power_of_2(j + 1)
                ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));

        BUG_ON(m < l || m > r);
        BUG_ON(bkey_next(p) != m);

        if (KEY_INODE(l) != KEY_INODE(r))
                f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
        else
                f->exponent = fls64(r->low ^ l->low);

        f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

        /*
         * Setting f->exponent = 127 flags this node as failed, and causes the
         * lookup code to fall back to comparing against the original key.
         */

        if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
                f->mantissa = bfloat_mantissa(m, f) - 1;
        else
                f->exponent = 127;
}

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
        if (t != b->set) {
                unsigned j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));

                t->tree = t[-1].tree + j;
                t->prev = t[-1].prev + j;
        }

        while (t < b->set + MAX_BSETS)
                t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(b->last_set_unwritten);
        b->last_set_unwritten = 1;

        bset_alloc_tree(b, t);

        if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
                t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
        if (i != b->set->data) {
                b->set[++b->nsets].data = i;
                i->seq = b->set->data->seq;
        } else
                get_random_bytes(&i->seq, sizeof(uint64_t));

        i->magic        = magic;
        i->version      = 0;
        i->keys         = 0;

        bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);

void bch_bset_build_written_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);
        struct bkey *prev = NULL, *k = t->data->start;
        unsigned j, cacheline = 1;

        b->last_set_unwritten = 0;

        bset_alloc_tree(b, t);

        t->size = min_t(unsigned,
                        bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->set->tree + btree_keys_cachelines(b) - t->tree);

        if (t->size < 2) {
                t->size = 0;
                return;
        }

        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

        /* First we figure out where the first key in each cacheline is */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
                while (bkey_to_cacheline(t, k) < cacheline)
                        prev = k, k = bkey_next(k);

                t->prev[j] = bkey_u64s(prev);
                t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }

        while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);

        t->end = *k;

        /* Then we build the tree */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
        struct bset_tree *t;
        unsigned inorder, j = 1;

        for (t = b->set; t <= bset_tree_last(b); t++)
                if (k < bset_bkey_last(t->data))
                        goto found_set;

        BUG();
found_set:
        if (!t->size || !bset_written(b, t))
                return;

        inorder = bkey_to_cacheline(t, k);

        if (k == t->data->start)
                goto fix_left;

        if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }

        j = inorder_to_tree(inorder, t);

        if (j &&
            j < t->size &&
            k == tree_to_bkey(t, j))
fix_left:       do {
                        make_bfloat(t, j);
                        j = j * 2;
                } while (j < t->size);

        j = inorder_to_tree(inorder + 1, t);

        if (j &&
            j < t->size &&
            k == tree_to_prev_bkey(t, j))
fix_right:      do {
                        make_bfloat(t, j);
                        j = j * 2 + 1;
                } while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                      struct bset_tree *t,
                                      struct bkey *k)
{
        unsigned shift = bkey_u64s(k);
        unsigned j = bkey_to_cacheline(t, k);

        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;

        /* k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
        while (j < t->size &&
               table_to_bkey(t, j) <= k)
                j++;

        /* Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
                t->prev[j] += shift;

                if (t->prev[j] > 7) {
                        k = table_to_bkey(t, j - 1);

                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);

                        t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }

        if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;

        /* Possibly add a new entry to the end of the lookup table */

        for (k = table_to_bkey(t, t->size - 1);
             k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] =
                                bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
        if (!b->ops->key_merge)
                return false;

        /*
         * Generic header checks
         * Assumes left and right are in order
         * Left and right must be exactly aligned
         */
        if (!bch_bkey_equal_header(l, r) ||
             bkey_cmp(l, &START_KEY(r)))
                return false;

        return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);
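
/*
 * Illustrative example, not from the original source: for extent keys,
 * where KEY_OFFSET() is the end of the extent, an l spanning [0, 8) and an
 * r spanning [8, 16) satisfy bkey_cmp(l, &START_KEY(r)) == 0 (they're
 * adjacent), so key_merge() may fuse them into an l spanning [0, 16).
 */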

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
                     struct bkey *insert)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(!b->last_set_unwritten);
        BUG_ON(bset_byte_offset(b, t->data) +
               __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
               PAGE_SIZE << b->page_order);

        memmove((uint64_t *) where + bkey_u64s(insert),
                where,
                (void *) bset_bkey_last(t->data) - (void *) where);

        t->data->keys += bkey_u64s(insert);
        bkey_copy(where, insert);
        bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                              struct bkey *replace_key)
{
        unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;

        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

        m = bch_btree_iter_init(b, &iter, b->ops->is_extents
                                ? PRECEDING_KEY(&START_KEY(k))
                                : PRECEDING_KEY(k));

        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;

        status = BTREE_INSERT_STATUS_INSERT;

        while (m != bset_bkey_last(i) &&
               bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
                prev = m, m = bkey_next(m);

        /* prev is in the tree, if we merge we're done */
        status = BTREE_INSERT_STATUS_BACK_MERGE;
        if (prev &&
            bch_bkey_try_merge(b, prev, k))
                goto merged;
#if 0
        status = BTREE_INSERT_STATUS_OVERWROTE;
        if (m != bset_bkey_last(i) &&
            KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
                goto copy;
#endif
        status = BTREE_INSERT_STATUS_FRONT_MERGE;
        if (m != bset_bkey_last(i) &&
            bch_bkey_try_merge(b, k, m))
                goto copy;

        bch_bset_insert(b, m, k);
copy:   bkey_copy(m, k);
merged:
        return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);
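
/*
 * Summary added for clarity: the return value reports what happened -
 * BTREE_INSERT_STATUS_NO_INSERT when insert_fixup() decided the key
 * shouldn't be inserted (e.g. a replace that didn't match),
 * _BACK_MERGE/_FRONT_MERGE when the key was folded into a neighbouring
 * extent, and _INSERT when it went in as-is.
 */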

/* Lookup */

struct bset_search_iter {
        struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
{
        unsigned li = 0, ri = t->size;

        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;

                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
                else
                        li = m;
        }

        return (struct bset_search_iter) {
                table_to_bkey(t, li),
                ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                const struct bkey *search)
{
        struct bkey *l, *r;
        struct bkey_float *f;
        unsigned inorder, j, n = 1;

        do {
                /*
                 * Prefetch the node four levels down from here (n << 4); the
                 * sign bit of (p - t->size) becomes a mask that zeroes p when
                 * it would point past the end of the tree:
                 */
                unsigned p = n << 4;
                p &= ((int) (p - t->size)) >> 31;

                prefetch(&t->tree[p]);

                j = n;
                f = &t->tree[j];

                /*
                 * n = (f->mantissa > bfloat_mantissa())
                 *      ? j * 2
                 *      : j * 2 + 1;
                 *
                 * We need to subtract 1 from f->mantissa for the sign bit trick
                 * to work - that's done in make_bfloat()
                 */
                if (likely(f->exponent != 127))
                        n = j * 2 + (((unsigned)
                                      (f->mantissa -
                                       bfloat_mantissa(search, f))) >> 31);
                else
                        n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
                                ? j * 2
                                : j * 2 + 1;
        } while (n < t->size);

        inorder = to_inorder(j, t);

        /*
         * n would have been the node we recursed to - the low bit tells us if
         * we recursed left or recursed right.
         */
        if (n & 1) {
                l = cacheline_to_bkey(t, inorder, f->m);

                if (++inorder != t->size) {
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
                        r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);

                if (--inorder) {
                        f = &t->tree[inorder_prev(j, t->size)];
                        l = cacheline_to_bkey(t, inorder, f->m);
                } else
                        l = t->data->start;
        }

        return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
{
        struct bset_search_iter i;

        /*
         * First, we search for a cacheline, then lastly we do a linear search
         * within that cacheline.
         *
         * To search for the cacheline, there are three different possibilities:
         * * The set is too small to have a search tree, so we just do a linear
         *   search over the whole set.
         * * The set is the one we're currently inserting into; keeping a full
         *   auxiliary search tree up to date would be too expensive, so we
         *   use a much simpler lookup table to do a binary search -
         *   bset_search_write_set().
         * * Or we use the auxiliary search tree we constructed earlier -
         *   bset_search_tree()
         */

        if (unlikely(!t->size)) {
                i.l = t->data->start;
                i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
                 * of bits, and keys above and below the set it covers might
                 * differ outside those bits - so we have to special case the
                 * start and end - handle that here:
                 */

                if (unlikely(bkey_cmp(search, &t->end) >= 0))
                        return bset_bkey_last(t->data);

                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;

                i = bset_search_tree(t, search);
        } else {
                BUG_ON(!b->nsets &&
                       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

                i = bset_search_write_set(t, search);
        }

        if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);

                BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }

        while (likely(i.l != i.r) &&
               bkey_cmp(i.l, search) <= 0)
                i.l = bkey_next(i.l);

        return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
                                 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
{
        return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
        return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                         struct bkey *end)
{
        if (k != end)
                BUG_ON(!heap_add(iter,
                                 ((struct btree_iter_set) { k, end }),
                                 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
                                          struct btree_iter *iter,
                                          struct bkey *search,
                                          struct bset_tree *start)
{
        struct bkey *ret = NULL;
        iter->size = ARRAY_SIZE(iter->data);
        iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
        iter->b = b;
#endif

        for (; start <= bset_tree_last(b); start++) {
                ret = bch_bset_search(b, start, search);
                bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }

        return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
                                 struct btree_iter *iter,
                                 struct bkey *search)
{
        return __bch_btree_iter_init(b, iter, search, b->set);
}
EXPORT_SYMBOL(bch_btree_iter_init);

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
                                                 btree_iter_cmp_fn *cmp)
{
        struct btree_iter_set unused;
        struct bkey *ret = NULL;

        if (!btree_iter_end(iter)) {
                bch_btree_iter_next_check(iter);

                ret = iter->data->k;
                iter->data->k = bkey_next(iter->data->k);

                if (iter->data->k > iter->data->end) {
                        WARN_ONCE(1, "bset was corrupt!\n");
                        iter->data->k = iter->data->end;
                }

                if (iter->data->k == iter->data->end)
                        heap_pop(iter, unused, cmp);
                else
                        heap_sift(iter, 0, cmp);
        }

        return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
        return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                                        struct btree_keys *b, ptr_filter_fn fn)
{
        struct bkey *ret;

        do {
                ret = bch_btree_iter_next(iter);
        } while (ret && fn(b, ret));

        return ret;
}

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
        if (state->pool)
                mempool_destroy(state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
        spin_lock_init(&state->time.lock);

        state->page_order = page_order;
        state->crit_factor = int_sqrt(1 << page_order);

        state->pool = mempool_create_page_pool(1, page_order);
        if (!state->pool)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(bch_bset_sort_state_init);
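
/*
 * Illustrative numbers, not from the original source: for btree nodes of
 * 64 pages (page_order = 6), crit_factor is int_sqrt(64) = 8;
 * bch_btree_sort_lazy() below multiplies its sort threshold by this once
 * per bset, so larger nodes tolerate proportionally more unsorted sets
 * before resorting.
 */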

static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
{
        int i;
        struct bkey *k, *last = NULL;
        BKEY_PADDED(k) tmp;
        bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;

        /* Heapify the iterator, using our comparison function */
        for (i = iter->used / 2 - 1; i >= 0; --i)
                heap_sift(iter, i, b->ops->sort_cmp);

        while (!btree_iter_end(iter)) {
                if (b->ops->sort_fixup && fixup)
                        k = b->ops->sort_fixup(iter, &tmp.k);
                else
                        k = NULL;

                if (!k)
                        k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

                if (bad(b, k))
                        continue;

                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
                } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
        }

        out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

        pr_debug("sorted %i keys", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                         unsigned start, unsigned order, bool fixup,
                         struct bset_sort_state *state)
{
        uint64_t start_time;
        bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
                                                     order);
        if (!out) {
                struct page *outp;

                BUG_ON(order > state->page_order);

                outp = mempool_alloc(state->pool, GFP_NOIO);
                out = page_address(outp);
                used_mempool = true;
                order = state->page_order;
        }

        start_time = local_clock();

        btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;

        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */
                out->magic      = b->set->data->magic;
                out->seq        = b->set->data->seq;
                out->version    = b->set->data->version;
                swap(out, b->set->data);
        } else {
                b->set[start].data->keys = out->keys;
                memcpy(b->set[start].data->start, out->start,
                       (void *) bset_bkey_last(out) - (void *) out->start);
        }

        if (used_mempool)
                mempool_free(virt_to_page(out), state->pool);
        else
                free_pages((unsigned long) out, order);

        bch_bset_build_written_tree(b);

        if (!start)
                bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
                            struct bset_sort_state *state)
{
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);

        __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

        if (start) {
                unsigned i;

                for (i = start; i <= b->nsets; i++)
                        keys += b->set[i].data->keys;

                order = get_order(__set_bytes(b->set->data, keys));
        }

        __btree_sort(b, &iter, start, order, false, state);

        EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
                                    struct btree_iter *iter,
                                    struct bset_sort_state *state)
{
        __btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
                         struct bset_sort_state *state)
{
        uint64_t start_time = local_clock();

        struct btree_iter iter;
        bch_btree_iter_init(b, &iter, NULL);

        btree_mergesort(b, new->set->data, &iter, false, true);

        bch_time_stats_update(&state->time, start_time);

        new->set->size = 0; /* XXX: why? */
}

#define SORT_CRIT       (4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
        unsigned crit = SORT_CRIT;
        int i;

        /* Don't sort if nothing to do */
        if (!b->nsets)
                goto out;

        for (i = b->nsets - 1; i >= 0; --i) {
                crit *= state->crit_factor;

                if (b->set[i].data->keys < crit) {
                        bch_btree_sort_partial(b, i, state);
                        return;
                }
        }

        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
                bch_btree_sort(b, state);
                return;
        }

out:
        bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);
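
/*
 * Worked example, added for illustration: with crit_factor = 2 and three
 * bsets (b->nsets == 2), the loop above checks set 1 against
 * crit = 2 * SORT_CRIT = 1024 u64s, then set 0 against 2048; the first set
 * smaller than its threshold has everything from it onwards resorted into
 * a single set.
 */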

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
        unsigned i;

        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;

                if (bset_written(b, t)) {
                        stats->sets_written++;
                        stats->bytes_written += bytes;

                        stats->floats += t->size - 1;

                        for (j = 1; j < t->size; j++)
                                if (t->tree[j].exponent == 127)
                                        stats->failed++;
                } else {
                        stats->sets_unwritten++;
                        stats->bytes_unwritten += bytes;
                }
        }
}
cafe5635 | 1333 | } |