net/ceph/crush/mapper.c
1 /*
2 * Ceph - scalable distributed file system
3 *
4 * Copyright (C) 2015 Intel Corporation All Rights Reserved
5 *
6 * This is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License version 2.1, as published by the Free Software
9 * Foundation. See file COPYING.
10 *
11 */
12
13 #ifdef __KERNEL__
14 # include <linux/string.h>
15 # include <linux/slab.h>
16 # include <linux/bug.h>
17 # include <linux/kernel.h>
18 # include <linux/crush/crush.h>
19 # include <linux/crush/hash.h>
20 # include <linux/crush/mapper.h>
21 #else
22 # include "crush_compat.h"
23 # include "crush.h"
24 # include "hash.h"
25 # include "mapper.h"
26 #endif
27 #include "crush_ln_table.h"
28
29 #define dprintk(args...) /* printf(args) */
30
31 /*
32 * Implement the core CRUSH mapping algorithm.
33 */
34
35 /**
36 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
37 * @map: the crush_map
38 * @ruleset: the storage ruleset id (user defined)
39 * @type: storage ruleset type (user defined)
40 * @size: output set size
41 */
42 int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
43 {
44 __u32 i;
45
46 for (i = 0; i < map->max_rules; i++) {
47 if (map->rules[i] &&
48 map->rules[i]->mask.ruleset == ruleset &&
49 map->rules[i]->mask.type == type &&
50 map->rules[i]->mask.min_size <= size &&
51 map->rules[i]->mask.max_size >= size)
52 return i;
53 }
54 return -1;
55 }
56
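/*
 * Illustrative usage sketch (not part of the upstream file): a caller
 * normally resolves a pool's ruleset into a concrete rule id before
 * mapping, e.g.
 *
 *	ruleno = crush_find_rule(map, pool_ruleset, pool_type, pool_size);
 *	if (ruleno < 0)
 *		return -ENOENT;   (no rule matches this ruleset/type/size)
 *
 * pool_ruleset, pool_type and pool_size are hypothetical names here;
 * the returned value is an index into map->rules[], or -1 on failure.
 */
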
57 /*
58 * bucket choose methods
59 *
60 * For each bucket algorithm, we have a "choose" method that, given a
61 * crush input @x and replica position (usually, position in output set) @r,
62 * will produce an item in the bucket.
63 */
64
65 /*
66 * Choose based on a random permutation of the bucket.
67 *
68 * We used to use some prime number arithmetic to do this, but it
69 * wasn't very random, and had some other bad behaviors. Instead, we
70 * calculate an actual random permutation of the bucket members.
71 * Since this is expensive, we optimize for the r=0 case, which
72 * captures the vast majority of calls.
73 */
74 static int bucket_perm_choose(const struct crush_bucket *bucket,
75 struct crush_work_bucket *work,
76 int x, int r)
77 {
78 unsigned int pr = r % bucket->size;
79 unsigned int i, s;
80
81 /* start a new permutation if @x has changed */
82 if (work->perm_x != (__u32)x || work->perm_n == 0) {
83 dprintk("bucket %d new x=%d\n", bucket->id, x);
84 work->perm_x = x;
85
86 /* optimize common r=0 case */
87 if (pr == 0) {
88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
89 bucket->size;
90 work->perm[0] = s;
91 work->perm_n = 0xffff; /* magic value, see below */
92 goto out;
93 }
94
95 for (i = 0; i < bucket->size; i++)
96 work->perm[i] = i;
97 work->perm_n = 0;
98 } else if (work->perm_n == 0xffff) {
99 /* clean up after the r=0 case above */
100 for (i = 1; i < bucket->size; i++)
101 work->perm[i] = i;
102 work->perm[work->perm[0]] = 0;
103 work->perm_n = 1;
104 }
105
106 /* calculate permutation up to pr */
107 for (i = 0; i < work->perm_n; i++)
108 dprintk(" perm_choose have %d: %d\n", i, work->perm[i]);
109 while (work->perm_n <= pr) {
110 unsigned int p = work->perm_n;
111 /* no point in swapping the final entry */
112 if (p < bucket->size - 1) {
113 i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
114 (bucket->size - p);
115 if (i) {
116 unsigned int t = work->perm[p + i];
117 work->perm[p + i] = work->perm[p];
118 work->perm[p] = t;
119 }
120 dprintk(" perm_choose swap %d with %d\n", p, p+i);
121 }
122 work->perm_n++;
123 }
124 for (i = 0; i < bucket->size; i++)
125 dprintk(" perm_choose %d: %d\n", i, work->perm[i]);
126
127 s = work->perm[pr];
128 out:
129 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
130 bucket->size, x, r, pr, s);
131 return bucket->items[s];
132 }
133
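/*
 * Worked sketch of the permutation above (illustrative, for a 4-item
 * bucket): an r=0 call hashes once and records only perm[0], marking
 * perm_n with the 0xffff sentinel.  If a later call uses the same x
 * with pr=2, the sentinel branch rebuilds the identity permutation,
 * swaps entry perm[0] back into place (e.g. perm = {3, 1, 2, 0} when
 * perm[0] was 3), and the while loop then performs Fisher-Yates style
 * swaps until perm_n > pr:
 *
 *	p=1: i = hash(x, id, 1) % 3; swap perm[1], perm[1+i]
 *	p=2: i = hash(x, id, 2) % 2; swap perm[2], perm[2+i]
 *
 * so each prefix of perm[] is a uniformly drawn partial permutation
 * and entries already computed are never rehashed for the same x.
 */
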
134 /* uniform */
135 static int bucket_uniform_choose(const struct crush_bucket_uniform *bucket,
136 struct crush_work_bucket *work, int x, int r)
137 {
138 return bucket_perm_choose(&bucket->h, work, x, r);
139 }
140
141 /* list */
142 static int bucket_list_choose(const struct crush_bucket_list *bucket,
143 int x, int r)
144 {
145 int i;
146
147 for (i = bucket->h.size-1; i >= 0; i--) {
148 __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
149 r, bucket->h.id);
150 w &= 0xffff;
151 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
152 "sw %x rand %llx\n",
153 i, x, r, bucket->h.items[i], bucket->item_weights[i],
154 bucket->sum_weights[i], w);
155 w *= bucket->sum_weights[i];
156 w = w >> 16;
157 /*dprintk(" scaled %llx\n", w);*/
158 if (w < bucket->item_weights[i]) {
159 return bucket->h.items[i];
160 }
161 }
162
163 dprintk("bad list sums for bucket %d\n", bucket->h.id);
164 return bucket->h.items[0];
165 }
166
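/*
 * Probability sketch for the loop above: sum_weights[i] is the total
 * weight of items 0..i, so item i (scanned from the tail) is kept
 * with conditional probability item_weights[i] / sum_weights[i].
 * Those factors telescope, giving every item an overall chance of
 * item_weights[i] / sum_weights[size-1], i.e. selection proportional
 * to weight, while appending a new item at the tail only moves inputs
 * onto the new item rather than reshuffling the old ones.
 */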
167
168 /* (binary) tree */
169 static int height(int n)
170 {
171 int h = 0;
172 while ((n & 1) == 0) {
173 h++;
174 n = n >> 1;
175 }
176 return h;
177 }
178
179 static int left(int x)
180 {
181 int h = height(x);
182 return x - (1 << (h-1));
183 }
184
185 static int right(int x)
186 {
187 int h = height(x);
188 return x + (1 << (h-1));
189 }
190
191 static int terminal(int x)
192 {
193 return x & 1;
194 }
195
196 static int bucket_tree_choose(const struct crush_bucket_tree *bucket,
197 int x, int r)
198 {
199 int n;
200 __u32 w;
201 __u64 t;
202
203 /* start at root */
204 n = bucket->num_nodes >> 1;
205
206 while (!terminal(n)) {
207 int l;
208 /* pick point in [0, w) */
209 w = bucket->node_weights[n];
210 t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
211 bucket->h.id) * (__u64)w;
212 t = t >> 32;
213
214 /* descend to the left or right? */
215 l = left(n);
216 if (t < bucket->node_weights[l])
217 n = l;
218 else
219 n = right(n);
220 }
221
222 return bucket->h.items[n >> 1];
223 }
224
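/*
 * Node numbering used by the helpers above (sketch): weights live in
 * node_weights[1..num_nodes-1], odd indices are leaves, and leaf n
 * holds items[n >> 1].  For a 4-item bucket (num_nodes = 8) the root
 * is node 4 with children 2 and 6, and the leaves are 1, 3, 5, 7.
 * Each internal node's weight is the sum of its subtree, so testing
 * the hash-scaled draw t against the left child's weight descends
 * left with probability proportional to that subtree's weight.
 */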
225
226 /* straw */
227
228 static int bucket_straw_choose(const struct crush_bucket_straw *bucket,
229 int x, int r)
230 {
231 __u32 i;
232 int high = 0;
233 __u64 high_draw = 0;
234 __u64 draw;
235
236 for (i = 0; i < bucket->h.size; i++) {
237 draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
238 draw &= 0xffff;
239 draw *= bucket->straws[i];
240 if (i == 0 || draw > high_draw) {
241 high = i;
242 high_draw = draw;
243 }
244 }
245 return bucket->h.items[high];
246 }
247
248 /* compute 2^44*log2(input+1) */
249 static __u64 crush_ln(unsigned int xin)
250 {
251 unsigned int x = xin;
252 int iexpon, index1, index2;
253 __u64 RH, LH, LL, xl64, result;
254
255 x++;
256
257 /* normalize input */
258 iexpon = 15;
259
260 /*
261 * figure out number of bits we need to shift and
262 * do it in one step instead of iteratively
263 */
264 if (!(x & 0x18000)) {
265 int bits = __builtin_clz(x & 0x1FFFF) - 16;
266 x <<= bits;
267 iexpon = 15 - bits;
268 }
269
270 index1 = (x >> 8) << 1;
271 /* RH ~ 2^56/index1 */
272 RH = __RH_LH_tbl[index1 - 256];
273 /* LH ~ 2^48 * log2(index1/256) */
274 LH = __RH_LH_tbl[index1 + 1 - 256];
275
276 /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
277 xl64 = (__s64)x * RH;
278 xl64 >>= 48;
279
280 result = iexpon;
281 result <<= (12 + 32);
282
283 index2 = xl64 & 0xff;
284 /* LL ~ 2^48*log2(1.0+index2/2^15) */
285 LL = __LL_tbl[index2];
286
287 LH = LH + LL;
288
289 LH >>= (48 - 12 - 32);
290 result += LH;
291
292 return result;
293 }
294
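/*
 * Fixed-point decomposition used above (illustrative sketch, derived
 * from the table comments): with x normalized into [2^15, 2^16),
 *
 *	log2(xin + 1) = iexpon + log2(index1/256) + log2(1 + index2/2^15)
 *
 * where index1 comes from the top bits of x and index2 is the
 * residual fraction recovered via the reciprocal entry RH ~ 2^56/index1.
 * LH and LL hold the last two terms scaled by 2^48, so
 *
 *	result = (iexpon << 44) + ((LH + LL) >> 4)
 *
 * i.e. 2^44 * log2(input + 1), matching the function comment.
 */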
295
296 /*
297 * straw2
298 *
299 * for reference, see:
300 *
301 * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
302 *
303 */
304
305 static int bucket_straw2_choose(const struct crush_bucket_straw2 *bucket,
306 int x, int r)
307 {
308 unsigned int i, high = 0;
309 unsigned int u;
310 unsigned int w;
311 __s64 ln, draw, high_draw = 0;
312
313 for (i = 0; i < bucket->h.size; i++) {
314 w = bucket->item_weights[i];
315 if (w) {
316 u = crush_hash32_3(bucket->h.hash, x,
317 bucket->h.items[i], r);
318 u &= 0xffff;
319
320 /*
321 * for some reason slightly less than 0x10000 produces
322 * a slightly more accurate distribution... probably a
323 * rounding effect.
324 *
325 * the natural log lookup table maps [0,0xffff]
326 * (corresponding to real numbers [1/0x10000, 1]) to
327 * [0, 0xffffffffffff] (corresponding to real numbers
328 * [-11.090355,0]).
329 */
330 ln = crush_ln(u) - 0x1000000000000ll;
331
332 /*
333 * divide by 16.16 fixed-point weight. note
334 * that the ln value is negative, so a larger
335 * weight means a larger (less negative) value
336 * for draw.
337 */
338 draw = div64_s64(ln, w);
339 } else {
340 draw = S64_MIN;
341 }
342
343 if (i == 0 || draw > high_draw) {
344 high = i;
345 high_draw = draw;
346 }
347 }
348
349 return bucket->h.items[high];
350 }
351
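/*
 * Why the draw above works (sketch): with u uniform in (0, 1],
 * -ln(u)/w is exponentially distributed with rate w, and the item
 * whose exponential sample is smallest (equivalently, whose ln(u)/w
 * is largest, since ln(u) <= 0) wins with probability w_i / sum(w_j).
 * The log base only scales every draw equally, so the log2-based
 * crush_ln() works just as well.  Each item's draw depends only on
 * (x, item, r) and its own weight, so changing one weight moves
 * inputs only onto or off of that item, which is the improvement
 * straw2 makes over the original straw bucket.
 */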
352
353 static int crush_bucket_choose(const struct crush_bucket *in,
354 struct crush_work_bucket *work,
355 int x, int r)
356 {
357 dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
358 BUG_ON(in->size == 0);
359 switch (in->alg) {
360 case CRUSH_BUCKET_UNIFORM:
361 return bucket_uniform_choose(
362 (const struct crush_bucket_uniform *)in,
363 work, x, r);
364 case CRUSH_BUCKET_LIST:
365 return bucket_list_choose((const struct crush_bucket_list *)in,
366 x, r);
367 case CRUSH_BUCKET_TREE:
368 return bucket_tree_choose((const struct crush_bucket_tree *)in,
369 x, r);
370 case CRUSH_BUCKET_STRAW:
371 return bucket_straw_choose(
372 (const struct crush_bucket_straw *)in,
373 x, r);
374 case CRUSH_BUCKET_STRAW2:
375 return bucket_straw2_choose(
376 (const struct crush_bucket_straw2 *)in,
377 x, r);
378 default:
379 dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
380 return in->items[0];
381 }
382 }
383
384 /*
385 * true if device is marked "out" (failed, fully offloaded)
386 * of the cluster
387 */
388 static int is_out(const struct crush_map *map,
389 const __u32 *weight, int weight_max,
390 int item, int x)
391 {
392 if (item >= weight_max)
393 return 1;
394 if (weight[item] >= 0x10000)
395 return 0;
396 if (weight[item] == 0)
397 return 1;
398 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
399 < weight[item])
400 return 0;
401 return 1;
402 }
403
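/*
 * Note (illustrative): device weights here are 16.16 fixed point, so
 * 0x10000 means fully "in" and 0 means fully "out".  A partially
 * reweighted device, e.g. weight[item] == 0x8000, rejects roughly
 * half of all inputs, and because the test hashes only (x, item) the
 * in/out answer is stable for a given input and device.
 */
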
404 /**
405 * crush_choose_firstn - choose numrep distinct items of given type
406 * @map: the crush_map
407 * @bucket: the bucket we are choosing an item from
408 * @x: crush input value
409 * @numrep: the number of items to choose
410 * @type: the type of item to choose
411 * @out: pointer to output vector
412 * @outpos: our position in that vector
413 * @out_size: size of the out vector
414 * @tries: number of attempts to make
415 * @recurse_tries: number of attempts to have recursive chooseleaf make
416 * @local_retries: localized retries
417 * @local_fallback_retries: localized fallback retries
418 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
419 * @stable: stable mode starts rep=0 in the recursive call for all replicas
420 * @vary_r: pass r to recursive calls
421 * @out2: second output vector for leaf items (if @recurse_to_leaf)
422 * @parent_r: r value passed from the parent
423 */
424 static int crush_choose_firstn(const struct crush_map *map,
425 struct crush_work *work,
426 const struct crush_bucket *bucket,
427 const __u32 *weight, int weight_max,
428 int x, int numrep, int type,
429 int *out, int outpos,
430 int out_size,
431 unsigned int tries,
432 unsigned int recurse_tries,
433 unsigned int local_retries,
434 unsigned int local_fallback_retries,
435 int recurse_to_leaf,
436 unsigned int vary_r,
437 unsigned int stable,
438 int *out2,
439 int parent_r)
440 {
441 int rep;
442 unsigned int ftotal, flocal;
443 int retry_descent, retry_bucket, skip_rep;
444 const struct crush_bucket *in = bucket;
445 int r;
446 int i;
447 int item = 0;
448 int itemtype;
449 int collide, reject;
450 int count = out_size;
451
452 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
453 recurse_to_leaf ? "_LEAF" : "",
454 bucket->id, x, outpos, numrep,
455 tries, recurse_tries, local_retries, local_fallback_retries,
456 parent_r, stable);
457
458 for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
459 /* keep trying until we get a non-out, non-colliding item */
460 ftotal = 0;
461 skip_rep = 0;
462 do {
463 retry_descent = 0;
464 in = bucket; /* initial bucket */
465
466 /* choose through intervening buckets */
467 flocal = 0;
468 do {
469 collide = 0;
470 retry_bucket = 0;
471 r = rep + parent_r;
472 /* r' = r + f_total */
473 r += ftotal;
474
475 /* bucket choose */
476 if (in->size == 0) {
477 reject = 1;
478 goto reject;
479 }
480 if (local_fallback_retries > 0 &&
481 flocal >= (in->size>>1) &&
482 flocal > local_fallback_retries)
483 item = bucket_perm_choose(
484 in, work->work[-1-in->id],
485 x, r);
486 else
487 item = crush_bucket_choose(
488 in, work->work[-1-in->id],
489 x, r);
490 if (item >= map->max_devices) {
491 dprintk(" bad item %d\n", item);
492 skip_rep = 1;
493 break;
494 }
495
496 /* desired type? */
497 if (item < 0)
498 itemtype = map->buckets[-1-item]->type;
499 else
500 itemtype = 0;
501 dprintk(" item %d type %d\n", item, itemtype);
502
503 /* keep going? */
504 if (itemtype != type) {
505 if (item >= 0 ||
506 (-1-item) >= map->max_buckets) {
507 dprintk(" bad item type %d\n", type);
508 skip_rep = 1;
509 break;
510 }
511 in = map->buckets[-1-item];
512 retry_bucket = 1;
513 continue;
514 }
515
516 /* collision? */
517 for (i = 0; i < outpos; i++) {
518 if (out[i] == item) {
519 collide = 1;
520 break;
521 }
522 }
523
524 reject = 0;
525 if (!collide && recurse_to_leaf) {
526 if (item < 0) {
527 int sub_r;
528 if (vary_r)
529 sub_r = r >> (vary_r-1);
530 else
531 sub_r = 0;
532 if (crush_choose_firstn(
533 map,
534 work,
535 map->buckets[-1-item],
536 weight, weight_max,
537 x, stable ? 1 : outpos+1, 0,
538 out2, outpos, count,
539 recurse_tries, 0,
540 local_retries,
541 local_fallback_retries,
542 0,
543 vary_r,
544 stable,
545 NULL,
546 sub_r) <= outpos)
547 /* didn't get leaf */
548 reject = 1;
549 } else {
550 /* we already have a leaf! */
551 out2[outpos] = item;
552 }
553 }
554
555 if (!reject && !collide) {
556 /* out? */
557 if (itemtype == 0)
558 reject = is_out(map, weight,
559 weight_max,
560 item, x);
561 }
562
563 reject:
564 if (reject || collide) {
565 ftotal++;
566 flocal++;
567
568 if (collide && flocal <= local_retries)
569 /* retry locally a few times */
570 retry_bucket = 1;
571 else if (local_fallback_retries > 0 &&
572 flocal <= in->size + local_fallback_retries)
573 /* exhaustive bucket search */
574 retry_bucket = 1;
575 else if (ftotal < tries)
576 /* then retry descent */
577 retry_descent = 1;
578 else
579 /* else give up */
580 skip_rep = 1;
581 dprintk(" reject %d collide %d "
582 "ftotal %u flocal %u\n",
583 reject, collide, ftotal,
584 flocal);
585 }
586 } while (retry_bucket);
587 } while (retry_descent);
588
589 if (skip_rep) {
590 dprintk("skip rep\n");
591 continue;
592 }
593
594 dprintk("CHOOSE got %d\n", item);
595 out[outpos] = item;
596 outpos++;
597 count--;
598 #ifndef __KERNEL__
599 if (map->choose_tries && ftotal <= map->choose_total_tries)
600 map->choose_tries[ftotal]++;
601 #endif
602 }
603
604 dprintk("CHOOSE returns %d\n", outpos);
605 return outpos;
606 }
607
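/*
 * Retry structure of crush_choose_firstn() in brief (sketch): for
 * each replica we re-descend from @bucket with r' = r + ftotal, so a
 * rejected or colliding choice hashes to a different item on the
 * next attempt.  Collisions first retry within the current bucket
 * (up to @local_retries), then fall back to bucket_perm_choose() for
 * an exhaustive walk of the bucket, and only after @tries total
 * failures is the replica skipped.  The firstn result is therefore
 * compacted: failed replicas simply shrink the output set.
 */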
608
609 /**
610 * crush_choose_indep - alternative breadth-first positionally stable mapping
611 *
612 */
613 static void crush_choose_indep(const struct crush_map *map,
614 struct crush_work *work,
615 const struct crush_bucket *bucket,
616 const __u32 *weight, int weight_max,
617 int x, int left, int numrep, int type,
618 int *out, int outpos,
619 unsigned int tries,
620 unsigned int recurse_tries,
621 int recurse_to_leaf,
622 int *out2,
623 int parent_r)
624 {
625 const struct crush_bucket *in = bucket;
626 int endpos = outpos + left;
627 int rep;
628 unsigned int ftotal;
629 int r;
630 int i;
631 int item = 0;
632 int itemtype;
633 int collide;
634
635 dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
636 bucket->id, x, outpos, numrep);
637
638 /* initially my result is undefined */
639 for (rep = outpos; rep < endpos; rep++) {
640 out[rep] = CRUSH_ITEM_UNDEF;
641 if (out2)
642 out2[rep] = CRUSH_ITEM_UNDEF;
643 }
644
645 for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) {
646 #ifdef DEBUG_INDEP
647 if (out2 && ftotal) {
648 dprintk("%u %d a: ", ftotal, left);
649 for (rep = outpos; rep < endpos; rep++) {
650 dprintk(" %d", out[rep]);
651 }
652 dprintk("\n");
653 dprintk("%u %d b: ", ftotal, left);
654 for (rep = outpos; rep < endpos; rep++) {
655 dprintk(" %d", out2[rep]);
656 }
657 dprintk("\n");
658 }
659 #endif
660 for (rep = outpos; rep < endpos; rep++) {
661 if (out[rep] != CRUSH_ITEM_UNDEF)
662 continue;
663
664 in = bucket; /* initial bucket */
665
666 /* choose through intervening buckets */
667 for (;;) {
668 /* note: we base the choice on the position
669 * even in the nested call. that means that
670 * if the first layer chooses the same bucket
671 * in a different position, we will tend to
672 * choose a different item in that bucket.
673 * this will involve more devices in data
674 * movement and tend to distribute the load.
675 */
676 r = rep + parent_r;
677
678 /* be careful */
679 if (in->alg == CRUSH_BUCKET_UNIFORM &&
680 in->size % numrep == 0)
681 /* r'=r+(n+1)*f_total */
682 r += (numrep+1) * ftotal;
683 else
684 /* r' = r + n*f_total */
685 r += numrep * ftotal;
686
687 /* bucket choose */
688 if (in->size == 0) {
689 dprintk(" empty bucket\n");
690 break;
691 }
692
693 item = crush_bucket_choose(
694 in, work->work[-1-in->id],
695 x, r);
696 if (item >= map->max_devices) {
697 dprintk(" bad item %d\n", item);
698 out[rep] = CRUSH_ITEM_NONE;
699 if (out2)
700 out2[rep] = CRUSH_ITEM_NONE;
701 left--;
702 break;
703 }
704
705 /* desired type? */
706 if (item < 0)
707 itemtype = map->buckets[-1-item]->type;
708 else
709 itemtype = 0;
710 dprintk(" item %d type %d\n", item, itemtype);
711
712 /* keep going? */
713 if (itemtype != type) {
714 if (item >= 0 ||
715 (-1-item) >= map->max_buckets) {
716 dprintk(" bad item type %d\n", type);
717 out[rep] = CRUSH_ITEM_NONE;
718 if (out2)
719 out2[rep] =
720 CRUSH_ITEM_NONE;
721 left--;
722 break;
723 }
724 in = map->buckets[-1-item];
725 continue;
726 }
727
728 /* collision? */
729 collide = 0;
730 for (i = outpos; i < endpos; i++) {
731 if (out[i] == item) {
732 collide = 1;
733 break;
734 }
735 }
736 if (collide)
737 break;
738
739 if (recurse_to_leaf) {
740 if (item < 0) {
741 crush_choose_indep(
742 map,
743 work,
744 map->buckets[-1-item],
745 weight, weight_max,
746 x, 1, numrep, 0,
747 out2, rep,
748 recurse_tries, 0,
749 0, NULL, r);
750 if (out2[rep] == CRUSH_ITEM_NONE) {
751 /* placed nothing; no leaf */
752 break;
753 }
754 } else {
755 /* we already have a leaf! */
756 out2[rep] = item;
757 }
758 }
759
760 /* out? */
761 if (itemtype == 0 &&
762 is_out(map, weight, weight_max, item, x))
763 break;
764
765 /* yay! */
766 out[rep] = item;
767 left--;
768 break;
769 }
770 }
771 }
772 for (rep = outpos; rep < endpos; rep++) {
773 if (out[rep] == CRUSH_ITEM_UNDEF) {
774 out[rep] = CRUSH_ITEM_NONE;
775 }
776 if (out2 && out2[rep] == CRUSH_ITEM_UNDEF) {
777 out2[rep] = CRUSH_ITEM_NONE;
778 }
779 }
780 #ifndef __KERNEL__
781 if (map->choose_tries && ftotal <= map->choose_total_tries)
782 map->choose_tries[ftotal]++;
783 #endif
784 #ifdef DEBUG_INDEP
785 if (out2) {
786 dprintk("%u %d a: ", ftotal, left);
787 for (rep = outpos; rep < endpos; rep++) {
788 dprintk(" %d", out[rep]);
789 }
790 dprintk("\n");
791 dprintk("%u %d b: ", ftotal, left);
792 for (rep = outpos; rep < endpos; rep++) {
793 dprintk(" %d", out2[rep]);
794 }
795 dprintk("\n");
796 }
797 #endif
798 }
799
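/*
 * Contrast with crush_choose_firstn() (sketch): indep fills a fixed
 * window out[outpos..outpos+left) and records failures as
 * CRUSH_ITEM_NONE instead of compacting, so each position maps to a
 * stable item across retries, which is what erasure-coded placements
 * need.  Retries bump r by numrep * ftotal (or (numrep + 1) * ftotal
 * for uniform buckets whose size is a multiple of numrep) so that
 * different positions draw from disjoint hash streams on every pass.
 */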
800
801 /*
802 * This takes a chunk of memory and sets it up to be a shiny new
803 * working area for a CRUSH placement computation. It must be called
804 * on any newly allocated memory before passing it in to
805 * crush_do_rule. It may be used repeatedly after that, so long as the
806 * map has not changed. If the map /has/ changed, you must make sure
807 * the working size is no smaller than what was allocated and re-run
808 * crush_init_workspace.
809 *
810 * If you do retain the working space between calls to crush, make it
811 * thread-local.
812 */
813 void crush_init_workspace(const struct crush_map *map, void *v)
814 {
815 struct crush_work *w = v;
816 __s32 b;
817
818 /*
819 * We work by moving through the available space and setting
820 * values and pointers as we go.
821 *
822 * It's a bit like Forth's use of the 'allot' word since we
823 * set the pointer first and then reserve the space for it to
824 * point to by incrementing the pointer.
825 */
826 v += sizeof(struct crush_work *);
827 w->work = v;
828 v += map->max_buckets * sizeof(struct crush_work_bucket *);
829 for (b = 0; b < map->max_buckets; ++b) {
830 if (!map->buckets[b])
831 continue;
832
833 w->work[b] = v;
834 switch (map->buckets[b]->alg) {
835 default:
836 v += sizeof(struct crush_work_bucket);
837 break;
838 }
839 w->work[b]->perm_x = 0;
840 w->work[b]->perm_n = 0;
841 w->work[b]->perm = v;
842 v += map->buckets[b]->size * sizeof(__u32);
843 }
844 BUG_ON(v - (void *)w != map->working_size);
845 }
846
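/*
 * Resulting layout (sketch) for a map with B = max_buckets:
 *
 *	struct crush_work header (the work[] table pointer)
 *	struct crush_work_bucket *work[B]
 *	then, per defined bucket: a crush_work_bucket followed by
 *	__u32 perm[bucket->size]
 *
 * which must add up to exactly map->working_size bytes, as the
 * BUG_ON() asserts.  Callers that cache this area between
 * crush_do_rule() invocations should keep one copy per thread, per
 * the comment above.
 */
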
847 /**
848 * crush_do_rule - calculate a mapping with the given input and rule
849 * @map: the crush_map
850 * @ruleno: the rule id
851 * @x: hash input
852 * @result: pointer to result vector
853 * @result_max: maximum result size
854 * @weight: weight vector (for map leaves)
855 * @weight_max: size of weight vector
856 * @cwin: pointer to at least crush_work_size() bytes of memory
857 */
858 int crush_do_rule(const struct crush_map *map,
859 int ruleno, int x, int *result, int result_max,
860 const __u32 *weight, int weight_max,
861 void *cwin)
862 {
863 int result_len;
864 struct crush_work *cw = cwin;
865 int *a = cwin + map->working_size;
866 int *b = a + result_max;
867 int *c = b + result_max;
868 int *w = a;
869 int *o = b;
870 int recurse_to_leaf;
871 int wsize = 0;
872 int osize;
873 int *tmp;
874 const struct crush_rule *rule;
875 __u32 step;
876 int i, j;
877 int numrep;
878 int out_size;
879 /*
880 * the original choose_total_tries value was off by one (it
881 * counted "retries" and not "tries"). add one.
882 */
883 int choose_tries = map->choose_total_tries + 1;
884 int choose_leaf_tries = 0;
885 /*
886 * the local tries values were counted as "retries", though,
887 * and need no adjustment
888 */
889 int choose_local_retries = map->choose_local_tries;
890 int choose_local_fallback_retries = map->choose_local_fallback_tries;
891
892 int vary_r = map->chooseleaf_vary_r;
893 int stable = map->chooseleaf_stable;
894
895 if ((__u32)ruleno >= map->max_rules) {
896 dprintk(" bad ruleno %d\n", ruleno);
897 return 0;
898 }
899
900 rule = map->rules[ruleno];
901 result_len = 0;
902
903 for (step = 0; step < rule->len; step++) {
904 int firstn = 0;
905 const struct crush_rule_step *curstep = &rule->steps[step];
906
907 switch (curstep->op) {
908 case CRUSH_RULE_TAKE:
909 if ((curstep->arg1 >= 0 &&
910 curstep->arg1 < map->max_devices) ||
911 (-1-curstep->arg1 >= 0 &&
912 -1-curstep->arg1 < map->max_buckets &&
913 map->buckets[-1-curstep->arg1])) {
914 w[0] = curstep->arg1;
915 wsize = 1;
916 } else {
917 dprintk(" bad take value %d\n", curstep->arg1);
918 }
919 break;
920
921 case CRUSH_RULE_SET_CHOOSE_TRIES:
922 if (curstep->arg1 > 0)
923 choose_tries = curstep->arg1;
924 break;
925
926 case CRUSH_RULE_SET_CHOOSELEAF_TRIES:
927 if (curstep->arg1 > 0)
928 choose_leaf_tries = curstep->arg1;
929 break;
930
931 case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES:
932 if (curstep->arg1 >= 0)
933 choose_local_retries = curstep->arg1;
934 break;
935
936 case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES:
937 if (curstep->arg1 >= 0)
938 choose_local_fallback_retries = curstep->arg1;
939 break;
940
941 case CRUSH_RULE_SET_CHOOSELEAF_VARY_R:
942 if (curstep->arg1 >= 0)
943 vary_r = curstep->arg1;
944 break;
945
946 case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
947 if (curstep->arg1 >= 0)
948 stable = curstep->arg1;
949 break;
950
951 case CRUSH_RULE_CHOOSELEAF_FIRSTN:
952 case CRUSH_RULE_CHOOSE_FIRSTN:
953 firstn = 1;
954 /* fall through */
955 case CRUSH_RULE_CHOOSELEAF_INDEP:
956 case CRUSH_RULE_CHOOSE_INDEP:
957 if (wsize == 0)
958 break;
959
960 recurse_to_leaf =
961 curstep->op ==
962 CRUSH_RULE_CHOOSELEAF_FIRSTN ||
963 curstep->op ==
964 CRUSH_RULE_CHOOSELEAF_INDEP;
965
966 /* reset output */
967 osize = 0;
968
969 for (i = 0; i < wsize; i++) {
970 int bno;
971 /*
972 * see CRUSH_N, CRUSH_N_MINUS macros.
973 * basically, numrep <= 0 means relative to
974 * the provided result_max
975 */
976 numrep = curstep->arg1;
977 if (numrep <= 0) {
978 numrep += result_max;
979 if (numrep <= 0)
980 continue;
981 }
982 j = 0;
983 /* make sure bucket id is valid */
984 bno = -1 - w[i];
985 if (bno < 0 || bno >= map->max_buckets) {
986 /* w[i] is probably CRUSH_ITEM_NONE */
987 dprintk(" bad w[i] %d\n", w[i]);
988 continue;
989 }
990 if (firstn) {
991 int recurse_tries;
992 if (choose_leaf_tries)
993 recurse_tries =
994 choose_leaf_tries;
995 else if (map->chooseleaf_descend_once)
996 recurse_tries = 1;
997 else
998 recurse_tries = choose_tries;
999 osize += crush_choose_firstn(
1000 map,
1001 cw,
1002 map->buckets[bno],
1003 weight, weight_max,
1004 x, numrep,
1005 curstep->arg2,
1006 o+osize, j,
1007 result_max-osize,
1008 choose_tries,
1009 recurse_tries,
1010 choose_local_retries,
1011 choose_local_fallback_retries,
1012 recurse_to_leaf,
1013 vary_r,
1014 stable,
1015 c+osize,
1016 0);
1017 } else {
1018 out_size = ((numrep < (result_max-osize)) ?
1019 numrep : (result_max-osize));
1020 crush_choose_indep(
1021 map,
1022 cw,
1023 map->buckets[bno],
1024 weight, weight_max,
1025 x, out_size, numrep,
1026 curstep->arg2,
1027 o+osize, j,
1028 choose_tries,
1029 choose_leaf_tries ?
1030 choose_leaf_tries : 1,
1031 recurse_to_leaf,
1032 c+osize,
1033 0);
1034 osize += out_size;
1035 }
1036 }
1037
1038 if (recurse_to_leaf)
1039 /* copy final _leaf_ values to output set */
1040 memcpy(o, c, osize*sizeof(*o));
1041
1042 /* swap o and w arrays */
1043 tmp = o;
1044 o = w;
1045 w = tmp;
1046 wsize = osize;
1047 break;
1048
1049
1050 case CRUSH_RULE_EMIT:
1051 for (i = 0; i < wsize && result_len < result_max; i++) {
1052 result[result_len] = w[i];
1053 result_len++;
1054 }
1055 wsize = 0;
1056 break;
1057
1058 default:
1059 dprintk(" unknown op %d at step %d\n",
1060 curstep->op, step);
1061 break;
1062 }
1063 }
1064
1065 return result_len;
1066 }
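
/*
 * End-to-end sketch (illustrative, hypothetical rule): a typical
 * replicated rule such as
 *
 *	step take root
 *	step chooseleaf firstn 0 type host
 *	step emit
 *
 * runs as follows: TAKE seeds w[] with the "root" bucket id,
 * CHOOSELEAF_FIRSTN calls crush_choose_firstn() with
 * numrep = result_max (because arg1 == 0), copies the leaf devices
 * from c[] into o[], swaps o/w, and EMIT appends w[] to @result.
 * The scratch vectors a/b/c sit in the caller's workspace right
 * after map->working_size bytes, so @cwin must provide room for the
 * crush_work area plus 3 * result_max ints.
 */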