2 * Ceph - scalable distributed file system
4 * Copyright (C) 2015 Intel Corporation All Rights Reserved
6 * This is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License version 2.1, as published by the Free Software
9 * Foundation. See file COPYING.
14 # include <linux/string.h>
15 # include <linux/slab.h>
16 # include <linux/bug.h>
17 # include <linux/kernel.h>
18 # include <linux/crush/crush.h>
19 # include <linux/crush/hash.h>
21 # include "crush_compat.h"
25 #include "crush_ln_table.h"
28 #define dprintk(args...) /* printf(args) */
31 * Implement the core CRUSH mapping algorithm.
35 * bucket choose methods
37 * For each bucket algorithm, we have a "choose" method that, given a
38 * crush input @x and replica position (usually, position in output set) @r,
39 * will produce an item in the bucket.
43 * Choose based on a random permutation of the bucket.
45 * We used to use some prime number arithmetic to do this, but it
46 * wasn't very random, and had some other bad behaviors. Instead, we
47 * calculate an actual random permutation of the bucket members.
48 * Since this is expensive, we optimize for the r=0 case, which
49 * captures the vast majority of calls.
51 static int bucket_perm_choose(const struct crush_bucket
*bucket
,
52 struct crush_work_bucket
*work
,
55 unsigned int pr
= r
% bucket
->size
;
58 /* start a new permutation if @x has changed */
59 if (work
->perm_x
!= (__u32
)x
|| work
->perm_n
== 0) {
60 dprintk("bucket %d new x=%d\n", bucket
->id
, x
);
63 /* optimize common r=0 case */
65 s
= crush_hash32_3(bucket
->hash
, x
, bucket
->id
, 0) %
68 work
->perm_n
= 0xffff; /* magic value, see below */
72 for (i
= 0; i
< bucket
->size
; i
++)
75 } else if (work
->perm_n
== 0xffff) {
76 /* clean up after the r=0 case above */
77 for (i
= 1; i
< bucket
->size
; i
++)
79 work
->perm
[work
->perm
[0]] = 0;
83 /* calculate permutation up to pr */
84 for (i
= 0; i
< work
->perm_n
; i
++)
85 dprintk(" perm_choose have %d: %d\n", i
, work
->perm
[i
]);
86 while (work
->perm_n
<= pr
) {
87 unsigned int p
= work
->perm_n
;
88 /* no point in swapping the final entry */
89 if (p
< bucket
->size
- 1) {
90 i
= crush_hash32_3(bucket
->hash
, x
, bucket
->id
, p
) %
93 unsigned int t
= work
->perm
[p
+ i
];
94 work
->perm
[p
+ i
] = work
->perm
[p
];
97 dprintk(" perm_choose swap %d with %d\n", p
, p
+i
);
101 for (i
= 0; i
< bucket
->size
; i
++)
102 dprintk(" perm_choose %d: %d\n", i
, work
->perm
[i
]);
106 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket
->id
,
107 bucket
->size
, x
, r
, pr
, s
);
108 return bucket
->items
[s
];
112 static int bucket_uniform_choose(const struct crush_bucket_uniform
*bucket
,
113 struct crush_work_bucket
*work
, int x
, int r
)
115 return bucket_perm_choose(&bucket
->h
, work
, x
, r
);
119 static int bucket_list_choose(const struct crush_bucket_list
*bucket
,
124 for (i
= bucket
->h
.size
-1; i
>= 0; i
--) {
125 __u64 w
= crush_hash32_4(bucket
->h
.hash
, x
, bucket
->h
.items
[i
],
128 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
130 i
, x
, r
, bucket
->h
.items
[i
], bucket
->item_weights
[i
],
131 bucket
->sum_weights
[i
], w
);
132 w
*= bucket
->sum_weights
[i
];
134 /*dprintk(" scaled %llx\n", w);*/
135 if (w
< bucket
->item_weights
[i
]) {
136 return bucket
->h
.items
[i
];
140 dprintk("bad list sums for bucket %d\n", bucket
->h
.id
);
141 return bucket
->h
.items
[0];
/* (binary tree) height of node @n: the number of trailing zero bits in n.
 * Tree-bucket node numbering puts leaves at odd indices, so height(leaf)==0. */
static int height(int n)
{
	int h = 0;
	while ((n & 1) == 0) {
		h++;
		n = n >> 1;
	}
	return h;
}
/* index of the left child of tree node @x (implicit binary-tree layout) */
static int left(int x)
{
	int h = height(x);
	return x - (1 << (h-1));
}
/* index of the right child of tree node @x (implicit binary-tree layout) */
static int right(int x)
{
	int h = height(x);
	return x + (1 << (h-1));
}
/* true if tree node @x is a leaf (odd indices are terminal) */
static int terminal(int x)
{
	return x & 1;
}
173 static int bucket_tree_choose(const struct crush_bucket_tree
*bucket
,
181 n
= bucket
->num_nodes
>> 1;
183 while (!terminal(n
)) {
185 /* pick point in [0, w) */
186 w
= bucket
->node_weights
[n
];
187 t
= (__u64
)crush_hash32_4(bucket
->h
.hash
, x
, n
, r
,
188 bucket
->h
.id
) * (__u64
)w
;
191 /* descend to the left or right? */
193 if (t
< bucket
->node_weights
[l
])
199 return bucket
->h
.items
[n
>> 1];
205 static int bucket_straw_choose(const struct crush_bucket_straw
*bucket
,
213 for (i
= 0; i
< bucket
->h
.size
; i
++) {
214 draw
= crush_hash32_3(bucket
->h
.hash
, x
, bucket
->h
.items
[i
], r
);
216 draw
*= bucket
->straws
[i
];
217 if (i
== 0 || draw
> high_draw
) {
222 return bucket
->h
.items
[high
];
225 /* compute 2^44*log2(input+1) */
226 static __u64
crush_ln(unsigned int xin
)
228 unsigned int x
= xin
;
229 int iexpon
, index1
, index2
;
230 __u64 RH
, LH
, LL
, xl64
, result
;
234 /* normalize input */
237 // figure out number of bits we need to shift and
238 // do it in one step instead of iteratively
239 if (!(x
& 0x18000)) {
240 int bits
= __builtin_clz(x
& 0x1FFFF) - 16;
245 index1
= (x
>> 8) << 1;
246 /* RH ~ 2^56/index1 */
247 RH
= __RH_LH_tbl
[index1
- 256];
248 /* LH ~ 2^48 * log2(index1/256) */
249 LH
= __RH_LH_tbl
[index1
+ 1 - 256];
251 /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
252 xl64
= (__s64
)x
* RH
;
256 result
<<= (12 + 32);
258 index2
= xl64
& 0xff;
259 /* LL ~ 2^48*log2(1.0+index2/2^15) */
260 LL
= __LL_tbl
[index2
];
264 LH
>>= (48 - 12 - 32);
274 * Suppose we have two osds: osd.0 and osd.1, with weights 8 and 4 respectively. It means:
275 * a). For osd.0, the time interval between io requests follows an exponential distribution
276 * with lambda equal to 8
277 * b). For osd.1, the time interval between io requests follows an exponential distribution
278 * with lambda equal to 4
279 * c). If we draw each osd's exponential random variable and take the minimum, then the total pgs on each osd
280 * is proportional to its weight.
282 * for reference, see:
284 * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
287 static inline __u32
*get_choose_arg_weights(const struct crush_bucket_straw2
*bucket
,
288 const struct crush_choose_arg
*arg
,
291 if ((arg
== NULL
) || (arg
->weight_set
== NULL
))
292 return bucket
->item_weights
;
293 if (position
>= arg
->weight_set_positions
)
294 position
= arg
->weight_set_positions
- 1;
295 return arg
->weight_set
[position
].weights
;
298 static inline __s32
*get_choose_arg_ids(const struct crush_bucket_straw2
*bucket
,
299 const struct crush_choose_arg
*arg
)
301 if ((arg
== NULL
) || (arg
->ids
== NULL
))
302 return bucket
->h
.items
;
307 * Compute exponential random variable using inversion method.
309 * for reference, see the exponential distribution example at:
310 * https://en.wikipedia.org/wiki/Inverse_transform_sampling#Examples
312 static inline __s64
generate_exponential_distribution(int type
, int x
, int y
, int z
,
315 unsigned int u
= crush_hash32_3(type
, x
, y
, z
);
319 * for some reason slightly less than 0x10000 produces
320 * a slightly more accurate distribution... probably a
323 * the natural log lookup table maps [0,0xffff]
324 * (corresponding to real numbers [1/0x10000, 1] to
325 * [0, 0xffffffffffff] (corresponding to real numbers
328 __s64 ln
= crush_ln(u
) - 0x1000000000000ll
;
331 * divide by 16.16 fixed-point weight. note
332 * that the ln value is negative, so a larger
333 * weight means a larger (less negative) value
336 return div64_s64(ln
, weight
);
339 static int bucket_straw2_choose(const struct crush_bucket_straw2
*bucket
,
340 int x
, int r
, const struct crush_choose_arg
*arg
,
343 unsigned int i
, high
= 0;
344 __s64 draw
, high_draw
= 0;
345 __u32
*weights
= get_choose_arg_weights(bucket
, arg
, position
);
346 __s32
*ids
= get_choose_arg_ids(bucket
, arg
);
347 for (i
= 0; i
< bucket
->h
.size
; i
++) {
348 dprintk("weight 0x%x item %d\n", weights
[i
], ids
[i
]);
350 draw
= generate_exponential_distribution(bucket
->h
.hash
, x
, ids
[i
], r
, weights
[i
]);
355 if (i
== 0 || draw
> high_draw
) {
361 return bucket
->h
.items
[high
];
365 static int crush_bucket_choose(const struct crush_bucket
*in
,
366 struct crush_work_bucket
*work
,
368 const struct crush_choose_arg
*arg
,
371 dprintk(" crush_bucket_choose %d x=%d r=%d\n", in
->id
, x
, r
);
372 BUG_ON(in
->size
== 0);
374 case CRUSH_BUCKET_UNIFORM
:
375 return bucket_uniform_choose(
376 (const struct crush_bucket_uniform
*)in
,
378 case CRUSH_BUCKET_LIST
:
379 return bucket_list_choose((const struct crush_bucket_list
*)in
,
381 case CRUSH_BUCKET_TREE
:
382 return bucket_tree_choose((const struct crush_bucket_tree
*)in
,
384 case CRUSH_BUCKET_STRAW
:
385 return bucket_straw_choose(
386 (const struct crush_bucket_straw
*)in
,
388 case CRUSH_BUCKET_STRAW2
:
389 return bucket_straw2_choose(
390 (const struct crush_bucket_straw2
*)in
,
391 x
, r
, arg
, position
);
393 dprintk("unknown bucket %d alg %d\n", in
->id
, in
->alg
);
399 * true if device is marked "out" (failed, fully offloaded)
402 static int is_out(const struct crush_map
*map
,
403 const __u32
*weight
, int weight_max
,
406 if (item
>= weight_max
)
408 if (weight
[item
] >= 0x10000)
410 if (weight
[item
] == 0)
412 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1
, x
, item
) & 0xffff)
419 * crush_choose_firstn - choose numrep distinct items of given type
420 * @map: the crush_map
421 * @bucket: the bucket we are choose an item from
422 * @x: crush input value
423 * @numrep: the number of items to choose
424 * @type: the type of item to choose
425 * @out: pointer to output vector
426 * @outpos: our position in that vector
427 * @out_size: size of the out vector
428 * @tries: number of attempts to make
429 * @recurse_tries: number of attempts to have recursive chooseleaf make
430 * @local_retries: localized retries
431 * @local_fallback_retries: localized fallback retries
432 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
433 * @stable: stable mode starts rep=0 in the recursive call for all replicas
434 * @vary_r: pass r to recursive calls
435 * @out2: second output vector for leaf items (if @recurse_to_leaf)
436 * @parent_r: r value passed from the parent
438 static int crush_choose_firstn(const struct crush_map
*map
,
439 struct crush_work
*work
,
440 const struct crush_bucket
*bucket
,
441 const __u32
*weight
, int weight_max
,
442 int x
, int numrep
, int type
,
443 int *out
, int outpos
,
446 unsigned int recurse_tries
,
447 unsigned int local_retries
,
448 unsigned int local_fallback_retries
,
454 const struct crush_choose_arg
*choose_args
)
457 unsigned int ftotal
, flocal
;
458 int retry_descent
, retry_bucket
, skip_rep
;
459 const struct crush_bucket
*in
= bucket
;
465 int count
= out_size
;
467 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d \
468 recurse_tries %d local_retries %d local_fallback_retries %d \
469 parent_r %d stable %d\n",
470 recurse_to_leaf
? "_LEAF" : "",
471 bucket
->id
, x
, outpos
, numrep
,
472 tries
, recurse_tries
, local_retries
, local_fallback_retries
,
475 for (rep
= stable
? 0 : outpos
; rep
< numrep
&& count
> 0 ; rep
++) {
476 /* keep trying until we get a non-out, non-colliding item */
481 in
= bucket
; /* initial bucket */
483 /* choose through intervening buckets */
489 /* r' = r + f_total */
497 if (local_fallback_retries
> 0 &&
498 flocal
>= (in
->size
>>1) &&
499 flocal
> local_fallback_retries
)
500 item
= bucket_perm_choose(
501 in
, work
->work
[-1-in
->id
],
504 item
= crush_bucket_choose(
505 in
, work
->work
[-1-in
->id
],
507 (choose_args
? &choose_args
[-1-in
->id
] : 0),
509 if (item
>= map
->max_devices
) {
510 dprintk(" bad item %d\n", item
);
517 itemtype
= map
->buckets
[-1-item
]->type
;
520 dprintk(" item %d type %d\n", item
, itemtype
);
523 if (itemtype
!= type
) {
525 (-1-item
) >= map
->max_buckets
) {
526 dprintk(" bad item type %d\n", type
);
530 in
= map
->buckets
[-1-item
];
536 for (i
= 0; i
< outpos
; i
++) {
537 if (out
[i
] == item
) {
544 if (!collide
&& recurse_to_leaf
) {
548 sub_r
= r
>> (vary_r
-1);
551 if (crush_choose_firstn(
554 map
->buckets
[-1-item
],
556 x
, stable
? 1 : outpos
+1, 0,
560 local_fallback_retries
,
566 choose_args
) <= outpos
)
567 /* didn't get leaf */
570 /* we already have a leaf! */
575 if (!reject
&& !collide
) {
578 reject
= is_out(map
, weight
,
584 if (reject
|| collide
) {
588 if (collide
&& flocal
<= local_retries
)
589 /* retry locally a few times */
591 else if (local_fallback_retries
> 0 &&
592 flocal
<= in
->size
+ local_fallback_retries
)
593 /* exhaustive bucket search */
595 else if (ftotal
< tries
)
596 /* then retry descent */
601 dprintk(" reject %d collide %d "
602 "ftotal %u flocal %u\n",
603 reject
, collide
, ftotal
,
606 } while (retry_bucket
);
607 } while (retry_descent
);
610 dprintk("skip rep\n");
614 dprintk("CHOOSE got %d\n", item
);
619 if (map
->choose_tries
&& ftotal
<= map
->choose_total_tries
)
620 map
->choose_tries
[ftotal
]++;
624 dprintk("CHOOSE returns %d\n", outpos
);
630 * crush_choose_indep: alternative breadth-first positionally stable mapping
633 static void crush_choose_indep(const struct crush_map
*map
,
634 struct crush_work
*work
,
635 const struct crush_bucket
*bucket
,
636 const __u32
*weight
, int weight_max
,
637 int x
, int left
, int numrep
, int type
,
638 int *out
, int outpos
,
640 unsigned int recurse_tries
,
644 const struct crush_choose_arg
*choose_args
)
646 const struct crush_bucket
*in
= bucket
;
647 int endpos
= outpos
+ left
;
656 dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf
? "_LEAF" : "",
657 bucket
->id
, x
, outpos
, numrep
);
659 /* initially my result is undefined */
660 for (rep
= outpos
; rep
< endpos
; rep
++) {
661 out
[rep
] = CRUSH_ITEM_UNDEF
;
663 out2
[rep
] = CRUSH_ITEM_UNDEF
;
666 for (ftotal
= 0; left
> 0 && ftotal
< tries
; ftotal
++) {
668 if (out2
&& ftotal
) {
669 dprintk("%u %d a: ", ftotal
, left
);
670 for (rep
= outpos
; rep
< endpos
; rep
++) {
671 dprintk(" %d", out
[rep
]);
674 dprintk("%u %d b: ", ftotal
, left
);
675 for (rep
= outpos
; rep
< endpos
; rep
++) {
676 dprintk(" %d", out2
[rep
]);
681 for (rep
= outpos
; rep
< endpos
; rep
++) {
682 if (out
[rep
] != CRUSH_ITEM_UNDEF
)
685 in
= bucket
; /* initial bucket */
687 /* choose through intervening buckets */
689 /* note: we base the choice on the position
690 * even in the nested call. that means that
691 * if the first layer chooses the same bucket
692 * in a different position, we will tend to
693 * choose a different item in that bucket.
694 * this will involve more devices in data
695 * movement and tend to distribute the load.
700 if (in
->alg
== CRUSH_BUCKET_UNIFORM
&&
701 in
->size
% numrep
== 0)
702 /* r'=r+(n+1)*f_total */
703 r
+= (numrep
+1) * ftotal
;
705 /* r' = r + n*f_total */
706 r
+= numrep
* ftotal
;
710 dprintk(" empty bucket\n");
714 item
= crush_bucket_choose(
715 in
, work
->work
[-1-in
->id
],
717 (choose_args
? &choose_args
[-1-in
->id
] : 0),
719 if (item
>= map
->max_devices
) {
720 dprintk(" bad item %d\n", item
);
721 out
[rep
] = CRUSH_ITEM_NONE
;
723 out2
[rep
] = CRUSH_ITEM_NONE
;
730 itemtype
= map
->buckets
[-1-item
]->type
;
733 dprintk(" item %d type %d\n", item
, itemtype
);
736 if (itemtype
!= type
) {
738 (-1-item
) >= map
->max_buckets
) {
739 dprintk(" bad item type %d\n", type
);
740 out
[rep
] = CRUSH_ITEM_NONE
;
747 in
= map
->buckets
[-1-item
];
753 for (i
= outpos
; i
< endpos
; i
++) {
754 if (out
[i
] == item
) {
762 if (recurse_to_leaf
) {
767 map
->buckets
[-1-item
],
772 0, NULL
, r
, choose_args
);
773 if (out2
&& out2
[rep
] == CRUSH_ITEM_NONE
) {
774 /* placed nothing; no leaf */
778 /* we already have a leaf! */
785 is_out(map
, weight
, weight_max
, item
, x
))
795 for (rep
= outpos
; rep
< endpos
; rep
++) {
796 if (out
[rep
] == CRUSH_ITEM_UNDEF
) {
797 out
[rep
] = CRUSH_ITEM_NONE
;
799 if (out2
&& out2
[rep
] == CRUSH_ITEM_UNDEF
) {
800 out2
[rep
] = CRUSH_ITEM_NONE
;
804 if (map
->choose_tries
&& ftotal
<= map
->choose_total_tries
)
805 map
->choose_tries
[ftotal
]++;
809 dprintk("%u %d a: ", ftotal
, left
);
810 for (rep
= outpos
; rep
< endpos
; rep
++) {
811 dprintk(" %d", out
[rep
]);
814 dprintk("%u %d b: ", ftotal
, left
);
815 for (rep
= outpos
; rep
< endpos
; rep
++) {
816 dprintk(" %d", out2
[rep
]);
824 /* This takes a chunk of memory and sets it up to be a shiny new
825 working area for a CRUSH placement computation. It must be called
826 on any newly allocated memory before passing it in to
827 crush_do_rule. It may be used repeatedly after that, so long as the
828 map has not changed. If the map /has/ changed, you must make sure
829 the working size is no smaller than what was allocated and re-run
830 crush_init_workspace.
832 If you do retain the working space between calls to crush, make it
833 thread-local. If you reinstitute the locking I've spent so much
834 time getting rid of, I will be very unhappy with you. */
836 void crush_init_workspace(const struct crush_map
*m
, void *v
) {
837 /* We work by moving through the available space and setting
838 values and pointers as we go.
840 It's a bit like Forth's use of the 'allot' word since we
841 set the pointer first and then reserve the space for it to
842 point to by incrementing the point. */
843 struct crush_work
*w
= (struct crush_work
*)v
;
844 char *point
= (char *)v
;
846 point
+= sizeof(struct crush_work
);
847 w
->work
= (struct crush_work_bucket
**)point
;
848 point
+= m
->max_buckets
* sizeof(struct crush_work_bucket
*);
849 for (b
= 0; b
< m
->max_buckets
; ++b
) {
850 if (m
->buckets
[b
] == 0)
853 w
->work
[b
] = (struct crush_work_bucket
*) point
;
854 switch (m
->buckets
[b
]->alg
) {
856 point
+= sizeof(struct crush_work_bucket
);
859 w
->work
[b
]->perm_x
= 0;
860 w
->work
[b
]->perm_n
= 0;
861 w
->work
[b
]->perm
= (__u32
*)point
;
862 point
+= m
->buckets
[b
]->size
* sizeof(__u32
);
864 BUG_ON((char *)point
- (char *)w
!= m
->working_size
);
868 * crush_do_rule - calculate a mapping with the given input and rule
869 * @map: the crush_map
870 * @ruleno: the rule id
872 * @result: pointer to result vector
873 * @result_max: maximum result size
874 * @weight: weight vector (for map leaves)
875 * @weight_max: size of weight vector
876 * @cwin: Pointer to at least map->working_size bytes of memory or NULL.
878 int crush_do_rule(const struct crush_map
*map
,
879 int ruleno
, int x
, int *result
, int result_max
,
880 const __u32
*weight
, int weight_max
,
881 void *cwin
, const struct crush_choose_arg
*choose_args
)
884 struct crush_work
*cw
= cwin
;
885 int *a
= (int *)((char *)cw
+ map
->working_size
);
886 int *b
= a
+ result_max
;
887 int *c
= b
+ result_max
;
894 const struct crush_rule
*rule
;
900 * the original choose_total_tries value was off by one (it
901 * counted "retries" and not "tries"). add one.
903 int choose_tries
= map
->choose_total_tries
+ 1;
904 int choose_leaf_tries
= 0;
906 * the local tries values were counted as "retries", though,
907 * and need no adjustment
909 int choose_local_retries
= map
->choose_local_tries
;
910 int choose_local_fallback_retries
= map
->choose_local_fallback_tries
;
912 int vary_r
= map
->chooseleaf_vary_r
;
913 int stable
= map
->chooseleaf_stable
;
915 if ((__u32
)ruleno
>= map
->max_rules
) {
916 dprintk(" bad ruleno %d\n", ruleno
);
920 rule
= map
->rules
[ruleno
];
923 for (step
= 0; step
< rule
->len
; step
++) {
925 const struct crush_rule_step
*curstep
= &rule
->steps
[step
];
927 switch (curstep
->op
) {
928 case CRUSH_RULE_TAKE
:
929 if ((curstep
->arg1
>= 0 &&
930 curstep
->arg1
< map
->max_devices
) ||
931 (-1-curstep
->arg1
>= 0 &&
932 -1-curstep
->arg1
< map
->max_buckets
&&
933 map
->buckets
[-1-curstep
->arg1
])) {
934 w
[0] = curstep
->arg1
;
937 dprintk(" bad take value %d\n", curstep
->arg1
);
941 case CRUSH_RULE_SET_CHOOSE_TRIES
:
942 if (curstep
->arg1
> 0)
943 choose_tries
= curstep
->arg1
;
946 case CRUSH_RULE_SET_CHOOSELEAF_TRIES
:
947 if (curstep
->arg1
> 0)
948 choose_leaf_tries
= curstep
->arg1
;
951 case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES
:
952 if (curstep
->arg1
>= 0)
953 choose_local_retries
= curstep
->arg1
;
956 case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES
:
957 if (curstep
->arg1
>= 0)
958 choose_local_fallback_retries
= curstep
->arg1
;
961 case CRUSH_RULE_SET_CHOOSELEAF_VARY_R
:
962 if (curstep
->arg1
>= 0)
963 vary_r
= curstep
->arg1
;
966 case CRUSH_RULE_SET_CHOOSELEAF_STABLE
:
967 if (curstep
->arg1
>= 0)
968 stable
= curstep
->arg1
;
971 case CRUSH_RULE_CHOOSELEAF_FIRSTN
:
972 case CRUSH_RULE_CHOOSE_FIRSTN
:
975 case CRUSH_RULE_CHOOSELEAF_INDEP
:
976 case CRUSH_RULE_CHOOSE_INDEP
:
982 CRUSH_RULE_CHOOSELEAF_FIRSTN
||
984 CRUSH_RULE_CHOOSELEAF_INDEP
;
989 for (i
= 0; i
< wsize
; i
++) {
991 numrep
= curstep
->arg1
;
993 numrep
+= result_max
;
998 /* make sure bucket id is valid */
1000 if (bno
< 0 || bno
>= map
->max_buckets
) {
1001 // w[i] is probably CRUSH_ITEM_NONE
1002 dprintk(" bad w[i] %d\n", w
[i
]);
1007 if (choose_leaf_tries
)
1010 else if (map
->chooseleaf_descend_once
)
1013 recurse_tries
= choose_tries
;
1014 osize
+= crush_choose_firstn(
1025 choose_local_retries
,
1026 choose_local_fallback_retries
,
1034 out_size
= ((numrep
< (result_max
-osize
)) ?
1035 numrep
: (result_max
-osize
));
1041 x
, out_size
, numrep
,
1046 choose_leaf_tries
: 1,
1055 if (recurse_to_leaf
)
1056 /* copy final _leaf_ values to output set */
1057 memcpy(o
, c
, osize
*sizeof(*o
));
1059 /* swap o and w arrays */
1067 case CRUSH_RULE_EMIT
:
1068 for (i
= 0; i
< wsize
&& result_len
< result_max
; i
++) {
1069 result
[result_len
] = w
[i
];
1076 dprintk(" unknown op %d at step %d\n",