2 * Ceph - scalable distributed file system
4 * Copyright (C) 2015 Intel Corporation All Rights Reserved
6 * This is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License version 2.1, as published by the Free Software
9 * Foundation. See file COPYING.
14 # include <linux/string.h>
15 # include <linux/slab.h>
16 # include <linux/bug.h>
17 # include <linux/kernel.h>
18 # include <linux/crush/crush.h>
19 # include <linux/crush/hash.h>
21 # include "crush_compat.h"
25 #include "crush_ln_table.h"
28 #define dprintk(args...) /* printf(args) */
31 * Implement the core CRUSH mapping algorithm.
35 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
37 * @ruleset: the storage ruleset id (user defined)
38 * @type: storage ruleset type (user defined)
39 * @size: output set size
41 int crush_find_rule(const struct crush_map
*map
, int ruleset
, int type
, int size
)
45 for (i
= 0; i
< map
->max_rules
; i
++) {
47 map
->rules
[i
]->mask
.ruleset
== ruleset
&&
48 map
->rules
[i
]->mask
.type
== type
&&
49 map
->rules
[i
]->mask
.min_size
<= size
&&
50 map
->rules
[i
]->mask
.max_size
>= size
)
57 * bucket choose methods
59 * For each bucket algorithm, we have a "choose" method that, given a
60 * crush input @x and replica position (usually, position in output set) @r,
61 * will produce an item in the bucket.
65 * Choose based on a random permutation of the bucket.
67 * We used to use some prime number arithmetic to do this, but it
68 * wasn't very random, and had some other bad behaviors. Instead, we
69 * calculate an actual random permutation of the bucket members.
70 * Since this is expensive, we optimize for the r=0 case, which
71 * captures the vast majority of calls.
73 static int bucket_perm_choose(const struct crush_bucket
*bucket
,
74 struct crush_work_bucket
*work
,
77 unsigned int pr
= r
% bucket
->size
;
80 /* start a new permutation if @x has changed */
81 if (work
->perm_x
!= (__u32
)x
|| work
->perm_n
== 0) {
82 dprintk("bucket %d new x=%d\n", bucket
->id
, x
);
85 /* optimize common r=0 case */
87 s
= crush_hash32_3(bucket
->hash
, x
, bucket
->id
, 0) %
90 work
->perm_n
= 0xffff; /* magic value, see below */
94 for (i
= 0; i
< bucket
->size
; i
++)
97 } else if (work
->perm_n
== 0xffff) {
98 /* clean up after the r=0 case above */
99 for (i
= 1; i
< bucket
->size
; i
++)
101 work
->perm
[work
->perm
[0]] = 0;
105 /* calculate permutation up to pr */
106 for (i
= 0; i
< work
->perm_n
; i
++)
107 dprintk(" perm_choose have %d: %d\n", i
, work
->perm
[i
]);
108 while (work
->perm_n
<= pr
) {
109 unsigned int p
= work
->perm_n
;
110 /* no point in swapping the final entry */
111 if (p
< bucket
->size
- 1) {
112 i
= crush_hash32_3(bucket
->hash
, x
, bucket
->id
, p
) %
115 unsigned int t
= work
->perm
[p
+ i
];
116 work
->perm
[p
+ i
] = work
->perm
[p
];
119 dprintk(" perm_choose swap %d with %d\n", p
, p
+i
);
123 for (i
= 0; i
< bucket
->size
; i
++)
124 dprintk(" perm_choose %d: %d\n", i
, work
->perm
[i
]);
128 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket
->id
,
129 bucket
->size
, x
, r
, pr
, s
);
130 return bucket
->items
[s
];
134 static int bucket_uniform_choose(const struct crush_bucket_uniform
*bucket
,
135 struct crush_work_bucket
*work
, int x
, int r
)
137 return bucket_perm_choose(&bucket
->h
, work
, x
, r
);
141 static int bucket_list_choose(const struct crush_bucket_list
*bucket
,
146 for (i
= bucket
->h
.size
-1; i
>= 0; i
--) {
147 __u64 w
= crush_hash32_4(bucket
->h
.hash
, x
, bucket
->h
.items
[i
],
150 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
152 i
, x
, r
, bucket
->h
.items
[i
], bucket
->item_weights
[i
],
153 bucket
->sum_weights
[i
], w
);
154 w
*= bucket
->sum_weights
[i
];
156 /*dprintk(" scaled %llx\n", w);*/
157 if (w
< bucket
->item_weights
[i
]) {
158 return bucket
->h
.items
[i
];
162 dprintk("bad list sums for bucket %d\n", bucket
->h
.id
);
163 return bucket
->h
.items
[0];
/* (binary tree)
 *
 * for reference, see the tree bucket node numbering scheme: node ids are
 * laid out so that the number of trailing zero bits gives the node's
 * height above the leaves.
 */
static int height(int n)
{
	int h = 0;

	/* count trailing zero bits; leaves (odd ids) have height 0 */
	while ((n & 1) == 0) {
		h++;
		n = n >> 1;
	}
	return h;
}
/* left child of interior node @x in the implicit tree numbering;
 * only valid for non-terminal (even) node ids, where height(x) >= 1 */
static int left(int x)
{
	int h = height(x);

	return x - (1 << (h-1));
}
/* right child of interior node @x; mirror of left() */
static int right(int x)
{
	int h = height(x);

	return x + (1 << (h-1));
}
/* a node is terminal (a leaf) iff its id is odd */
static int terminal(int x)
{
	return x & 1;
}
195 static int bucket_tree_choose(const struct crush_bucket_tree
*bucket
,
203 n
= bucket
->num_nodes
>> 1;
205 while (!terminal(n
)) {
207 /* pick point in [0, w) */
208 w
= bucket
->node_weights
[n
];
209 t
= (__u64
)crush_hash32_4(bucket
->h
.hash
, x
, n
, r
,
210 bucket
->h
.id
) * (__u64
)w
;
213 /* descend to the left or right? */
215 if (t
< bucket
->node_weights
[l
])
221 return bucket
->h
.items
[n
>> 1];
227 static int bucket_straw_choose(const struct crush_bucket_straw
*bucket
,
235 for (i
= 0; i
< bucket
->h
.size
; i
++) {
236 draw
= crush_hash32_3(bucket
->h
.hash
, x
, bucket
->h
.items
[i
], r
);
238 draw
*= bucket
->straws
[i
];
239 if (i
== 0 || draw
> high_draw
) {
244 return bucket
->h
.items
[high
];
247 /* compute 2^44*log2(input+1) */
248 static __u64
crush_ln(unsigned int xin
)
250 unsigned int x
= xin
;
251 int iexpon
, index1
, index2
;
252 __u64 RH
, LH
, LL
, xl64
, result
;
256 /* normalize input */
259 // figure out number of bits we need to shift and
260 // do it in one step instead of iteratively
261 if (!(x
& 0x18000)) {
262 int bits
= __builtin_clz(x
& 0x1FFFF) - 16;
267 index1
= (x
>> 8) << 1;
268 /* RH ~ 2^56/index1 */
269 RH
= __RH_LH_tbl
[index1
- 256];
270 /* LH ~ 2^48 * log2(index1/256) */
271 LH
= __RH_LH_tbl
[index1
+ 1 - 256];
273 /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
274 xl64
= (__s64
)x
* RH
;
278 result
<<= (12 + 32);
280 index2
= xl64
& 0xff;
281 /* LL ~ 2^48*log2(1.0+index2/2^15) */
282 LL
= __LL_tbl
[index2
];
286 LH
>>= (48 - 12 - 32);
296 * for reference, see:
298 * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
302 static inline __u32
*get_choose_arg_weights(const struct crush_bucket_straw2
*bucket
,
303 const struct crush_choose_arg
*arg
,
307 (arg
->weight_set
== NULL
))
308 return bucket
->item_weights
;
309 if (position
>= arg
->weight_set_positions
)
310 position
= arg
->weight_set_positions
- 1;
311 return arg
->weight_set
[position
].weights
;
314 static inline __s32
*get_choose_arg_ids(const struct crush_bucket_straw2
*bucket
,
315 const struct crush_choose_arg
*arg
)
317 if ((arg
== NULL
) || (arg
->ids
== NULL
))
318 return bucket
->h
.items
;
322 static int bucket_straw2_choose(const struct crush_bucket_straw2
*bucket
,
323 int x
, int r
, const struct crush_choose_arg
*arg
,
326 unsigned int i
, high
= 0;
328 __s64 ln
, draw
, high_draw
= 0;
329 __u32
*weights
= get_choose_arg_weights(bucket
, arg
, position
);
330 __s32
*ids
= get_choose_arg_ids(bucket
, arg
);
331 for (i
= 0; i
< bucket
->h
.size
; i
++) {
332 dprintk("weight 0x%x item %d\n", weights
[i
], ids
[i
]);
334 u
= crush_hash32_3(bucket
->h
.hash
, x
, ids
[i
], r
);
338 * for some reason slightly less than 0x10000 produces
339 * a slightly more accurate distribution... probably a
342 * the natural log lookup table maps [0,0xffff]
343 * (corresponding to real numbers [1/0x10000, 1] to
344 * [0, 0xffffffffffff] (corresponding to real numbers
347 ln
= crush_ln(u
) - 0x1000000000000ll
;
350 * divide by 16.16 fixed-point weight. note
351 * that the ln value is negative, so a larger
352 * weight means a larger (less negative) value
355 draw
= div64_s64(ln
, weights
[i
]);
360 if (i
== 0 || draw
> high_draw
) {
366 return bucket
->h
.items
[high
];
370 static int crush_bucket_choose(const struct crush_bucket
*in
,
371 struct crush_work_bucket
*work
,
373 const struct crush_choose_arg
*arg
,
376 dprintk(" crush_bucket_choose %d x=%d r=%d\n", in
->id
, x
, r
);
377 BUG_ON(in
->size
== 0);
379 case CRUSH_BUCKET_UNIFORM
:
380 return bucket_uniform_choose(
381 (const struct crush_bucket_uniform
*)in
,
383 case CRUSH_BUCKET_LIST
:
384 return bucket_list_choose((const struct crush_bucket_list
*)in
,
386 case CRUSH_BUCKET_TREE
:
387 return bucket_tree_choose((const struct crush_bucket_tree
*)in
,
389 case CRUSH_BUCKET_STRAW
:
390 return bucket_straw_choose(
391 (const struct crush_bucket_straw
*)in
,
393 case CRUSH_BUCKET_STRAW2
:
394 return bucket_straw2_choose(
395 (const struct crush_bucket_straw2
*)in
,
396 x
, r
, arg
, position
);
398 dprintk("unknown bucket %d alg %d\n", in
->id
, in
->alg
);
404 * true if device is marked "out" (failed, fully offloaded)
407 static int is_out(const struct crush_map
*map
,
408 const __u32
*weight
, int weight_max
,
411 if (item
>= weight_max
)
413 if (weight
[item
] >= 0x10000)
415 if (weight
[item
] == 0)
417 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1
, x
, item
) & 0xffff)
424 * crush_choose_firstn - choose numrep distinct items of given type
425 * @map: the crush_map
426 * @bucket: the bucket we are choose an item from
427 * @x: crush input value
428 * @numrep: the number of items to choose
429 * @type: the type of item to choose
430 * @out: pointer to output vector
431 * @outpos: our position in that vector
432 * @out_size: size of the out vector
433 * @tries: number of attempts to make
434 * @recurse_tries: number of attempts to have recursive chooseleaf make
435 * @local_retries: localized retries
436 * @local_fallback_retries: localized fallback retries
437 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
438 * @stable: stable mode starts rep=0 in the recursive call for all replicas
439 * @vary_r: pass r to recursive calls
440 * @out2: second output vector for leaf items (if @recurse_to_leaf)
441 * @parent_r: r value passed from the parent
443 static int crush_choose_firstn(const struct crush_map
*map
,
444 struct crush_work
*work
,
445 const struct crush_bucket
*bucket
,
446 const __u32
*weight
, int weight_max
,
447 int x
, int numrep
, int type
,
448 int *out
, int outpos
,
451 unsigned int recurse_tries
,
452 unsigned int local_retries
,
453 unsigned int local_fallback_retries
,
459 const struct crush_choose_arg
*choose_args
)
462 unsigned int ftotal
, flocal
;
463 int retry_descent
, retry_bucket
, skip_rep
;
464 const struct crush_bucket
*in
= bucket
;
470 int count
= out_size
;
472 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d \
473 recurse_tries %d local_retries %d local_fallback_retries %d \
474 parent_r %d stable %d\n",
475 recurse_to_leaf
? "_LEAF" : "",
476 bucket
->id
, x
, outpos
, numrep
,
477 tries
, recurse_tries
, local_retries
, local_fallback_retries
,
480 for (rep
= stable
? 0 : outpos
; rep
< numrep
&& count
> 0 ; rep
++) {
481 /* keep trying until we get a non-out, non-colliding item */
486 in
= bucket
; /* initial bucket */
488 /* choose through intervening buckets */
494 /* r' = r + f_total */
502 if (local_fallback_retries
> 0 &&
503 flocal
>= (in
->size
>>1) &&
504 flocal
> local_fallback_retries
)
505 item
= bucket_perm_choose(
506 in
, work
->work
[-1-in
->id
],
509 item
= crush_bucket_choose(
510 in
, work
->work
[-1-in
->id
],
512 (choose_args
? &choose_args
[-1-in
->id
] : 0),
514 if (item
>= map
->max_devices
) {
515 dprintk(" bad item %d\n", item
);
522 itemtype
= map
->buckets
[-1-item
]->type
;
525 dprintk(" item %d type %d\n", item
, itemtype
);
528 if (itemtype
!= type
) {
530 (-1-item
) >= map
->max_buckets
) {
531 dprintk(" bad item type %d\n", type
);
535 in
= map
->buckets
[-1-item
];
541 for (i
= 0; i
< outpos
; i
++) {
542 if (out
[i
] == item
) {
549 if (!collide
&& recurse_to_leaf
) {
553 sub_r
= r
>> (vary_r
-1);
556 if (crush_choose_firstn(
559 map
->buckets
[-1-item
],
561 x
, stable
? 1 : outpos
+1, 0,
565 local_fallback_retries
,
571 choose_args
) <= outpos
)
572 /* didn't get leaf */
575 /* we already have a leaf! */
580 if (!reject
&& !collide
) {
583 reject
= is_out(map
, weight
,
589 if (reject
|| collide
) {
593 if (collide
&& flocal
<= local_retries
)
594 /* retry locally a few times */
596 else if (local_fallback_retries
> 0 &&
597 flocal
<= in
->size
+ local_fallback_retries
)
598 /* exhaustive bucket search */
600 else if (ftotal
< tries
)
601 /* then retry descent */
606 dprintk(" reject %d collide %d "
607 "ftotal %u flocal %u\n",
608 reject
, collide
, ftotal
,
611 } while (retry_bucket
);
612 } while (retry_descent
);
615 dprintk("skip rep\n");
619 dprintk("CHOOSE got %d\n", item
);
624 if (map
->choose_tries
&& ftotal
<= map
->choose_total_tries
)
625 map
->choose_tries
[ftotal
]++;
629 dprintk("CHOOSE returns %d\n", outpos
);
635 * crush_choose_indep: alternative breadth-first positionally stable mapping
638 static void crush_choose_indep(const struct crush_map
*map
,
639 struct crush_work
*work
,
640 const struct crush_bucket
*bucket
,
641 const __u32
*weight
, int weight_max
,
642 int x
, int left
, int numrep
, int type
,
643 int *out
, int outpos
,
645 unsigned int recurse_tries
,
649 const struct crush_choose_arg
*choose_args
)
651 const struct crush_bucket
*in
= bucket
;
652 int endpos
= outpos
+ left
;
661 dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf
? "_LEAF" : "",
662 bucket
->id
, x
, outpos
, numrep
);
664 /* initially my result is undefined */
665 for (rep
= outpos
; rep
< endpos
; rep
++) {
666 out
[rep
] = CRUSH_ITEM_UNDEF
;
668 out2
[rep
] = CRUSH_ITEM_UNDEF
;
671 for (ftotal
= 0; left
> 0 && ftotal
< tries
; ftotal
++) {
673 if (out2
&& ftotal
) {
674 dprintk("%u %d a: ", ftotal
, left
);
675 for (rep
= outpos
; rep
< endpos
; rep
++) {
676 dprintk(" %d", out
[rep
]);
679 dprintk("%u %d b: ", ftotal
, left
);
680 for (rep
= outpos
; rep
< endpos
; rep
++) {
681 dprintk(" %d", out2
[rep
]);
686 for (rep
= outpos
; rep
< endpos
; rep
++) {
687 if (out
[rep
] != CRUSH_ITEM_UNDEF
)
690 in
= bucket
; /* initial bucket */
692 /* choose through intervening buckets */
694 /* note: we base the choice on the position
695 * even in the nested call. that means that
696 * if the first layer chooses the same bucket
697 * in a different position, we will tend to
698 * choose a different item in that bucket.
699 * this will involve more devices in data
700 * movement and tend to distribute the load.
705 if (in
->alg
== CRUSH_BUCKET_UNIFORM
&&
706 in
->size
% numrep
== 0)
707 /* r'=r+(n+1)*f_total */
708 r
+= (numrep
+1) * ftotal
;
710 /* r' = r + n*f_total */
711 r
+= numrep
* ftotal
;
715 dprintk(" empty bucket\n");
719 item
= crush_bucket_choose(
720 in
, work
->work
[-1-in
->id
],
722 (choose_args
? &choose_args
[-1-in
->id
] : 0),
724 if (item
>= map
->max_devices
) {
725 dprintk(" bad item %d\n", item
);
726 out
[rep
] = CRUSH_ITEM_NONE
;
728 out2
[rep
] = CRUSH_ITEM_NONE
;
735 itemtype
= map
->buckets
[-1-item
]->type
;
738 dprintk(" item %d type %d\n", item
, itemtype
);
741 if (itemtype
!= type
) {
743 (-1-item
) >= map
->max_buckets
) {
744 dprintk(" bad item type %d\n", type
);
745 out
[rep
] = CRUSH_ITEM_NONE
;
752 in
= map
->buckets
[-1-item
];
758 for (i
= outpos
; i
< endpos
; i
++) {
759 if (out
[i
] == item
) {
767 if (recurse_to_leaf
) {
772 map
->buckets
[-1-item
],
777 0, NULL
, r
, choose_args
);
778 if (out2
[rep
] == CRUSH_ITEM_NONE
) {
779 /* placed nothing; no leaf */
783 /* we already have a leaf! */
790 is_out(map
, weight
, weight_max
, item
, x
))
800 for (rep
= outpos
; rep
< endpos
; rep
++) {
801 if (out
[rep
] == CRUSH_ITEM_UNDEF
) {
802 out
[rep
] = CRUSH_ITEM_NONE
;
804 if (out2
&& out2
[rep
] == CRUSH_ITEM_UNDEF
) {
805 out2
[rep
] = CRUSH_ITEM_NONE
;
809 if (map
->choose_tries
&& ftotal
<= map
->choose_total_tries
)
810 map
->choose_tries
[ftotal
]++;
814 dprintk("%u %d a: ", ftotal
, left
);
815 for (rep
= outpos
; rep
< endpos
; rep
++) {
816 dprintk(" %d", out
[rep
]);
819 dprintk("%u %d b: ", ftotal
, left
);
820 for (rep
= outpos
; rep
< endpos
; rep
++) {
821 dprintk(" %d", out2
[rep
]);
829 /* This takes a chunk of memory and sets it up to be a shiny new
830 working area for a CRUSH placement computation. It must be called
831 on any newly allocated memory before passing it in to
832 crush_do_rule. It may be used repeatedly after that, so long as the
833 map has not changed. If the map /has/ changed, you must make sure
834 the working size is no smaller than what was allocated and re-run
835 crush_init_workspace.
837 If you do retain the working space between calls to crush, make it
838 thread-local. If you reinstitute the locking I've spent so much
839 time getting rid of, I will be very unhappy with you. */
841 void crush_init_workspace(const struct crush_map
*m
, void *v
) {
842 /* We work by moving through the available space and setting
843 values and pointers as we go.
845 It's a bit like Forth's use of the 'allot' word since we
846 set the pointer first and then reserve the space for it to
847 point to by incrementing the point. */
848 struct crush_work
*w
= (struct crush_work
*)v
;
849 char *point
= (char *)v
;
851 point
+= sizeof(struct crush_work
);
852 w
->work
= (struct crush_work_bucket
**)point
;
853 point
+= m
->max_buckets
* sizeof(struct crush_work_bucket
*);
854 for (b
= 0; b
< m
->max_buckets
; ++b
) {
855 if (m
->buckets
[b
] == 0)
858 w
->work
[b
] = (struct crush_work_bucket
*) point
;
859 switch (m
->buckets
[b
]->alg
) {
861 point
+= sizeof(struct crush_work_bucket
);
864 w
->work
[b
]->perm_x
= 0;
865 w
->work
[b
]->perm_n
= 0;
866 w
->work
[b
]->perm
= (__u32
*)point
;
867 point
+= m
->buckets
[b
]->size
* sizeof(__u32
);
869 BUG_ON((char *)point
- (char *)w
!= m
->working_size
);
873 * crush_do_rule - calculate a mapping with the given input and rule
874 * @map: the crush_map
875 * @ruleno: the rule id
877 * @result: pointer to result vector
878 * @result_max: maximum result size
879 * @weight: weight vector (for map leaves)
880 * @weight_max: size of weight vector
881 * @cwin: Pointer to at least map->working_size bytes of memory or NULL.
883 int crush_do_rule(const struct crush_map
*map
,
884 int ruleno
, int x
, int *result
, int result_max
,
885 const __u32
*weight
, int weight_max
,
886 void *cwin
, const struct crush_choose_arg
*choose_args
)
889 struct crush_work
*cw
= cwin
;
890 int *a
= (int *)((char *)cw
+ map
->working_size
);
891 int *b
= a
+ result_max
;
892 int *c
= b
+ result_max
;
899 const struct crush_rule
*rule
;
905 * the original choose_total_tries value was off by one (it
906 * counted "retries" and not "tries"). add one.
908 int choose_tries
= map
->choose_total_tries
+ 1;
909 int choose_leaf_tries
= 0;
911 * the local tries values were counted as "retries", though,
912 * and need no adjustment
914 int choose_local_retries
= map
->choose_local_tries
;
915 int choose_local_fallback_retries
= map
->choose_local_fallback_tries
;
917 int vary_r
= map
->chooseleaf_vary_r
;
918 int stable
= map
->chooseleaf_stable
;
920 if ((__u32
)ruleno
>= map
->max_rules
) {
921 dprintk(" bad ruleno %d\n", ruleno
);
925 rule
= map
->rules
[ruleno
];
928 for (step
= 0; step
< rule
->len
; step
++) {
930 const struct crush_rule_step
*curstep
= &rule
->steps
[step
];
932 switch (curstep
->op
) {
933 case CRUSH_RULE_TAKE
:
934 if ((curstep
->arg1
>= 0 &&
935 curstep
->arg1
< map
->max_devices
) ||
936 (-1-curstep
->arg1
>= 0 &&
937 -1-curstep
->arg1
< map
->max_buckets
&&
938 map
->buckets
[-1-curstep
->arg1
])) {
939 w
[0] = curstep
->arg1
;
942 dprintk(" bad take value %d\n", curstep
->arg1
);
946 case CRUSH_RULE_SET_CHOOSE_TRIES
:
947 if (curstep
->arg1
> 0)
948 choose_tries
= curstep
->arg1
;
951 case CRUSH_RULE_SET_CHOOSELEAF_TRIES
:
952 if (curstep
->arg1
> 0)
953 choose_leaf_tries
= curstep
->arg1
;
956 case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES
:
957 if (curstep
->arg1
>= 0)
958 choose_local_retries
= curstep
->arg1
;
961 case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES
:
962 if (curstep
->arg1
>= 0)
963 choose_local_fallback_retries
= curstep
->arg1
;
966 case CRUSH_RULE_SET_CHOOSELEAF_VARY_R
:
967 if (curstep
->arg1
>= 0)
968 vary_r
= curstep
->arg1
;
971 case CRUSH_RULE_SET_CHOOSELEAF_STABLE
:
972 if (curstep
->arg1
>= 0)
973 stable
= curstep
->arg1
;
976 case CRUSH_RULE_CHOOSELEAF_FIRSTN
:
977 case CRUSH_RULE_CHOOSE_FIRSTN
:
980 case CRUSH_RULE_CHOOSELEAF_INDEP
:
981 case CRUSH_RULE_CHOOSE_INDEP
:
987 CRUSH_RULE_CHOOSELEAF_FIRSTN
||
989 CRUSH_RULE_CHOOSELEAF_INDEP
;
994 for (i
= 0; i
< wsize
; i
++) {
996 numrep
= curstep
->arg1
;
998 numrep
+= result_max
;
1003 /* make sure bucket id is valid */
1005 if (bno
< 0 || bno
>= map
->max_buckets
) {
1006 // w[i] is probably CRUSH_ITEM_NONE
1007 dprintk(" bad w[i] %d\n", w
[i
]);
1012 if (choose_leaf_tries
)
1015 else if (map
->chooseleaf_descend_once
)
1018 recurse_tries
= choose_tries
;
1019 osize
+= crush_choose_firstn(
1030 choose_local_retries
,
1031 choose_local_fallback_retries
,
1039 out_size
= ((numrep
< (result_max
-osize
)) ?
1040 numrep
: (result_max
-osize
));
1046 x
, out_size
, numrep
,
1051 choose_leaf_tries
: 1,
1060 if (recurse_to_leaf
)
1061 /* copy final _leaf_ values to output set */
1062 memcpy(o
, c
, osize
*sizeof(*o
));
1064 /* swap o and w arrays */
1072 case CRUSH_RULE_EMIT
:
1073 for (i
= 0; i
< wsize
&& result_len
< result_max
; i
++) {
1074 result
[result_len
] = w
[i
];
1081 dprintk(" unknown op %d at step %d\n",