2 * Ceph - scalable distributed file system
4 * Copyright (C) 2015 Intel Corporation All Rights Reserved
6 * This is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License version 2.1, as published by the Free Software
9 * Foundation. See file COPYING.
14 # include <linux/string.h>
15 # include <linux/slab.h>
16 # include <linux/bug.h>
17 # include <linux/kernel.h>
18 # include <linux/crush/crush.h>
19 # include <linux/crush/hash.h>
21 # include "crush_compat.h"
25 #include "crush_ln_table.h"
28 #define dprintk(args...) /* printf(args) */
31 * Implement the core CRUSH mapping algorithm.
35 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
37 * @ruleset: the storage ruleset id (user defined)
38 * @type: storage ruleset type (user defined)
39 * @size: output set size
41 int crush_find_rule(const struct crush_map
*map
, int ruleset
, int type
, int size
)
45 for (i
= 0; i
< map
->max_rules
; i
++) {
47 map
->rules
[i
]->mask
.ruleset
== ruleset
&&
48 map
->rules
[i
]->mask
.type
== type
&&
49 map
->rules
[i
]->mask
.min_size
<= size
&&
50 map
->rules
[i
]->mask
.max_size
>= size
)
57 * bucket choose methods
59 * For each bucket algorithm, we have a "choose" method that, given a
60 * crush input @x and replica position (usually, position in output set) @r,
61 * will produce an item in the bucket.
65 * Choose based on a random permutation of the bucket.
67 * We used to use some prime number arithmetic to do this, but it
68 * wasn't very random, and had some other bad behaviors. Instead, we
69 * calculate an actual random permutation of the bucket members.
70 * Since this is expensive, we optimize for the r=0 case, which
71 * captures the vast majority of calls.
73 static int bucket_perm_choose(const struct crush_bucket
*bucket
,
74 struct crush_work_bucket
*work
,
77 unsigned int pr
= r
% bucket
->size
;
80 /* start a new permutation if @x has changed */
81 if (work
->perm_x
!= (__u32
)x
|| work
->perm_n
== 0) {
82 dprintk("bucket %d new x=%d\n", bucket
->id
, x
);
85 /* optimize common r=0 case */
87 s
= crush_hash32_3(bucket
->hash
, x
, bucket
->id
, 0) %
90 work
->perm_n
= 0xffff; /* magic value, see below */
94 for (i
= 0; i
< bucket
->size
; i
++)
97 } else if (work
->perm_n
== 0xffff) {
98 /* clean up after the r=0 case above */
99 for (i
= 1; i
< bucket
->size
; i
++)
101 work
->perm
[work
->perm
[0]] = 0;
105 /* calculate permutation up to pr */
106 for (i
= 0; i
< work
->perm_n
; i
++)
107 dprintk(" perm_choose have %d: %d\n", i
, work
->perm
[i
]);
108 while (work
->perm_n
<= pr
) {
109 unsigned int p
= work
->perm_n
;
110 /* no point in swapping the final entry */
111 if (p
< bucket
->size
- 1) {
112 i
= crush_hash32_3(bucket
->hash
, x
, bucket
->id
, p
) %
115 unsigned int t
= work
->perm
[p
+ i
];
116 work
->perm
[p
+ i
] = work
->perm
[p
];
119 dprintk(" perm_choose swap %d with %d\n", p
, p
+i
);
123 for (i
= 0; i
< bucket
->size
; i
++)
124 dprintk(" perm_choose %d: %d\n", i
, work
->perm
[i
]);
128 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket
->id
,
129 bucket
->size
, x
, r
, pr
, s
);
130 return bucket
->items
[s
];
134 static int bucket_uniform_choose(const struct crush_bucket_uniform
*bucket
,
135 struct crush_work_bucket
*work
, int x
, int r
)
137 return bucket_perm_choose(&bucket
->h
, work
, x
, r
);
141 static int bucket_list_choose(const struct crush_bucket_list
*bucket
,
146 for (i
= bucket
->h
.size
-1; i
>= 0; i
--) {
147 __u64 w
= crush_hash32_4(bucket
->h
.hash
, x
, bucket
->h
.items
[i
],
150 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
152 i
, x
, r
, bucket
->h
.items
[i
], bucket
->item_weights
[i
],
153 bucket
->sum_weights
[i
], w
);
154 w
*= bucket
->sum_weights
[i
];
156 /*dprintk(" scaled %llx\n", w);*/
157 if (w
< bucket
->item_weights
[i
]) {
158 return bucket
->h
.items
[i
];
162 dprintk("bad list sums for bucket %d\n", bucket
->h
.id
);
163 return bucket
->h
.items
[0];
/* (binary tree) bucket node navigation helpers.
 *
 * Nodes are numbered so that the lowest set bit encodes the node's height
 * in the tree; leaves are the odd indices. */

/* height of node @n in the tree (number of trailing zero bits) */
static int height(int n)
{
	int h = 0;
	while ((n & 1) == 0) {
		h++;
		n = n >> 1;
	}
	return h;
}

/* left child of interior node @x */
static int left(int x)
{
	int h = height(x);
	return x - (1 << (h-1));
}

/* right child of interior node @x */
static int right(int x)
{
	int h = height(x);
	return x + (1 << (h-1));
}

/* true if @x is a leaf (odd index) */
static int terminal(int x)
{
	return x & 1;
}
195 static int bucket_tree_choose(const struct crush_bucket_tree
*bucket
,
203 n
= bucket
->num_nodes
>> 1;
205 while (!terminal(n
)) {
207 /* pick point in [0, w) */
208 w
= bucket
->node_weights
[n
];
209 t
= (__u64
)crush_hash32_4(bucket
->h
.hash
, x
, n
, r
,
210 bucket
->h
.id
) * (__u64
)w
;
213 /* descend to the left or right? */
215 if (t
< bucket
->node_weights
[l
])
221 return bucket
->h
.items
[n
>> 1];
227 static int bucket_straw_choose(const struct crush_bucket_straw
*bucket
,
235 for (i
= 0; i
< bucket
->h
.size
; i
++) {
236 draw
= crush_hash32_3(bucket
->h
.hash
, x
, bucket
->h
.items
[i
], r
);
238 draw
*= bucket
->straws
[i
];
239 if (i
== 0 || draw
> high_draw
) {
244 return bucket
->h
.items
[high
];
247 /* compute 2^44*log2(input+1) */
248 static __u64
crush_ln(unsigned int xin
)
250 unsigned int x
= xin
;
251 int iexpon
, index1
, index2
;
252 __u64 RH
, LH
, LL
, xl64
, result
;
256 /* normalize input */
259 // figure out number of bits we need to shift and
260 // do it in one step instead of iteratively
261 if (!(x
& 0x18000)) {
262 int bits
= __builtin_clz(x
& 0x1FFFF) - 16;
267 index1
= (x
>> 8) << 1;
268 /* RH ~ 2^56/index1 */
269 RH
= __RH_LH_tbl
[index1
- 256];
270 /* LH ~ 2^48 * log2(index1/256) */
271 LH
= __RH_LH_tbl
[index1
+ 1 - 256];
273 /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
274 xl64
= (__s64
)x
* RH
;
278 result
<<= (12 + 32);
280 index2
= xl64
& 0xff;
281 /* LL ~ 2^48*log2(1.0+index2/2^15) */
282 LL
= __LL_tbl
[index2
];
286 LH
>>= (48 - 12 - 32);
296 * for reference, see:
298 * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
302 static inline __u32
*get_choose_arg_weights(const struct crush_bucket_straw2
*bucket
,
303 const struct crush_choose_arg
*arg
,
307 (arg
->weight_set
== NULL
) ||
308 (arg
->weight_set_size
== 0))
309 return bucket
->item_weights
;
310 if (position
>= arg
->weight_set_size
)
311 position
= arg
->weight_set_size
- 1;
312 return arg
->weight_set
[position
].weights
;
315 static inline int *get_choose_arg_ids(const struct crush_bucket_straw2
*bucket
,
316 const struct crush_choose_arg
*arg
)
318 if ((arg
== NULL
) || (arg
->ids
== NULL
))
319 return bucket
->h
.items
;
323 static int bucket_straw2_choose(const struct crush_bucket_straw2
*bucket
,
324 int x
, int r
, const struct crush_choose_arg
*arg
,
327 unsigned int i
, high
= 0;
329 __s64 ln
, draw
, high_draw
= 0;
330 __u32
*weights
= get_choose_arg_weights(bucket
, arg
, position
);
331 int *ids
= get_choose_arg_ids(bucket
, arg
);
332 for (i
= 0; i
< bucket
->h
.size
; i
++) {
333 dprintk("weight 0x%x item %d\n", weights
[i
], ids
[i
]);
335 u
= crush_hash32_3(bucket
->h
.hash
, x
, ids
[i
], r
);
339 * for some reason slightly less than 0x10000 produces
340 * a slightly more accurate distribution... probably a
343 * the natural log lookup table maps [0,0xffff]
344 * (corresponding to real numbers [1/0x10000, 1] to
345 * [0, 0xffffffffffff] (corresponding to real numbers
348 ln
= crush_ln(u
) - 0x1000000000000ll
;
351 * divide by 16.16 fixed-point weight. note
352 * that the ln value is negative, so a larger
353 * weight means a larger (less negative) value
356 draw
= div64_s64(ln
, weights
[i
]);
361 if (i
== 0 || draw
> high_draw
) {
367 return bucket
->h
.items
[high
];
371 static int crush_bucket_choose(const struct crush_bucket
*in
,
372 struct crush_work_bucket
*work
,
374 const struct crush_choose_arg
*arg
,
377 dprintk(" crush_bucket_choose %d x=%d r=%d\n", in
->id
, x
, r
);
378 BUG_ON(in
->size
== 0);
380 case CRUSH_BUCKET_UNIFORM
:
381 return bucket_uniform_choose(
382 (const struct crush_bucket_uniform
*)in
,
384 case CRUSH_BUCKET_LIST
:
385 return bucket_list_choose((const struct crush_bucket_list
*)in
,
387 case CRUSH_BUCKET_TREE
:
388 return bucket_tree_choose((const struct crush_bucket_tree
*)in
,
390 case CRUSH_BUCKET_STRAW
:
391 return bucket_straw_choose(
392 (const struct crush_bucket_straw
*)in
,
394 case CRUSH_BUCKET_STRAW2
:
395 return bucket_straw2_choose(
396 (const struct crush_bucket_straw2
*)in
,
397 x
, r
, arg
, position
);
399 dprintk("unknown bucket %d alg %d\n", in
->id
, in
->alg
);
405 * true if device is marked "out" (failed, fully offloaded)
408 static int is_out(const struct crush_map
*map
,
409 const __u32
*weight
, int weight_max
,
412 if (item
>= weight_max
)
414 if (weight
[item
] >= 0x10000)
416 if (weight
[item
] == 0)
418 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1
, x
, item
) & 0xffff)
425 * crush_choose_firstn - choose numrep distinct items of given type
426 * @map: the crush_map
427 * @bucket: the bucket we are choose an item from
428 * @x: crush input value
429 * @numrep: the number of items to choose
430 * @type: the type of item to choose
431 * @out: pointer to output vector
432 * @outpos: our position in that vector
433 * @out_size: size of the out vector
434 * @tries: number of attempts to make
435 * @recurse_tries: number of attempts to have recursive chooseleaf make
436 * @local_retries: localized retries
437 * @local_fallback_retries: localized fallback retries
438 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
439 * @stable: stable mode starts rep=0 in the recursive call for all replicas
440 * @vary_r: pass r to recursive calls
441 * @out2: second output vector for leaf items (if @recurse_to_leaf)
442 * @parent_r: r value passed from the parent
444 static int crush_choose_firstn(const struct crush_map
*map
,
445 struct crush_work
*work
,
446 const struct crush_bucket
*bucket
,
447 const __u32
*weight
, int weight_max
,
448 int x
, int numrep
, int type
,
449 int *out
, int outpos
,
452 unsigned int recurse_tries
,
453 unsigned int local_retries
,
454 unsigned int local_fallback_retries
,
460 const struct crush_choose_arg
*choose_args
)
463 unsigned int ftotal
, flocal
;
464 int retry_descent
, retry_bucket
, skip_rep
;
465 const struct crush_bucket
*in
= bucket
;
471 int count
= out_size
;
473 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d \
474 recurse_tries %d local_retries %d local_fallback_retries %d \
475 parent_r %d stable %d\n",
476 recurse_to_leaf
? "_LEAF" : "",
477 bucket
->id
, x
, outpos
, numrep
,
478 tries
, recurse_tries
, local_retries
, local_fallback_retries
,
481 for (rep
= stable
? 0 : outpos
; rep
< numrep
&& count
> 0 ; rep
++) {
482 /* keep trying until we get a non-out, non-colliding item */
487 in
= bucket
; /* initial bucket */
489 /* choose through intervening buckets */
495 /* r' = r + f_total */
503 if (local_fallback_retries
> 0 &&
504 flocal
>= (in
->size
>>1) &&
505 flocal
> local_fallback_retries
)
506 item
= bucket_perm_choose(
507 in
, work
->work
[-1-in
->id
],
510 item
= crush_bucket_choose(
511 in
, work
->work
[-1-in
->id
],
513 (choose_args
? &choose_args
[-1-in
->id
] : 0),
515 if (item
>= map
->max_devices
) {
516 dprintk(" bad item %d\n", item
);
523 itemtype
= map
->buckets
[-1-item
]->type
;
526 dprintk(" item %d type %d\n", item
, itemtype
);
529 if (itemtype
!= type
) {
531 (-1-item
) >= map
->max_buckets
) {
532 dprintk(" bad item type %d\n", type
);
536 in
= map
->buckets
[-1-item
];
542 for (i
= 0; i
< outpos
; i
++) {
543 if (out
[i
] == item
) {
550 if (!collide
&& recurse_to_leaf
) {
554 sub_r
= r
>> (vary_r
-1);
557 if (crush_choose_firstn(
560 map
->buckets
[-1-item
],
562 x
, stable
? 1 : outpos
+1, 0,
566 local_fallback_retries
,
572 choose_args
) <= outpos
)
573 /* didn't get leaf */
576 /* we already have a leaf! */
581 if (!reject
&& !collide
) {
584 reject
= is_out(map
, weight
,
590 if (reject
|| collide
) {
594 if (collide
&& flocal
<= local_retries
)
595 /* retry locally a few times */
597 else if (local_fallback_retries
> 0 &&
598 flocal
<= in
->size
+ local_fallback_retries
)
599 /* exhaustive bucket search */
601 else if (ftotal
< tries
)
602 /* then retry descent */
607 dprintk(" reject %d collide %d "
608 "ftotal %u flocal %u\n",
609 reject
, collide
, ftotal
,
612 } while (retry_bucket
);
613 } while (retry_descent
);
616 dprintk("skip rep\n");
620 dprintk("CHOOSE got %d\n", item
);
625 if (map
->choose_tries
&& ftotal
<= map
->choose_total_tries
)
626 map
->choose_tries
[ftotal
]++;
630 dprintk("CHOOSE returns %d\n", outpos
);
636 * crush_choose_indep: alternative breadth-first positionally stable mapping
639 static void crush_choose_indep(const struct crush_map
*map
,
640 struct crush_work
*work
,
641 const struct crush_bucket
*bucket
,
642 const __u32
*weight
, int weight_max
,
643 int x
, int left
, int numrep
, int type
,
644 int *out
, int outpos
,
646 unsigned int recurse_tries
,
650 const struct crush_choose_arg
*choose_args
)
652 const struct crush_bucket
*in
= bucket
;
653 int endpos
= outpos
+ left
;
662 dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf
? "_LEAF" : "",
663 bucket
->id
, x
, outpos
, numrep
);
665 /* initially my result is undefined */
666 for (rep
= outpos
; rep
< endpos
; rep
++) {
667 out
[rep
] = CRUSH_ITEM_UNDEF
;
669 out2
[rep
] = CRUSH_ITEM_UNDEF
;
672 for (ftotal
= 0; left
> 0 && ftotal
< tries
; ftotal
++) {
674 if (out2
&& ftotal
) {
675 dprintk("%u %d a: ", ftotal
, left
);
676 for (rep
= outpos
; rep
< endpos
; rep
++) {
677 dprintk(" %d", out
[rep
]);
680 dprintk("%u %d b: ", ftotal
, left
);
681 for (rep
= outpos
; rep
< endpos
; rep
++) {
682 dprintk(" %d", out2
[rep
]);
687 for (rep
= outpos
; rep
< endpos
; rep
++) {
688 if (out
[rep
] != CRUSH_ITEM_UNDEF
)
691 in
= bucket
; /* initial bucket */
693 /* choose through intervening buckets */
695 /* note: we base the choice on the position
696 * even in the nested call. that means that
697 * if the first layer chooses the same bucket
698 * in a different position, we will tend to
699 * choose a different item in that bucket.
700 * this will involve more devices in data
701 * movement and tend to distribute the load.
706 if (in
->alg
== CRUSH_BUCKET_UNIFORM
&&
707 in
->size
% numrep
== 0)
708 /* r'=r+(n+1)*f_total */
709 r
+= (numrep
+1) * ftotal
;
711 /* r' = r + n*f_total */
712 r
+= numrep
* ftotal
;
716 dprintk(" empty bucket\n");
720 item
= crush_bucket_choose(
721 in
, work
->work
[-1-in
->id
],
723 (choose_args
? &choose_args
[-1-in
->id
] : 0),
725 if (item
>= map
->max_devices
) {
726 dprintk(" bad item %d\n", item
);
727 out
[rep
] = CRUSH_ITEM_NONE
;
729 out2
[rep
] = CRUSH_ITEM_NONE
;
736 itemtype
= map
->buckets
[-1-item
]->type
;
739 dprintk(" item %d type %d\n", item
, itemtype
);
742 if (itemtype
!= type
) {
744 (-1-item
) >= map
->max_buckets
) {
745 dprintk(" bad item type %d\n", type
);
746 out
[rep
] = CRUSH_ITEM_NONE
;
753 in
= map
->buckets
[-1-item
];
759 for (i
= outpos
; i
< endpos
; i
++) {
760 if (out
[i
] == item
) {
768 if (recurse_to_leaf
) {
773 map
->buckets
[-1-item
],
778 0, NULL
, r
, choose_args
);
779 if (out2
[rep
] == CRUSH_ITEM_NONE
) {
780 /* placed nothing; no leaf */
784 /* we already have a leaf! */
791 is_out(map
, weight
, weight_max
, item
, x
))
801 for (rep
= outpos
; rep
< endpos
; rep
++) {
802 if (out
[rep
] == CRUSH_ITEM_UNDEF
) {
803 out
[rep
] = CRUSH_ITEM_NONE
;
805 if (out2
&& out2
[rep
] == CRUSH_ITEM_UNDEF
) {
806 out2
[rep
] = CRUSH_ITEM_NONE
;
810 if (map
->choose_tries
&& ftotal
<= map
->choose_total_tries
)
811 map
->choose_tries
[ftotal
]++;
815 dprintk("%u %d a: ", ftotal
, left
);
816 for (rep
= outpos
; rep
< endpos
; rep
++) {
817 dprintk(" %d", out
[rep
]);
820 dprintk("%u %d b: ", ftotal
, left
);
821 for (rep
= outpos
; rep
< endpos
; rep
++) {
822 dprintk(" %d", out2
[rep
]);
830 /* This takes a chunk of memory and sets it up to be a shiny new
831 working area for a CRUSH placement computation. It must be called
832 on any newly allocated memory before passing it in to
833 crush_do_rule. It may be used repeatedly after that, so long as the
834 map has not changed. If the map /has/ changed, you must make sure
835 the working size is no smaller than what was allocated and re-run
836 crush_init_workspace.
838 If you do retain the working space between calls to crush, make it
839 thread-local. If you reinstitute the locking I've spent so much
840 time getting rid of, I will be very unhappy with you. */
842 void crush_init_workspace(const struct crush_map
*m
, void *v
) {
843 /* We work by moving through the available space and setting
844 values and pointers as we go.
846 It's a bit like Forth's use of the 'allot' word since we
847 set the pointer first and then reserve the space for it to
848 point to by incrementing the point. */
849 struct crush_work
*w
= (struct crush_work
*)v
;
850 char *point
= (char *)v
;
852 point
+= sizeof(struct crush_work
);
853 w
->work
= (struct crush_work_bucket
**)point
;
854 point
+= m
->max_buckets
* sizeof(struct crush_work_bucket
*);
855 for (b
= 0; b
< m
->max_buckets
; ++b
) {
856 if (m
->buckets
[b
] == 0)
859 w
->work
[b
] = (struct crush_work_bucket
*) point
;
860 switch (m
->buckets
[b
]->alg
) {
862 point
+= sizeof(struct crush_work_bucket
);
865 w
->work
[b
]->perm_x
= 0;
866 w
->work
[b
]->perm_n
= 0;
867 w
->work
[b
]->perm
= (__u32
*)point
;
868 point
+= m
->buckets
[b
]->size
* sizeof(__u32
);
870 BUG_ON((char *)point
- (char *)w
!= m
->working_size
);
874 * crush_do_rule - calculate a mapping with the given input and rule
875 * @map: the crush_map
876 * @ruleno: the rule id
878 * @result: pointer to result vector
879 * @result_max: maximum result size
880 * @weight: weight vector (for map leaves)
881 * @weight_max: size of weight vector
882 * @cwin: Pointer to at least map->working_size bytes of memory or NULL.
884 int crush_do_rule(const struct crush_map
*map
,
885 int ruleno
, int x
, int *result
, int result_max
,
886 const __u32
*weight
, int weight_max
,
887 void *cwin
, const struct crush_choose_arg
*choose_args
)
890 struct crush_work
*cw
= cwin
;
891 int *a
= (int *)((char *)cw
+ map
->working_size
);
892 int *b
= a
+ result_max
;
893 int *c
= b
+ result_max
;
900 const struct crush_rule
*rule
;
906 * the original choose_total_tries value was off by one (it
907 * counted "retries" and not "tries"). add one.
909 int choose_tries
= map
->choose_total_tries
+ 1;
910 int choose_leaf_tries
= 0;
912 * the local tries values were counted as "retries", though,
913 * and need no adjustment
915 int choose_local_retries
= map
->choose_local_tries
;
916 int choose_local_fallback_retries
= map
->choose_local_fallback_tries
;
918 int vary_r
= map
->chooseleaf_vary_r
;
919 int stable
= map
->chooseleaf_stable
;
921 if ((__u32
)ruleno
>= map
->max_rules
) {
922 dprintk(" bad ruleno %d\n", ruleno
);
926 rule
= map
->rules
[ruleno
];
929 for (step
= 0; step
< rule
->len
; step
++) {
931 const struct crush_rule_step
*curstep
= &rule
->steps
[step
];
933 switch (curstep
->op
) {
934 case CRUSH_RULE_TAKE
:
935 if ((curstep
->arg1
>= 0 &&
936 curstep
->arg1
< map
->max_devices
) ||
937 (-1-curstep
->arg1
>= 0 &&
938 -1-curstep
->arg1
< map
->max_buckets
&&
939 map
->buckets
[-1-curstep
->arg1
])) {
940 w
[0] = curstep
->arg1
;
943 dprintk(" bad take value %d\n", curstep
->arg1
);
947 case CRUSH_RULE_SET_CHOOSE_TRIES
:
948 if (curstep
->arg1
> 0)
949 choose_tries
= curstep
->arg1
;
952 case CRUSH_RULE_SET_CHOOSELEAF_TRIES
:
953 if (curstep
->arg1
> 0)
954 choose_leaf_tries
= curstep
->arg1
;
957 case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES
:
958 if (curstep
->arg1
>= 0)
959 choose_local_retries
= curstep
->arg1
;
962 case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES
:
963 if (curstep
->arg1
>= 0)
964 choose_local_fallback_retries
= curstep
->arg1
;
967 case CRUSH_RULE_SET_CHOOSELEAF_VARY_R
:
968 if (curstep
->arg1
>= 0)
969 vary_r
= curstep
->arg1
;
972 case CRUSH_RULE_SET_CHOOSELEAF_STABLE
:
973 if (curstep
->arg1
>= 0)
974 stable
= curstep
->arg1
;
977 case CRUSH_RULE_CHOOSELEAF_FIRSTN
:
978 case CRUSH_RULE_CHOOSE_FIRSTN
:
981 case CRUSH_RULE_CHOOSELEAF_INDEP
:
982 case CRUSH_RULE_CHOOSE_INDEP
:
988 CRUSH_RULE_CHOOSELEAF_FIRSTN
||
990 CRUSH_RULE_CHOOSELEAF_INDEP
;
995 for (i
= 0; i
< wsize
; i
++) {
997 numrep
= curstep
->arg1
;
999 numrep
+= result_max
;
1004 /* make sure bucket id is valid */
1006 if (bno
< 0 || bno
>= map
->max_buckets
) {
1007 // w[i] is probably CRUSH_ITEM_NONE
1008 dprintk(" bad w[i] %d\n", w
[i
]);
1013 if (choose_leaf_tries
)
1016 else if (map
->chooseleaf_descend_once
)
1019 recurse_tries
= choose_tries
;
1020 osize
+= crush_choose_firstn(
1031 choose_local_retries
,
1032 choose_local_fallback_retries
,
1040 out_size
= ((numrep
< (result_max
-osize
)) ?
1041 numrep
: (result_max
-osize
));
1047 x
, out_size
, numrep
,
1052 choose_leaf_tries
: 1,
1061 if (recurse_to_leaf
)
1062 /* copy final _leaf_ values to output set */
1063 memcpy(o
, c
, osize
*sizeof(*o
));
1065 /* swap o and w arrays */
1073 case CRUSH_RULE_EMIT
:
1074 for (i
= 0; i
< wsize
&& result_len
< result_max
; i
++) {
1075 result
[result_len
] = w
[i
];
1082 dprintk(" unknown op %d at step %d\n",