#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}

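/*
 * Worked example (illustrative values, not from this file): for
 * pg_num = 12, calc_bits_of(11) = 4, so pg_num_mask =
 * (1 << 4) - 1 = 15 -- the smallest 2^n-1 mask covering all 12 PG
 * ids, as the comment above describes.  ceph_stable_mod() relies on
 * this property when folding raw seeds into actual PG ids.
 */
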
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node)

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}

/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}

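/*
 * Sizing example (illustrative values): a map with max_buckets = 2
 * where each bucket holds 3 items ends up with working_size =
 * sizeof(struct crush_work) + 2 * sizeof(struct crush_work_bucket *) +
 * 2 * sizeof(struct crush_work_bucket) + 2 * 3 * sizeof(__u32).
 */
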
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				  (struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}

int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

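/*
 * Note (assumption based on the shared rbtree helper macros): the
 * DEFINE_RB_FUNCS2() expansion above is what provides the
 * lookup_pg_mapping(), insert_pg_mapping() and erase_pg_mapping()
 * helpers used throughout this file, keyed by struct ceph_pg via
 * ceph_pg_compare().
 */
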
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p); /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;  /* snapid key */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	mutex_init(&map->crush_workspace_mutex);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_temp),
			     struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->primary_temp),
			     struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap),
			     struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap_items),
			     struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(rb_first(&map->pg_pools),
			     struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map->crush_workspace);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;
		map->osd_primary_affinity = affinity;

		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}

static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	void *workspace;
	size_t work_size;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);
	workspace = kmalloc(work_size, GFP_NOIO);
	if (!workspace) {
		crush_destroy(crush);
		return -ENOMEM;
	}
	crush_init_workspace(crush, workspace);

	if (map->crush)
		crush_destroy(map->crush);
	kfree(map->crush_workspace);
	map->crush = crush;
	map->crush_workspace = workspace;
	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
						       bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = kzalloc(sizeof(*pg) + 2 * len * sizeof(u32), GFP_NOIO);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}

/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*((struct_v >= 5 ? sizeof(u32) :
						        sizeof(u8)) +
				       sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;

	*p += len;
	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}

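/*
 * Typical caller pattern (sketch; error handling elided, not a
 * verbatim call site from this file):
 *
 *	struct ceph_osdmap *map;
 *
 *	map = ceph_osdmap_decode(&p, end);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	...
 *	ceph_osdmap_destroy(map);
 */
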
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
		     w == CEPH_OSD_IN ? "(in)" :
		     (w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		ceph_decode_copy(p, &addr, sizeof(addr));
		ceph_decode_addr(&addr);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    bad);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, bad);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}

void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);

static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

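/*
 * Usage sketch (illustrative, not a call site from this file;
 * ceph_oid_init() is assumed from the osdmap header):
 *
 *	struct ceph_object_id oid;
 *
 *	ceph_oid_init(&oid);
 *	ret = ceph_oid_aprintf(&oid, GFP_NOIO, "%llx.%08llx", ino, bno);
 *	...
 *	ceph_oid_destroy(&oid);
 *
 * Only names that do not fit the inline buffer take the kmalloc()
 * path above.
 */
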
/*
 * osds only
 */
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

/*
 * osds + primary
 */
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				return false;
		}
		return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}

bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}

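/*
 * Worked example (illustrative values): splitting from old_pg_num = 4
 * to new_pg_num = 8 gives old_bits = 3, old_mask = 7.  For
 * pgid->seed = 1 and n = 1, s = 4 | 1 = 5; 5 is below new_pg_num and
 * ceph_stable_mod(5, 4, 7) folds back to 1 == seed, so PG 1 acquired
 * a child and the function returns true.
 */
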
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false;	/* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true;	/* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true;	/* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false;	/* same primary (though replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = layout->object_size;
	u32 su = layout->stripe_unit;
	u32 sc = layout->stripe_count;
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

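/*
 * Worked example (illustrative layout, not from this file): with
 * object_size = 4M, stripe_unit = 1M, stripe_count = 2, an I/O at
 * off = 5M gives bl = 5, stripeno = 2, stripepos = 1,
 * su_per_object = 4, objsetno = 0, so *ono = 1, *oxoff = 2M, and
 * *oxlen is capped at one stripe unit (1M) minus the offset within
 * it.
 */
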
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				const struct ceph_object_id *oid,
				const struct ceph_object_locator *oloc,
				struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf)) {
			buf = kmalloc(total, GFP_NOIO);
			if (!buf)
				return -ENOMEM;
		}
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
	return 0;
}

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	return __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}

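/*
 * Example of the HASHPSPOOL branch (illustrative values): for pool 3
 * with pgp_num = 8 (pgp_num_mask = 7) and raw seed 0x12345678,
 * ceph_stable_mod(0x12345678, 8, 7) = 0 and the pps is
 * crush_hash32_2(CRUSH_HASH_RJENKINS1, 0, 3); mixing in the pool id
 * keeps equal seeds in different pools from colliding.
 */
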
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    u64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);

	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}

static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set dne devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}

/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size,
		       ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}

/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		return;
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}
				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}
			if (!exists && pos >= 0) {
				raw->osds[pos] = to;
			}
		}
	}
}

/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}

static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}

/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}

/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

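/*
 * To summarize the pipeline above: CRUSH raw set -> pg_upmap[_items]
 * overrides -> drop down/nonexistent OSDs (up set) -> primary
 * affinity -> pg_temp/primary_temp overrides (acting set), with the
 * acting set falling back to the up set when no temp mapping exists.
 */
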
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);