#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>	/* do_div(), used by ceph_calc_file_object_mapping() */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}

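/*
 * Worked example: for a pool with pg_num = 12, calc_bits_of(11) = 4,
 * so pg_num_mask = (1 << 4) - 1 = 0xf = 15, the smallest 2^n-1 >= 12.
 * ceph_stable_mod(x, b, bmask) returns x & bmask when that lands below
 * b, else x & (bmask >> 1), so most pg placements stay put when pg_num
 * grows within the same power of two.
 */
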
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	/*
	 * num_nodes is a __u8 both on the wire and in struct
	 * crush_bucket_tree; decoding 32 bits here (as older code did)
	 * reads past the single encoded byte.
	 */
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int skip_name_map(void **p, void *end)
{
	int len;

	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;

		*p += sizeof(u32);	/* item id */
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;		/* name bytes */
	}
	return 0;
bad:
	return -EINVAL;
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

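/*
 * Note the "done" (not "bad") labels on the tunables decodes above:
 * tunables were appended to the crush encoding over time, so simply
 * running out of bytes there means an older map, and the defaults
 * assigned at the top of crush_decode() stay in effect (for example,
 * a map with no trailing tunables keeps choose_total_tries = 19).
 */
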
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	if (l.pool < r.pool)
		return -1;
	if (l.pool > r.pool)
		return 1;
	if (l.seed < r.seed)
		return -1;
	if (l.seed > r.seed)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %lld.%x got %p\n",
			     pgid.pool, pgid.seed, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
		     pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

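/*
 * Usage sketch (illustrative): translating a mount/config option such
 * as "pool=rbd" into a pool id:
 *
 *	int id = ceph_pg_poolid_by_name(osdmap, "rbd");
 *
 *	if (id < 0)
 *		return id;	// -ENOENT: no pool by that name
 */
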
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warning("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;      /* snapid key */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;    /* name */
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		*p += 1;  /* skip min_size */

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

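/*
 * The pool struct is length-prefixed, so anything a newer encoding
 * appends past what we understand is skipped wholesale by the final
 * *p = pool_end, while older encodings are handled by the ev >= N
 * guards above.  Example: an ev == 7 pool carries no quota or tier
 * fields, so read_tier/write_tier simply stay -1.
 */
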
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->primary_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!state || !weight || !addr) {
		kfree(state);
		kfree(weight);
		kfree(addr);
		return -ENOMEM;
	}

	for (i = map->max_osd; i < max; i++) {
		state[i] = 0;
		weight[i] = CEPH_OSD_OUT;
		memset(addr + i, 0, sizeof(*addr));
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		for (i = map->max_osd; i < max; i++)
			affinity[i] = CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

		map->osd_primary_affinity = affinity;
	}

	map->max_osd = max;

	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warning("got v %d cv %d > %d of %s ceph_osdmap\n",
				   struct_v, struct_compat,
				   OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warning("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				   struct_v, struct_compat,
				   OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;	/* need to re-read the first byte */
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warning("got v %d < 6 of %s ceph_osdmap\n", version,
				   prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

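/*
 * Wire-format example: a v7+ map arrives wrapped as
 *
 *	u8 struct_v, u8 struct_compat, u32 struct_len	(wrapper)
 *	u8 struct_v, u8 struct_compat, u32 struct_len	(client data)
 *	...
 *
 * and *v is set to the inner (client data) struct_v.  A legacy map
 * instead starts with a u16 version (hence the one-byte rewind above),
 * which must be at least 6, and yields *v == 0.
 */
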
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map,
			    bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 len, i;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, len, e_inval);

		ret = __remove_pg_mapping(&map->pg_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || len > 0) {
			struct ceph_pg_mapping *pg;

			ceph_decode_need(p, end, len*sizeof(u32), e_inval);

			if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
				return -EINVAL;

			pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->pg_temp.len = len;
			for (i = 0; i < len; i++)
				pg->pg_temp.osds[i] = ceph_decode_32(p);

			ret = __insert_pg_mapping(pg, &map->pg_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, true);
}

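/*
 * Incremental semantics example: a new_pg_temp entry for pg 2.5 with
 * len == 0 merely removes any existing pg_temp mapping for 2.5, while
 * a non-empty osd list replaces it.  A full map (incremental == false)
 * starts from an empty tree, hence the BUG_ON that the removal above
 * finds nothing.
 */
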
static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map,
				 bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 osd;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, osd, e_inval);

		ret = __remove_pg_mapping(&map->primary_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || osd != (u32)-1) {
			struct ceph_pg_mapping *pg;

			pg = kzalloc(sizeof(*pg), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->primary_temp.osd = osd;

			ret = __insert_pg_mapping(pg, &map->primary_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_state, map->max_osd);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		/* XXX can this happen? */
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	map->crush = crush_decode(*p, min(*p + len, end));
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}
	*p += len;

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	mutex_init(&map->crush_scratch_mutex);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush)) {
			err = PTR_ERR(newcrush);
			newcrush = NULL;
			goto bad;
		}
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd;
		u8 xorstate;

		ceph_decode_32_safe(p, end, osd, e_inval);
		xorstate = **(u8 **)p;
		(*p)++;  /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd, off;

		ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
		     off == CEPH_OSD_IN ? "(in)" :
		     (off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * based on file layout
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

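/*
 * Worked example (hypothetical layout): su = 64K, sc = 4, osize = 256K
 * gives su_per_object = 4.  Mapping off = 300K, len = 100K:
 *
 *	bl = 300K / 64K = 4
 *	stripeno = 4 / 4 = 1, stripepos = 4 % 4 = 0
 *	objsetno = 1 / 4 = 0, so *ono = 0 * 4 + 0 = 0
 *	su_offset = 300K % 64K = 44K
 *	*oxoff = 44K + (1 % 4) * 64K = 108K
 *	*oxlen = min(100K, 64K - 44K) = 20K	(a single su, as noted)
 */
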
/*
 * Calculate mapping of a (oloc, oid) pair to a PG.  Should only be
 * called with target's (oloc, oid), since tiering isn't taken into
 * account.
 */
int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
			struct ceph_object_locator *oloc,
			struct ceph_object_id *oid,
			struct ceph_pg *pg_out)
{
	struct ceph_pg_pool_info *pi;

	pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
	if (!pi)
		return -EIO;

	pg_out->pool = oloc->pool;
	pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
				     oid->name_len);

	dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
	     pg_out->pool, pg_out->seed);
	return 0;
}
EXPORT_SYMBOL(ceph_oloc_oid_to_pg);

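/*
 * Usage sketch (illustrative names): to locate an object in pool 2,
 * fill in oloc.pool = 2 and oid with the object name, then:
 *
 *	struct ceph_pg pgid;
 *	int ret = ceph_oloc_oid_to_pg(osdmap, &oloc, &oid, &pgid);
 *
 * pgid.seed is the raw name hash; it is folded into pg_num only later
 * (see ceph_calc_pg_acting() and apply_temps() below).
 */
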
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max)
{
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	mutex_lock(&map->crush_scratch_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_scratch_ary);
	mutex_unlock(&map->crush_scratch_mutex);

	return r;
}

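/*
 * Design note: crush_do_rule() needs per-call scratch space (working
 * vectors of up to result_max entries, hence the CEPH_PG_MAX_SIZE
 * bound above).  Sharing map->crush_scratch_ary under
 * crush_scratch_mutex trades a per-call allocation for serializing
 * crush computations on this map.
 */
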
/*
 * Calculate raw (crush) set for given pgid.
 *
 * Return raw set length, or error.
 */
static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pool,
			  struct ceph_pg pgid, u32 pps, int *osds)
{
	int ruleno;
	int len;

	/* crush needs a rule to work with; find it first */
	ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
				 pool->type, pool->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pgid.pool, pool->crush_ruleset, pool->type,
		       pool->size);
		return -ENOENT;
	}

	len = do_crush(osdmap, ruleno, pps, osds,
		       min_t(int, pool->size, CEPH_PG_MAX_SIZE),
		       osdmap->osd_weight, osdmap->max_osd);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pgid.pool, pool->crush_ruleset,
		       pool->type, pool->size);
		return len;
	}

	return len;
}

/*
 * Given raw set, calculate up set and up primary.
 *
 * Return up set length.  *primary is set to up primary osd id, or -1
 * if up set is empty.
 */
static int raw_to_up_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pool,
			  int *osds, int len, int *primary)
{
	int up_primary = -1;
	int i;

	if (ceph_can_shift_osds(pool)) {
		int removed = 0;

		for (i = 0; i < len; i++) {
			if (ceph_osd_is_down(osdmap, osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				osds[i - removed] = osds[i];
		}

		len -= removed;
		if (len > 0)
			up_primary = osds[0];
	} else {
		for (i = len - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, osds[i]))
				osds[i] = CRUSH_ITEM_NONE;
			else
				up_primary = osds[i];
		}
	}

	*primary = up_primary;
	return len;
}

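/*
 * Example: raw set [3, 1, 7] with osd1 down.  A replicated pool
 * (shiftable) yields up set [3, 7], len 2, *primary = 3.  An
 * erasure-coded pool must preserve positions, yielding
 * [3, CRUSH_ITEM_NONE, 7] with *primary = 3 (the first non-hole
 * entry).
 */
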
static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
				   struct ceph_pg_pool_info *pool,
				   int *osds, int len, int *primary)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	/* note: index by osd id (osds[i]), matching the pick loop below */
	for (i = 0; i < len; i++) {
		if (osds[i] != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osds[i]] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == len)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < len; i++) {
		int osd = osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	*primary = osds[pos];

	if (ceph_can_shift_osds(pool) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			osds[i] = osds[i - 1];
		osds[0] = *primary;
	}
}

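/*
 * Example: with aff = CEPH_OSD_MAX_PRIMARY_AFFINITY / 4 (i.e. 0.25),
 * crush_hash32_2(pps, osd) >> 16 is an effectively uniform 16-bit
 * value, so the test above rejects this osd as primary for roughly 75%
 * of its pgs; the first osd that passes (or, failing that, the first
 * candidate noted in pos) becomes the primary.
 */
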
/*
 * Given up set, apply pg_temp and primary_temp mappings.
 *
 * Return acting set length.  *primary is set to acting primary osd id,
 * or -1 if acting set is empty.
 */
static int apply_temps(struct ceph_osdmap *osdmap,
		       struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
		       int *osds, int len, int *primary)
{
	struct ceph_pg_mapping *pg;
	int temp_len;
	int temp_primary;
	int i;

	/* raw_pg -> pg */
	pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
				    pool->pg_num_mask);

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		temp_len = 0;
		temp_primary = -1;

		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pool))
					continue;
				else
					osds[temp_len++] = CRUSH_ITEM_NONE;
			} else {
				osds[temp_len++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp_len; i++) {
			if (osds[i] != CRUSH_ITEM_NONE) {
				temp_primary = osds[i];
				break;
			}
		}
	} else {
		temp_len = len;
		temp_primary = *primary;
	}

	/* primary_temp? */
	pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp_primary = pg->primary_temp.osd;

	*primary = temp_primary;
	return temp_len;
}

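/*
 * Example: if the up set for pg 2.5 is [0, 1, 2] but a pg_temp entry
 * maps 2.5 -> [1, 2, 0] (say, while osd0 backfills), the acting set
 * returned is [1, 2, 0] with *primary = 1.  A primary_temp entry, by
 * contrast, overrides only *primary.
 */
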
/*
 * Calculate acting set for given pgid.
 *
 * Return acting set length, or error.  *primary is set to acting
 * primary osd id, or -1 if acting set is empty or on error.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *primary)
{
	struct ceph_pg_pool_info *pool;
	u32 pps;
	int len;

	pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
	if (!pool) {
		*primary = -1;
		return -ENOENT;
	}

	if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
				     ceph_stable_mod(pgid.seed, pool->pgp_num,
						     pool->pgp_num_mask),
				     pgid.pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
				      pool->pgp_num_mask) +
		    (unsigned)pgid.pool;
	}

	len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
	if (len < 0) {
		*primary = -1;
		return len;
	}

	len = raw_to_up_osds(osdmap, pool, osds, len, primary);

	apply_primary_affinity(osdmap, pps, pool, osds, len, primary);

	len = apply_temps(osdmap, pool, pgid, osds, len, primary);

	return len;
}

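/*
 * Usage sketch (illustrative):
 *
 *	int osds[CEPH_PG_MAX_SIZE];
 *	int primary;
 *	int len = ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
 *
 * On success osds[0..len) is the acting set, which may contain
 * CRUSH_ITEM_NONE holes for erasure-coded pools; len < 0 on error.
 */
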
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int osds[CEPH_PG_MAX_SIZE];
	int primary;

	ceph_calc_pg_acting(osdmap, pgid, osds, &primary);

	return primary;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);