#include "crush/hash.h"
#include "crush/mapper.h"
#include "ceph_debug.h"
char *ceph_osdmap_state_str(char *str, int len, int state)

        if (state & CEPH_OSD_EXISTS) {
                snprintf(str, len, "exists");

        if (state & CEPH_OSD_UP) {
                snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
                         "up");

                snprintf(str, len, "doesn't exist");
static int calc_bits_of(unsigned t)

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)

        pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
        pi->pgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
        pi->lpg_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
        pi->lpgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
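/*
 * Worked example (illustrative numbers, not from the source): with
 * pg_num = 12, calc_bits_of(11) counts 4 significant bits, so
 * pg_num_mask = (1 << 4) - 1 = 0xf, the smallest 2^n-1 mask covering
 * the 12 placement groups.  ceph_stable_mod() later uses these masks
 * to fold a hash into the pg range.
 */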
static int crush_decode_uniform_bucket(void **p, void *end,
                                       struct crush_bucket_uniform *b)

        dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
        ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
        b->item_weight = ceph_decode_32(p);
static int crush_decode_list_bucket(void **p, void *end,
                                    struct crush_bucket_list *b)

        dout("crush_decode_list_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)

        b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->sum_weights == NULL)

        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->sum_weights[j] = ceph_decode_32(p);
static int crush_decode_tree_bucket(void **p, void *end,
                                    struct crush_bucket_tree *b)

        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
        ceph_decode_32_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)

        ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
        for (j = 0; j < b->num_nodes; j++)
                b->node_weights[j] = ceph_decode_32(p);
static int crush_decode_straw_bucket(void **p, void *end,
                                     struct crush_bucket_straw *b)

        dout("crush_decode_straw_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)

        b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->straws == NULL)

        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->straws[j] = ceph_decode_32(p);
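/*
 * Rough sketch of the encoding crush_decode() below expects (inferred
 * from the decode calls, not an authoritative format description): a
 * 32-bit CRUSH_MAGIC, then max_buckets/max_rules/max_devices, then one
 * record per bucket (common header: id, type, alg, hash, weight, size
 * and the item list, followed by alg-specific weights for the uniform,
 * list, tree and straw variants), then the rules, then trailing name
 * maps that this decoder skips.
 */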
static struct crush_map *crush_decode(void *pbyval, void *end)

        void *start = pbyval;

        dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        c = kzalloc(sizeof(*c), GFP_NOFS);

                return ERR_PTR(-ENOMEM);

        ceph_decode_need(p, end, 4*sizeof(u32), bad);
        magic = ceph_decode_32(p);
        if (magic != CRUSH_MAGIC) {
                pr_err("crush_decode magic %x != current %x\n",
                       (unsigned)magic, (unsigned)CRUSH_MAGIC);
        c->max_buckets = ceph_decode_32(p);
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);

        c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
        if (c->device_parents == NULL)

        c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
        if (c->bucket_parents == NULL)

        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)

        c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
        if (c->rules == NULL)
        for (i = 0; i < c->max_buckets; i++) {
                struct crush_bucket *b;

                ceph_decode_32_safe(p, end, alg, bad);

                        c->buckets[i] = NULL;

                dout("crush_decode bucket %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                case CRUSH_BUCKET_UNIFORM:
                        size = sizeof(struct crush_bucket_uniform);
                case CRUSH_BUCKET_LIST:
                        size = sizeof(struct crush_bucket_list);
                case CRUSH_BUCKET_TREE:
                        size = sizeof(struct crush_bucket_tree);
                case CRUSH_BUCKET_STRAW:
                        size = sizeof(struct crush_bucket_straw);

                b = c->buckets[i] = kzalloc(size, GFP_NOFS);

                ceph_decode_need(p, end, 4*sizeof(u32), bad);
                b->id = ceph_decode_32(p);
                b->type = ceph_decode_16(p);
                b->alg = ceph_decode_8(p);
                b->hash = ceph_decode_8(p);
                b->weight = ceph_decode_32(p);
                b->size = ceph_decode_32(p);

                dout("crush_decode bucket size %d off %x %p to %p\n",
                     b->size, (int)(*p-start), *p, end);

                b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
                if (b->items == NULL)

                b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);

                ceph_decode_need(p, end, b->size*sizeof(u32), bad);
                for (j = 0; j < b->size; j++)
                        b->items[j] = ceph_decode_32(p);
                case CRUSH_BUCKET_UNIFORM:
                        err = crush_decode_uniform_bucket(p, end,
                                  (struct crush_bucket_uniform *)b);
                case CRUSH_BUCKET_LIST:
                        err = crush_decode_list_bucket(p, end,
                                  (struct crush_bucket_list *)b);
                case CRUSH_BUCKET_TREE:
                        err = crush_decode_tree_bucket(p, end,
                                  (struct crush_bucket_tree *)b);
                case CRUSH_BUCKET_STRAW:
                        err = crush_decode_straw_bucket(p, end,
                                  (struct crush_bucket_straw *)b);
263 dout("rule vec is %p\n", c
->rules
);
264 for (i
= 0; i
< c
->max_rules
; i
++) {
266 struct crush_rule
*r
;
268 ceph_decode_32_safe(p
, end
, yes
, bad
);
270 dout("crush_decode NO rule %d off %x %p to %p\n",
271 i
, (int)(*p
-start
), *p
, end
);
276 dout("crush_decode rule %d off %x %p to %p\n",
277 i
, (int)(*p
-start
), *p
, end
);
280 ceph_decode_32_safe(p
, end
, yes
, bad
);
281 #if BITS_PER_LONG == 32
283 if (yes
> ULONG_MAX
/ sizeof(struct crush_rule_step
))
286 r
= c
->rules
[i
] = kmalloc(sizeof(*r
) +
287 yes
*sizeof(struct crush_rule_step
),
291 dout(" rule %d is at %p\n", i
, r
);
293 ceph_decode_copy_safe(p
, end
, &r
->mask
, 4, bad
); /* 4 u8's */
294 ceph_decode_need(p
, end
, r
->len
*3*sizeof(u32
), bad
);
295 for (j
= 0; j
< r
->len
; j
++) {
296 r
->steps
[j
].op
= ceph_decode_32(p
);
297 r
->steps
[j
].arg1
= ceph_decode_32(p
);
298 r
->steps
[j
].arg2
= ceph_decode_32(p
);
302 /* ignore trailing name maps. */
304 dout("crush_decode success\n");
310 dout("crush_decode fail %d\n", err
);
void ceph_osdmap_destroy(struct ceph_osdmap *map)

        dout("osdmap_destroy %p\n", map);

                crush_destroy(map->crush);
        while (!RB_EMPTY_ROOT(&map->pg_temp))
                rb_erase(rb_first(&map->pg_temp), &map->pg_temp);
        kfree(map->osd_state);
        kfree(map->osd_weight);

        kfree(map->osd_addr);
/*
 * adjust max osd value. reallocate arrays.
 */
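/*
 * The approach visible in the body below: allocate fresh state, addr
 * and weight arrays at the new size, copy over the entries for the
 * OSDs that already exist, free the old arrays, and only then swap the
 * new pointers into the map.
 */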
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)

        struct ceph_entity_addr *addr;

        state = kcalloc(max, sizeof(*state), GFP_NOFS);
        addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
        weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
        if (state == NULL || addr == NULL || weight == NULL) {

        if (map->osd_state) {
                memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
                memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
                memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
                kfree(map->osd_state);
                kfree(map->osd_addr);
                kfree(map->osd_weight);

        map->osd_state = state;
        map->osd_weight = weight;
        map->osd_addr = addr;
/*
 * Insert a new pg_temp mapping
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
                               struct rb_root *root)

        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_mapping *pg = NULL;

                pg = rb_entry(parent, struct ceph_pg_mapping, node);
                c = pgid_cmp(new->pgid, pg->pgid);

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
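/*
 * pg_temp overrides live in an rb-tree keyed by the raw pgid;
 * pgid_cmp() provides the ordering used both for the insert above and
 * for the lookup in calc_pg_raw() below.
 */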
struct ceph_osdmap *osdmap_decode(void **p, void *end)

        struct ceph_osdmap *map;

        dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        map = kzalloc(sizeof(*map), GFP_NOFS);

                return ERR_PTR(-ENOMEM);
        map->pg_temp = RB_ROOT;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_VERSION) {
                pr_warning("got unknown v %d > %d of osdmap\n", version,
                           CEPH_OSDMAP_VERSION);

        ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
        ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
        map->epoch = ceph_decode_32(p);
        ceph_decode_copy(p, &map->created, sizeof(map->created));
        ceph_decode_copy(p, &map->modified, sizeof(map->modified));

        map->num_pools = ceph_decode_32(p);
        map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool),
                               GFP_NOFS);

        ceph_decode_32_safe(p, end, max, bad);

                ceph_decode_need(p, end, 4+1+sizeof(map->pg_pool->v), bad);
                i = ceph_decode_32(p);
                if (i >= map->num_pools)

                ev = ceph_decode_8(p); /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);

                ceph_decode_copy(p, &map->pg_pool[i].v,
                                 sizeof(map->pg_pool->v));
                calc_pg_masks(&map->pg_pool[i]);
                *p += le32_to_cpu(map->pg_pool[i].v.num_snaps) * sizeof(u64);
                *p += le32_to_cpu(map->pg_pool[i].v.num_removed_snap_intervals)
                        * sizeof(u64) * 2;
        ceph_decode_32_safe(p, end, map->flags, bad);

        max = ceph_decode_32(p);

        /* (re)alloc osd arrays */
        err = osdmap_set_max_osd(map, max);

        dout("osdmap_decode max_osd = %d\n", map->max_osd);

        ceph_decode_need(p, end, 3*sizeof(u32) +
                         map->max_osd*(1 + sizeof(*map->osd_weight) +
                                       sizeof(*map->osd_addr)), bad);
        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_state, map->max_osd);

        *p += 4; /* skip length field (should match max) */
        for (i = 0; i < map->max_osd; i++)
                map->osd_weight[i] = ceph_decode_32(p);

        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
        for (i = 0; i < map->max_osd; i++)
                ceph_decode_addr(&map->osd_addr[i]);
        ceph_decode_32_safe(p, end, len, bad);
        for (i = 0; i < len; i++) {
                struct ceph_pg_mapping *pg;

                ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                n = ceph_decode_32(p);
                ceph_decode_need(p, end, n * sizeof(u32), bad);

                pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);

                for (j = 0; j < n; j++)
                        pg->osds[j] = ceph_decode_32(p);

                err = __insert_pg_mapping(pg, &map->pg_temp);

                dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
        ceph_decode_32_safe(p, end, len, bad);
        dout("osdmap_decode crush len %d from off 0x%x\n", len,
             (int)(*p - start));
        ceph_decode_need(p, end, len, bad);
        map->crush = crush_decode(*p, end);

        if (IS_ERR(map->crush)) {
                err = PTR_ERR(map->crush);

        /* ignore the rest of the map */
537 dout("osdmap_decode done %p %p\n", *p
, end
);
541 dout("osdmap_decode fail\n");
542 ceph_osdmap_destroy(map
);
/*
 * decode and apply an incremental map update.
 */
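/*
 * Rough section order, inferred from the decode sequence below: an
 * optional full map, an optional new crush map, new flags and a new
 * max_osd, updated pool info, an old-pool list that is skipped, per-osd
 * "up", "down" and weight changes, and finally new pg_temp mappings;
 * any trailing data is ignored.
 */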
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                                             struct ceph_osdmap *map,
                                             struct ceph_messenger *msgr)

        struct crush_map *newcrush = NULL;
        struct ceph_fsid fsid;
        struct ceph_timespec modified;
        __s32 new_flags, max;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_INC_VERSION) {
                pr_warning("got unknown v %d > %d of inc osdmap\n", version,
                           CEPH_OSDMAP_INC_VERSION);

        ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
                         bad);
        ceph_decode_copy(p, &fsid, sizeof(fsid));
        epoch = ceph_decode_32(p);
        BUG_ON(epoch != map->epoch+1);
        ceph_decode_copy(p, &modified, sizeof(modified));
        new_flags = ceph_decode_32(p);
        ceph_decode_32_safe(p, end, len, bad);

                dout("apply_incremental full map len %d, %p to %p\n",
                     len, *p, end);
                return osdmap_decode(p, min(*p+len, end));

        ceph_decode_32_safe(p, end, len, bad);

                dout("apply_incremental new crush map len %d, %p to %p\n",
                     len, *p, end);
                newcrush = crush_decode(*p, min(*p+len, end));
                if (IS_ERR(newcrush))
                        return ERR_PTR(PTR_ERR(newcrush));
                map->flags = new_flags;

        ceph_decode_need(p, end, 5*sizeof(u32), bad);

        max = ceph_decode_32(p);

                err = osdmap_set_max_osd(map, max);

        map->modified = modified;

                        crush_destroy(map->crush);
                map->crush = newcrush;
        ceph_decode_32_safe(p, end, len, bad);

                ceph_decode_32_safe(p, end, pool, bad);
                if (pool >= map->num_pools) {
                        void *pg_pool = kcalloc(pool + 1,
                                                sizeof(*map->pg_pool),
                                                GFP_NOFS);

                        memcpy(pg_pool, map->pg_pool,
                               map->num_pools * sizeof(*map->pg_pool));

                        map->pg_pool = pg_pool;
                        map->num_pools = pool+1;

                ceph_decode_need(p, end, 1 + sizeof(map->pg_pool->v), bad);
                ev = ceph_decode_8(p); /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);

                ceph_decode_copy(p, &map->pg_pool[pool].v,
                                 sizeof(map->pg_pool->v));
                calc_pg_masks(&map->pg_pool[pool]);

        /* old_pool (ignore) */
        ceph_decode_32_safe(p, end, len, bad);
        *p += len * sizeof(u32);
        ceph_decode_32_safe(p, end, len, bad);

                struct ceph_entity_addr addr;
                ceph_decode_32_safe(p, end, osd, bad);
                ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
                ceph_decode_addr(&addr);
                pr_info("osd%d up\n", osd);
                BUG_ON(osd >= map->max_osd);
                map->osd_state[osd] |= CEPH_OSD_UP;
                map->osd_addr[osd] = addr;
        ceph_decode_32_safe(p, end, len, bad);

                ceph_decode_32_safe(p, end, osd, bad);
                (*p)++; /* clean flag */
                pr_info("osd%d down\n", osd);
                if (osd < map->max_osd)
                        map->osd_state[osd] &= ~CEPH_OSD_UP;
        ceph_decode_32_safe(p, end, len, bad);

                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                osd = ceph_decode_32(p);
                off = ceph_decode_32(p);
                pr_info("osd%d weight 0x%x %s\n", osd, off,
                        off == CEPH_OSD_IN ? "(in)" :
                        (off == CEPH_OSD_OUT ? "(out)" : ""));
                if (osd < map->max_osd)
                        map->osd_weight[osd] = off;
        rbp = rb_first(&map->pg_temp);
        ceph_decode_32_safe(p, end, len, bad);

                struct ceph_pg_mapping *pg;

                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                pglen = ceph_decode_32(p);

                while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
                                                node)->pgid, pgid) <= 0) {
                        struct rb_node *cur = rbp;

                        dout(" removed pg_temp %llx\n",
                             *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
                                               node)->pgid);
                        rb_erase(cur, &map->pg_temp);

                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);

                        for (j = 0; j < pglen; j++)
                                pg->osds[j] = ceph_decode_32(p);
                        err = __insert_pg_mapping(pg, &map->pg_temp);

                        dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                             pglen);

                struct rb_node *cur = rbp;

                dout(" removed pg_temp %llx\n",
                     *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
                                       node)->pgid);
                rb_erase(cur, &map->pg_temp);
        /* ignore the rest */

        pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
               epoch, (int)(*p - start), *p, start, end);
        print_hex_dump(KERN_DEBUG, "osdmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);

                crush_destroy(newcrush);
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
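/*
 * Worked example with made-up layout values: fl_stripe_unit = 64K,
 * fl_stripe_count = 2, fl_object_size = 256K (so 4 stripe units per
 * object).  A 4K write at file offset 320K lands in stripe unit
 * bl = 5, i.e. stripe 2, position 1, object set 0, so *ono = 1,
 * *oxoff = 128K and *oxlen = 4K.
 */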
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
                                   u64 off, u64 *plen,
                                   u64 *ono,
                                   u64 *oxoff, u64 *oxlen)

        u32 osize = le32_to_cpu(layout->fl_object_size);
        u32 su = le32_to_cpu(layout->fl_stripe_unit);
        u32 sc = le32_to_cpu(layout->fl_stripe_count);
        u32 bl, stripeno, stripepos, objsetno;

        dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
             osize, su);
        su_per_object = osize / su;
        dout("osize %u / su %u = su_per_object %u\n", osize, su,
             su_per_object);

        BUG_ON((su & ~PAGE_MASK) != 0);
        /* bl = *off / su; */
        dout("off %llu / su %u = bl %u\n", off, su, bl);

        objsetno = stripeno / su_per_object;

        *ono = objsetno * sc + stripepos;
        dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

        /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
        su_offset = do_div(t, su);
        *oxoff = su_offset + (stripeno % su_per_object) * su;

        /*
         * Calculate the length of the extent being written to the selected
         * object. This is the minimum of the full length requested (plen) or
         * the remainder of the current stripe being written to.
         */
        *oxlen = min_t(u64, *plen, su - su_offset);

        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
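/*
 * The gist, from the body below: hash the object name with the pool's
 * object_hash to get a placement seed (ps), then build the pgid from
 * the pool id, ps and the preferred osd; when fl_pg_preferred >= 0 the
 * localized pg counts (lpg_num) are used in place of pg_num.
 */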
int ceph_calc_object_layout(struct ceph_object_layout *ol,
                            const char *oid,
                            struct ceph_file_layout *fl,
                            struct ceph_osdmap *osdmap)

        unsigned num, num_mask;

        s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
        int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;

        if (poolid >= osdmap->num_pools)

        pool = &osdmap->pg_pool[poolid];
        ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
        if (preferred >= 0) {
                num = le32_to_cpu(pool->v.lpg_num);
                num_mask = pool->lpg_num_mask;

                num = le32_to_cpu(pool->v.pg_num);
                num_mask = pool->pg_num_mask;

        pgid.ps = cpu_to_le16(ps);
        pgid.preferred = cpu_to_le16(preferred);
        pgid.pool = fl->fl_pg_pool;

                dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
                     (int)preferred);

                dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
/*
 * Calculate raw osd vector for the given pgid. Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *osds, int *num)

        struct rb_node *n = osdmap->pg_temp.rb_node;
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;

        unsigned poolid, ps, pps;

                pg = rb_entry(n, struct ceph_pg_mapping, node);
                c = pgid_cmp(pgid, pg->pgid);

        poolid = le32_to_cpu(pgid.pool);
        ps = le16_to_cpu(pgid.ps);
        preferred = (s16)le16_to_cpu(pgid.preferred);

        /* don't forcefeed bad device ids to crush */
        if (preferred >= osdmap->max_osd ||
            preferred >= osdmap->crush->max_devices)

        if (poolid >= osdmap->num_pools)

        pool = &osdmap->pg_pool[poolid];
        ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
                                 pool->v.type, pool->v.size);

                pr_err("no crush rule pool %d type %d size %d\n",
                       poolid, pool->v.type, pool->v.size);
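/*
 * Before being fed to CRUSH, ps is folded into the pool's pgp range
 * with ceph_stable_mod() (defined elsewhere; roughly: x & bmask when
 * that value is < b, otherwise x & (bmask >> 1)).  E.g. with
 * pgp_num = 12 and pgp_num_mask = 0xf, ps = 13 folds to 13 & 0x7 = 5.
 */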
                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.lpgp_num),
                                      pool->lpgp_num_mask);

                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.pgp_num),
                                      pool->pgp_num_mask);
        *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
                             min_t(int, pool->v.size, *num),
                             preferred, osdmap->osd_weight);
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)

        int rawosds[10], *osds;
        int i, num = ARRAY_SIZE(rawosds);

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);

        /* primary is first up osd */
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i])) {