
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

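/*
 * Format an osd's state flags as a short human-readable string,
 * e.g. "exists, up" or "doesn't exist".
 */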
char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	*str = '\0';
	if (state) {
		if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
			snprintf(str, len, "exists, up");
		else if (state & CEPH_OSD_EXISTS)
			snprintf(str, len, "exists");
		else if (state & CEPH_OSD_UP)
			snprintf(str, len, "up");
	} else {
		snprintf(str, len, "doesn't exist");
	}
	return str;
}

/* maps */

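/* number of bits needed to represent t, e.g. calc_bits_of(11) = 4 */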
static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
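/* e.g. pg_num = 12 gives pg_num_mask = 15 (used with ceph_stable_mod below) */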
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}

/*
 * decode crush map
 */
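/*
 * Each decode helper consumes fields from the buffer at *p, checking
 * against 'end' first; the ceph_decode_*_safe/need macros jump to the
 * 'bad' label when the buffer is too short.
 */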
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

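/*
 * On-wire layout: magic, max_buckets, max_rules, max_devices, then the
 * bucket array and the rule array; trailing name maps are ignored.
 */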
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				(struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
				(struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
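/* struct ceph_pg packs into 64 bits, so pgids can be compared as raw u64s */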
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %llx got %p\n",
			     *(u64 *)&pgid, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

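/*
 * Decode a pg pool: copy the fixed-size struct, then skip over the
 * per-pool snapshot records and removed-snap intervals that follow it.
 */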
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned int n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +       /* key */
			1 + sizeof(u64) + /* u8, snapid */
			sizeof(struct ceph_timespec);
		m = ceph_decode_32(p);    /* snap name */
		*p += m;
	}

	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %d len %d\n", pool, len);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			kfree(pi->name);
			pi->name = kmalloc(len + 1, GFP_NOFS);
			if (pi->name) {
				memcpy(pi->name, *p, len);
				pi->name[len] = '\0';
				dout(" name is %s\n", pi->name);
			}
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value. reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

/*
 * decode a full map.
 */
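/*
 * Wire format: version, fsid, epoch, created, modified, the pool map,
 * pool names (v5+), pool_max, flags, max_osd, the per-osd
 * state/weight/addr arrays, pg_temp mappings, and finally the crush map.
 */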
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0) {
			kfree(pi);
			goto bad;
		}
		__insert_pg_pool(&map->pg_pools, pi);
	}

	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	max = ceph_decode_32(p);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}

/*
 * decode and apply an incremental map update.
 */
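/*
 * Wire format: fsid, epoch (must be map->epoch + 1), modified,
 * new_pool_max, new_flags, an optional full map, an optional new crush
 * map, then the new_pool, old_pool, new_up, new_state, new_weight and
 * new_pg_temp sections, which are applied to the map in place.
 */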
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		u8 xorstate;
		ceph_decode_32_safe(p, end, osd, bad);
		xorstate = **(u8 **)p;
		(*p)++;  /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		if (pglen) {
			/* insert */
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);
				goto bad;
			}
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		} else {
			/* remove */
			__remove_pg_mapping(&map->pg_temp, pgid);
		}
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
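/*
 * Worked example: with su = 64K, sc = 3 and osize = 256K (four stripe
 * units per object), off = 200K gives bl = 3, stripeno = 1, stripepos = 0,
 * so ono = 0 and oxoff = 64K + 8K = 72K.
 */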
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *ono,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / su;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
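/*
 * The placement seed (ps) is a hash of the object name; localized pgs
 * (preferred >= 0) use the pool's lpg_num/lpg_num_mask instead of
 * pg_num/pg_num_mask.
 */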
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned int num, num_mask;
	struct ceph_pg pgid;
	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;
	unsigned int ps;

	BUG_ON(!osdmap);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return -EIO;
	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	if (preferred >= 0) {
		ps += preferred;
		num = le32_to_cpu(pool->v.lpg_num);
		num_mask = pool->lpg_num_mask;
	} else {
		num = le32_to_cpu(pool->v.pg_num);
		num_mask = pool->pg_num_mask;
	}

	pgid.ps = cpu_to_le16(ps);
	pgid.preferred = cpu_to_le16(preferred);
	pgid.pool = fl->fl_pg_pool;
	if (preferred >= 0)
		dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
		     (int)preferred);
	else
		dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

	ol->ol_pgid = pgid;
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}
EXPORT_SYMBOL(ceph_calc_object_layout);

/*
 * Calculate raw osd vector for the given pgid. Return pointer to osd
 * array, or NULL on failure.
 */
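/*
 * An explicit pg_temp mapping, if present, overrides crush; otherwise the
 * reduced ps plus the pool id is fed through the pool's crush rule to
 * pick up to pool->v.size osds.
 */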
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned int poolid, ps, pps, t;
	int preferred;

	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);
	preferred = (s16)le16_to_cpu(pgid.preferred);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;

	/* pg_temp? */
	if (preferred >= 0)
		t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
				    pool->lpgp_num_mask);
	else
		t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
				    pool->pgp_num_mask);
	pgid.ps = cpu_to_le16(t);
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
		       poolid, pool->v.crush_ruleset, pool->v.type,
		       pool->v.size);
		return NULL;
	}

	/* don't forcefeed bad device ids to crush */
	if (preferred >= osdmap->max_osd ||
	    preferred >= osdmap->crush->max_devices)
		preferred = -1;

	if (preferred >= 0)
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.lpgp_num),
				      pool->lpgp_num_mask);
	else
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.pgp_num),
				      pool->pgp_num_mask);
	pps += poolid;
	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			     min_t(int, pool->v.size, *num),
			     preferred, osdmap->osd_weight);
	return osds;
}

/*
 * Return acting set for given pgid.
 */
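/* Down osds are skipped, so the acting set keeps the raw crush ordering. */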
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);