1
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/module.h>
5 #include <linux/slab.h>
6 #include <asm/div64.h>
7
8 #include <linux/ceph/libceph.h>
9 #include <linux/ceph/osdmap.h>
10 #include <linux/ceph/decode.h>
11 #include <linux/crush/hash.h>
12 #include <linux/crush/mapper.h>
13
14 char *ceph_osdmap_state_str(char *str, int len, u32 state)
15 {
16 if (!len)
17 return str;
18
19 if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
20 snprintf(str, len, "exists, up");
21 else if (state & CEPH_OSD_EXISTS)
22 snprintf(str, len, "exists");
23 else if (state & CEPH_OSD_UP)
24 snprintf(str, len, "up");
25 else
26 snprintf(str, len, "doesn't exist");
27
28 return str;
29 }
30
31 /* maps */
32
33 static int calc_bits_of(unsigned int t)
34 {
35 int b = 0;
36 while (t) {
37 t = t >> 1;
38 b++;
39 }
40 return b;
41 }
42
43 /*
44  * the foo_mask is the smallest value 2^n-1 that is >= foo-1.
45 */
46 static void calc_pg_masks(struct ceph_pg_pool_info *pi)
47 {
48 pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
49 pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
50 }
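/*
 * Editor's note (illustrative, not part of the upstream source): for
 * pg_num = 12, calc_bits_of(11) = 4, so pg_num_mask = (1 << 4) - 1 = 0xf.
 * ceph_stable_mod() then folds a raw 32-bit seed into [0, pg_num) using
 * that mask, roughly:
 *
 *     // returns seed & 0xf if that is < 12, else seed & 0x7
 *     ceph_stable_mod(seed, 12, 0xf);
 *
 * which keeps most PG placements stable when pg_num is not a power of two.
 */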
51
52 /*
53 * decode crush map
54 */
55 static int crush_decode_uniform_bucket(void **p, void *end,
56 struct crush_bucket_uniform *b)
57 {
58 dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
59 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
60 b->item_weight = ceph_decode_32(p);
61 return 0;
62 bad:
63 return -EINVAL;
64 }
65
66 static int crush_decode_list_bucket(void **p, void *end,
67 struct crush_bucket_list *b)
68 {
69 int j;
70 dout("crush_decode_list_bucket %p to %p\n", *p, end);
71 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
72 if (b->item_weights == NULL)
73 return -ENOMEM;
74 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
75 if (b->sum_weights == NULL)
76 return -ENOMEM;
77 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
78 for (j = 0; j < b->h.size; j++) {
79 b->item_weights[j] = ceph_decode_32(p);
80 b->sum_weights[j] = ceph_decode_32(p);
81 }
82 return 0;
83 bad:
84 return -EINVAL;
85 }
86
87 static int crush_decode_tree_bucket(void **p, void *end,
88 struct crush_bucket_tree *b)
89 {
90 int j;
91 dout("crush_decode_tree_bucket %p to %p\n", *p, end);
92 ceph_decode_8_safe(p, end, b->num_nodes, bad);
93 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
94 if (b->node_weights == NULL)
95 return -ENOMEM;
96 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
97 for (j = 0; j < b->num_nodes; j++)
98 b->node_weights[j] = ceph_decode_32(p);
99 return 0;
100 bad:
101 return -EINVAL;
102 }
103
104 static int crush_decode_straw_bucket(void **p, void *end,
105 struct crush_bucket_straw *b)
106 {
107 int j;
108 dout("crush_decode_straw_bucket %p to %p\n", *p, end);
109 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
110 if (b->item_weights == NULL)
111 return -ENOMEM;
112 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
113 if (b->straws == NULL)
114 return -ENOMEM;
115 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
116 for (j = 0; j < b->h.size; j++) {
117 b->item_weights[j] = ceph_decode_32(p);
118 b->straws[j] = ceph_decode_32(p);
119 }
120 return 0;
121 bad:
122 return -EINVAL;
123 }
124
125 static int crush_decode_straw2_bucket(void **p, void *end,
126 struct crush_bucket_straw2 *b)
127 {
128 int j;
129 dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
130 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
131 if (b->item_weights == NULL)
132 return -ENOMEM;
133 ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
134 for (j = 0; j < b->h.size; j++)
135 b->item_weights[j] = ceph_decode_32(p);
136 return 0;
137 bad:
138 return -EINVAL;
139 }
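/*
 * Editor's note: all five bucket decoders above follow the same pattern:
 * allocate the per-item arrays with kcalloc(), bounds-check the input
 * with ceph_decode_need(), then copy the fixed-width fields.  They may
 * return -ENOMEM or -EINVAL with some arrays already allocated; that is
 * safe because the caller (crush_decode()) disposes of the whole
 * partially built map via crush_destroy() on any failure.
 */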
140
141 static struct crush_choose_arg_map *alloc_choose_arg_map(void)
142 {
143 struct crush_choose_arg_map *arg_map;
144
145 arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
146 if (!arg_map)
147 return NULL;
148
149 RB_CLEAR_NODE(&arg_map->node);
150 return arg_map;
151 }
152
153 static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
154 {
155 if (arg_map) {
156 int i, j;
157
158 WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
159
160 for (i = 0; i < arg_map->size; i++) {
161 struct crush_choose_arg *arg = &arg_map->args[i];
162
163 for (j = 0; j < arg->weight_set_size; j++)
164 kfree(arg->weight_set[j].weights);
165 kfree(arg->weight_set);
166 kfree(arg->ids);
167 }
168 kfree(arg_map->args);
169 kfree(arg_map);
170 }
171 }
172
173 DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
174 node);
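/*
 * Editor's note: DEFINE_RB_FUNCS() (from include/linux/ceph/libceph.h)
 * expands to the insert_choose_arg_map(), erase_choose_arg_map() and
 * lookup_choose_arg_map() helpers used below: a standard rbtree keyed
 * here by choose_args_index.
 */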
175
176 void clear_choose_args(struct crush_map *c)
177 {
178 while (!RB_EMPTY_ROOT(&c->choose_args)) {
179 struct crush_choose_arg_map *arg_map =
180 rb_entry(rb_first(&c->choose_args),
181 struct crush_choose_arg_map, node);
182
183 erase_choose_arg_map(&c->choose_args, arg_map);
184 free_choose_arg_map(arg_map);
185 }
186 }
187
188 static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
189 {
190 u32 *a = NULL;
191 u32 len;
192 int ret;
193
194 ceph_decode_32_safe(p, end, len, e_inval);
195 if (len) {
196 u32 i;
197
198 a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
199 if (!a) {
200 ret = -ENOMEM;
201 goto fail;
202 }
203
204 ceph_decode_need(p, end, len * sizeof(u32), e_inval);
205 for (i = 0; i < len; i++)
206 a[i] = ceph_decode_32(p);
207 }
208
209 *plen = len;
210 return a;
211
212 e_inval:
213 ret = -EINVAL;
214 fail:
215 kfree(a);
216 return ERR_PTR(ret);
217 }
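/*
 * Editor's note, a hedged usage sketch (names are illustrative): the
 * return value is NULL for a zero-length array and an ERR_PTR on
 * failure, so callers check with IS_ERR(), not against NULL:
 *
 *     u32 *ids;
 *     u32 ids_len;
 *
 *     ids = decode_array_32_alloc(p, end, &ids_len);
 *     if (IS_ERR(ids))
 *             return PTR_ERR(ids);    /* -EINVAL or -ENOMEM */
 */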
218
219 /*
220 * Assumes @arg is zero-initialized.
221 */
222 static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
223 {
224 int ret;
225
226 ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
227 if (arg->weight_set_size) {
228 u32 i;
229
230 arg->weight_set = kmalloc_array(arg->weight_set_size,
231 sizeof(*arg->weight_set),
232 GFP_NOIO);
233 if (!arg->weight_set)
234 return -ENOMEM;
235
236 for (i = 0; i < arg->weight_set_size; i++) {
237 struct crush_weight_set *w = &arg->weight_set[i];
238
239 w->weights = decode_array_32_alloc(p, end, &w->size);
240 if (IS_ERR(w->weights)) {
241 ret = PTR_ERR(w->weights);
242 w->weights = NULL;
243 return ret;
244 }
245 }
246 }
247
248 arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
249 if (IS_ERR(arg->ids)) {
250 ret = PTR_ERR(arg->ids);
251 arg->ids = NULL;
252 return ret;
253 }
254
255 return 0;
256
257 e_inval:
258 return -EINVAL;
259 }
260
261 static int decode_choose_args(void **p, void *end, struct crush_map *c)
262 {
263 struct crush_choose_arg_map *arg_map = NULL;
264 u32 num_choose_arg_maps, num_buckets;
265 int ret;
266
267 ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
268 while (num_choose_arg_maps--) {
269 arg_map = alloc_choose_arg_map();
270 if (!arg_map) {
271 ret = -ENOMEM;
272 goto fail;
273 }
274
275 ceph_decode_64_safe(p, end, arg_map->choose_args_index,
276 e_inval);
277 arg_map->size = c->max_buckets;
278 arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
279 GFP_NOIO);
280 if (!arg_map->args) {
281 ret = -ENOMEM;
282 goto fail;
283 }
284
285 ceph_decode_32_safe(p, end, num_buckets, e_inval);
286 while (num_buckets--) {
287 struct crush_choose_arg *arg;
288 u32 bucket_index;
289
290 ceph_decode_32_safe(p, end, bucket_index, e_inval);
291 if (bucket_index >= arg_map->size)
292 goto e_inval;
293
294 arg = &arg_map->args[bucket_index];
295 ret = decode_choose_arg(p, end, arg);
296 if (ret)
297 goto fail;
298 }
299
300 insert_choose_arg_map(&c->choose_args, arg_map);
301 }
302
303 return 0;
304
305 e_inval:
306 ret = -EINVAL;
307 fail:
308 free_choose_arg_map(arg_map);
309 return ret;
310 }
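/*
 * Editor's note on the wire format consumed above (as implied by the
 * decoder, not a normative spec):
 *
 *     u32 num_choose_arg_maps
 *     per map:  u64 choose_args_index
 *               u32 num_buckets
 *               per bucket:  u32 bucket_index
 *                            u32 weight_set_size  (+ nested u32 arrays)
 *                            u32 ids_size         (+ u32 array)
 *
 * arg_map->args is sized to max_buckets so bucket_index can be used
 * directly as an array index after the bounds check.
 */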
311
312 static void crush_finalize(struct crush_map *c)
313 {
314 __s32 b;
315
316 /* Space for the array of pointers to per-bucket workspace */
317 c->working_size = sizeof(struct crush_work) +
318 c->max_buckets * sizeof(struct crush_work_bucket *);
319
320 for (b = 0; b < c->max_buckets; b++) {
321 if (!c->buckets[b])
322 continue;
323
324 switch (c->buckets[b]->alg) {
325 default:
326 /*
327 * The base case, permutation variables and
328 * the pointer to the permutation array.
329 */
330 c->working_size += sizeof(struct crush_work_bucket);
331 break;
332 }
333 /* Every bucket has a permutation array. */
334 c->working_size += c->buckets[b]->size * sizeof(__u32);
335 }
336 }
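/*
 * Editor's note, a worked example of the arithmetic above: a map with
 * max_buckets = 3 and two live buckets of size 4 and 2 needs
 *
 *     sizeof(struct crush_work) + 3 * sizeof(struct crush_work_bucket *)
 *       + 2 * sizeof(struct crush_work_bucket)    /* one per bucket    */
 *       + (4 + 2) * sizeof(__u32)                 /* permutation arrays */
 *
 * bytes of scratch space; crush_work_size() reports this so a single
 * workspace can be preallocated per osdmap.
 */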
337
338 static struct crush_map *crush_decode(void *pbyval, void *end)
339 {
340 struct crush_map *c;
341 int err;
342 int i, j;
343 void **p = &pbyval;
344 void *start = pbyval;
345 u32 magic;
346
347 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
348
349 c = kzalloc(sizeof(*c), GFP_NOFS);
350 if (c == NULL)
351 return ERR_PTR(-ENOMEM);
352
353 c->choose_args = RB_ROOT;
354
355 /* set tunables to default values */
356 c->choose_local_tries = 2;
357 c->choose_local_fallback_tries = 5;
358 c->choose_total_tries = 19;
359 c->chooseleaf_descend_once = 0;
360
361 ceph_decode_need(p, end, 4*sizeof(u32), bad);
362 magic = ceph_decode_32(p);
363 if (magic != CRUSH_MAGIC) {
364 pr_err("crush_decode magic %x != current %x\n",
365 (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
366 goto bad;
367 }
368 c->max_buckets = ceph_decode_32(p);
369 c->max_rules = ceph_decode_32(p);
370 c->max_devices = ceph_decode_32(p);
371
372 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
373 if (c->buckets == NULL)
374 goto badmem;
375 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
376 if (c->rules == NULL)
377 goto badmem;
378
379 /* buckets */
380 for (i = 0; i < c->max_buckets; i++) {
381 int size = 0;
382 u32 alg;
383 struct crush_bucket *b;
384
385 ceph_decode_32_safe(p, end, alg, bad);
386 if (alg == 0) {
387 c->buckets[i] = NULL;
388 continue;
389 }
390 dout("crush_decode bucket %d off %x %p to %p\n",
391 i, (int)(*p-start), *p, end);
392
393 switch (alg) {
394 case CRUSH_BUCKET_UNIFORM:
395 size = sizeof(struct crush_bucket_uniform);
396 break;
397 case CRUSH_BUCKET_LIST:
398 size = sizeof(struct crush_bucket_list);
399 break;
400 case CRUSH_BUCKET_TREE:
401 size = sizeof(struct crush_bucket_tree);
402 break;
403 case CRUSH_BUCKET_STRAW:
404 size = sizeof(struct crush_bucket_straw);
405 break;
406 case CRUSH_BUCKET_STRAW2:
407 size = sizeof(struct crush_bucket_straw2);
408 break;
409 default:
410 goto bad;
411 }
412 BUG_ON(size == 0);
413 b = c->buckets[i] = kzalloc(size, GFP_NOFS);
414 if (b == NULL)
415 goto badmem;
416
417 ceph_decode_need(p, end, 4*sizeof(u32), bad);
418 b->id = ceph_decode_32(p);
419 b->type = ceph_decode_16(p);
420 b->alg = ceph_decode_8(p);
421 b->hash = ceph_decode_8(p);
422 b->weight = ceph_decode_32(p);
423 b->size = ceph_decode_32(p);
424
425 dout("crush_decode bucket size %d off %x %p to %p\n",
426 b->size, (int)(*p-start), *p, end);
427
428 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
429 if (b->items == NULL)
430 goto badmem;
431
432 ceph_decode_need(p, end, b->size*sizeof(u32), bad);
433 for (j = 0; j < b->size; j++)
434 b->items[j] = ceph_decode_32(p);
435
436 switch (b->alg) {
437 case CRUSH_BUCKET_UNIFORM:
438 err = crush_decode_uniform_bucket(p, end,
439 (struct crush_bucket_uniform *)b);
440 if (err < 0)
441 goto fail;
442 break;
443 case CRUSH_BUCKET_LIST:
444 err = crush_decode_list_bucket(p, end,
445 (struct crush_bucket_list *)b);
446 if (err < 0)
447 goto fail;
448 break;
449 case CRUSH_BUCKET_TREE:
450 err = crush_decode_tree_bucket(p, end,
451 (struct crush_bucket_tree *)b);
452 if (err < 0)
453 goto fail;
454 break;
455 case CRUSH_BUCKET_STRAW:
456 err = crush_decode_straw_bucket(p, end,
457 (struct crush_bucket_straw *)b);
458 if (err < 0)
459 goto fail;
460 break;
461 case CRUSH_BUCKET_STRAW2:
462 err = crush_decode_straw2_bucket(p, end,
463 (struct crush_bucket_straw2 *)b);
464 if (err < 0)
465 goto fail;
466 break;
467 }
468 }
469
470 /* rules */
471 dout("rule vec is %p\n", c->rules);
472 for (i = 0; i < c->max_rules; i++) {
473 u32 yes;
474 struct crush_rule *r;
475
476 ceph_decode_32_safe(p, end, yes, bad);
477 if (!yes) {
478 dout("crush_decode NO rule %d off %x %p to %p\n",
479 i, (int)(*p-start), *p, end);
480 c->rules[i] = NULL;
481 continue;
482 }
483
484 dout("crush_decode rule %d off %x %p to %p\n",
485 i, (int)(*p-start), *p, end);
486
487 /* len */
488 ceph_decode_32_safe(p, end, yes, bad);
489 #if BITS_PER_LONG == 32
490 if (yes > (ULONG_MAX - sizeof(*r))
491 / sizeof(struct crush_rule_step))
492 goto bad;
493 #endif
494 r = c->rules[i] = kmalloc(sizeof(*r) +
495 yes*sizeof(struct crush_rule_step),
496 GFP_NOFS);
497 if (r == NULL)
498 goto badmem;
499 dout(" rule %d is at %p\n", i, r);
500 r->len = yes;
501 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
502 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
503 for (j = 0; j < r->len; j++) {
504 r->steps[j].op = ceph_decode_32(p);
505 r->steps[j].arg1 = ceph_decode_32(p);
506 r->steps[j].arg2 = ceph_decode_32(p);
507 }
508 }
509
510 ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
511 ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
512 ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */
513
514 /* tunables */
515 ceph_decode_need(p, end, 3*sizeof(u32), done);
516 c->choose_local_tries = ceph_decode_32(p);
517 c->choose_local_fallback_tries = ceph_decode_32(p);
518 c->choose_total_tries = ceph_decode_32(p);
519 dout("crush decode tunable choose_local_tries = %d\n",
520 c->choose_local_tries);
521 dout("crush decode tunable choose_local_fallback_tries = %d\n",
522 c->choose_local_fallback_tries);
523 dout("crush decode tunable choose_total_tries = %d\n",
524 c->choose_total_tries);
525
526 ceph_decode_need(p, end, sizeof(u32), done);
527 c->chooseleaf_descend_once = ceph_decode_32(p);
528 dout("crush decode tunable chooseleaf_descend_once = %d\n",
529 c->chooseleaf_descend_once);
530
531 ceph_decode_need(p, end, sizeof(u8), done);
532 c->chooseleaf_vary_r = ceph_decode_8(p);
533 dout("crush decode tunable chooseleaf_vary_r = %d\n",
534 c->chooseleaf_vary_r);
535
536 /* skip straw_calc_version, allowed_bucket_algs */
537 ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
538 *p += sizeof(u8) + sizeof(u32);
539
540 ceph_decode_need(p, end, sizeof(u8), done);
541 c->chooseleaf_stable = ceph_decode_8(p);
542 dout("crush decode tunable chooseleaf_stable = %d\n",
543 c->chooseleaf_stable);
544
545 if (*p != end) {
546 /* class_map */
547 ceph_decode_skip_map(p, end, 32, 32, bad);
548 /* class_name */
549 ceph_decode_skip_map(p, end, 32, string, bad);
550 /* class_bucket */
551 ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
552 }
553
554 if (*p != end) {
555 err = decode_choose_args(p, end, c);
556 if (err)
557 goto fail;
558 }
559
560 done:
561 crush_finalize(c);
562 dout("crush_decode success\n");
563 return c;
564
565 badmem:
566 err = -ENOMEM;
567 fail:
568 dout("crush_decode fail %d\n", err);
569 crush_destroy(c);
570 return ERR_PTR(err);
571
572 bad:
573 err = -EINVAL;
574 goto fail;
575 }
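/*
 * Editor's note: the tunables section above deliberately jumps to the
 * "done" label (instead of "bad") when the buffer runs out.  Maps from
 * older servers simply end early, so each missing tunable keeps the
 * default set at the top of crush_decode(); only a short read inside a
 * section known to be present is treated as corruption.
 */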
576
577 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
578 {
579 if (lhs->pool < rhs->pool)
580 return -1;
581 if (lhs->pool > rhs->pool)
582 return 1;
583 if (lhs->seed < rhs->seed)
584 return -1;
585 if (lhs->seed > rhs->seed)
586 return 1;
587
588 return 0;
589 }
590
591 int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
592 {
593 int ret;
594
595 ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
596 if (ret)
597 return ret;
598
599 if (lhs->shard < rhs->shard)
600 return -1;
601 if (lhs->shard > rhs->shard)
602 return 1;
603
604 return 0;
605 }
606
607 static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
608 {
609 struct ceph_pg_mapping *pg;
610
611 pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
612 if (!pg)
613 return NULL;
614
615 RB_CLEAR_NODE(&pg->node);
616 return pg;
617 }
618
619 static void free_pg_mapping(struct ceph_pg_mapping *pg)
620 {
621 WARN_ON(!RB_EMPTY_NODE(&pg->node));
622
623 kfree(pg);
624 }
625
626 /*
627 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
628 * to a set of osds) and primary_temp (explicit primary setting)
629 */
630 DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
631 RB_BYPTR, const struct ceph_pg *, node)
632
633 /*
634 * rbtree of pg pool info
635 */
636 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
637 {
638 struct rb_node **p = &root->rb_node;
639 struct rb_node *parent = NULL;
640 struct ceph_pg_pool_info *pi = NULL;
641
642 while (*p) {
643 parent = *p;
644 pi = rb_entry(parent, struct ceph_pg_pool_info, node);
645 if (new->id < pi->id)
646 p = &(*p)->rb_left;
647 else if (new->id > pi->id)
648 p = &(*p)->rb_right;
649 else
650 return -EEXIST;
651 }
652
653 rb_link_node(&new->node, parent, p);
654 rb_insert_color(&new->node, root);
655 return 0;
656 }
657
658 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
659 {
660 struct ceph_pg_pool_info *pi;
661 struct rb_node *n = root->rb_node;
662
663 while (n) {
664 pi = rb_entry(n, struct ceph_pg_pool_info, node);
665 if (id < pi->id)
666 n = n->rb_left;
667 else if (id > pi->id)
668 n = n->rb_right;
669 else
670 return pi;
671 }
672 return NULL;
673 }
674
675 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
676 {
677 return __lookup_pg_pool(&map->pg_pools, id);
678 }
679
680 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
681 {
682 struct ceph_pg_pool_info *pi;
683
684 if (id == CEPH_NOPOOL)
685 return NULL;
686
687 if (WARN_ON_ONCE(id > (u64) INT_MAX))
688 return NULL;
689
690 pi = __lookup_pg_pool(&map->pg_pools, (int) id);
691
692 return pi ? pi->name : NULL;
693 }
694 EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
695
696 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
697 {
698 struct rb_node *rbp;
699
700 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
701 struct ceph_pg_pool_info *pi =
702 rb_entry(rbp, struct ceph_pg_pool_info, node);
703 if (pi->name && strcmp(pi->name, name) == 0)
704 return pi->id;
705 }
706 return -ENOENT;
707 }
708 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
709
710 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
711 {
712 rb_erase(&pi->node, root);
713 kfree(pi->name);
714 kfree(pi);
715 }
716
717 static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
718 {
719 u8 ev, cv;
720 unsigned len, num;
721 void *pool_end;
722
723 ceph_decode_need(p, end, 2 + 4, bad);
724 ev = ceph_decode_8(p); /* encoding version */
725 cv = ceph_decode_8(p); /* compat version */
726 if (ev < 5) {
727 pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
728 return -EINVAL;
729 }
730 if (cv > 9) {
731 pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
732 return -EINVAL;
733 }
734 len = ceph_decode_32(p);
735 ceph_decode_need(p, end, len, bad);
736 pool_end = *p + len;
737
738 pi->type = ceph_decode_8(p);
739 pi->size = ceph_decode_8(p);
740 pi->crush_ruleset = ceph_decode_8(p);
741 pi->object_hash = ceph_decode_8(p);
742
743 pi->pg_num = ceph_decode_32(p);
744 pi->pgp_num = ceph_decode_32(p);
745
746 *p += 4 + 4; /* skip lpg* */
747 *p += 4; /* skip last_change */
748 *p += 8 + 4; /* skip snap_seq, snap_epoch */
749
750 /* skip snaps */
751 num = ceph_decode_32(p);
752 while (num--) {
753 *p += 8; /* snapid key */
754 *p += 1 + 1; /* versions */
755 len = ceph_decode_32(p);
756 *p += len;
757 }
758
759 /* skip removed_snaps */
760 num = ceph_decode_32(p);
761 *p += num * (8 + 8);
762
763 *p += 8; /* skip auid */
764 pi->flags = ceph_decode_64(p);
765 *p += 4; /* skip crash_replay_interval */
766
767 if (ev >= 7)
768 pi->min_size = ceph_decode_8(p);
769 else
770 pi->min_size = pi->size - pi->size / 2;
771
772 if (ev >= 8)
773 *p += 8 + 8; /* skip quota_max_* */
774
775 if (ev >= 9) {
776 /* skip tiers */
777 num = ceph_decode_32(p);
778 *p += num * 8;
779
780 *p += 8; /* skip tier_of */
781 *p += 1; /* skip cache_mode */
782
783 pi->read_tier = ceph_decode_64(p);
784 pi->write_tier = ceph_decode_64(p);
785 } else {
786 pi->read_tier = -1;
787 pi->write_tier = -1;
788 }
789
790 if (ev >= 10) {
791 /* skip properties */
792 num = ceph_decode_32(p);
793 while (num--) {
794 len = ceph_decode_32(p);
795 *p += len; /* key */
796 len = ceph_decode_32(p);
797 *p += len; /* val */
798 }
799 }
800
801 if (ev >= 11) {
802 /* skip hit_set_params */
803 *p += 1 + 1; /* versions */
804 len = ceph_decode_32(p);
805 *p += len;
806
807 *p += 4; /* skip hit_set_period */
808 *p += 4; /* skip hit_set_count */
809 }
810
811 if (ev >= 12)
812 *p += 4; /* skip stripe_width */
813
814 if (ev >= 13) {
815 *p += 8; /* skip target_max_bytes */
816 *p += 8; /* skip target_max_objects */
817 *p += 4; /* skip cache_target_dirty_ratio_micro */
818 *p += 4; /* skip cache_target_full_ratio_micro */
819 *p += 4; /* skip cache_min_flush_age */
820 *p += 4; /* skip cache_min_evict_age */
821 }
822
823 if (ev >= 14) {
824 /* skip erasure_code_profile */
825 len = ceph_decode_32(p);
826 *p += len;
827 }
828
829 /*
830 * last_force_op_resend_preluminous, will be overridden if the
831 * map was encoded with RESEND_ON_SPLIT
832 */
833 if (ev >= 15)
834 pi->last_force_request_resend = ceph_decode_32(p);
835 else
836 pi->last_force_request_resend = 0;
837
838 if (ev >= 16)
839 *p += 4; /* skip min_read_recency_for_promote */
840
841 if (ev >= 17)
842 *p += 8; /* skip expected_num_objects */
843
844 if (ev >= 19)
845 *p += 4; /* skip cache_target_dirty_high_ratio_micro */
846
847 if (ev >= 20)
848 *p += 4; /* skip min_write_recency_for_promote */
849
850 if (ev >= 21)
851 *p += 1; /* skip use_gmt_hitset */
852
853 if (ev >= 22)
854 *p += 1; /* skip fast_read */
855
856 if (ev >= 23) {
857 *p += 4; /* skip hit_set_grade_decay_rate */
858 *p += 4; /* skip hit_set_search_last_n */
859 }
860
861 if (ev >= 24) {
862 /* skip opts */
863 *p += 1 + 1; /* versions */
864 len = ceph_decode_32(p);
865 *p += len;
866 }
867
868 if (ev >= 25)
869 pi->last_force_request_resend = ceph_decode_32(p);
870
871 /* ignore the rest */
872
873 *p = pool_end;
874 calc_pg_masks(pi);
875 return 0;
876
877 bad:
878 return -EINVAL;
879 }
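/*
 * Editor's note: decode_pool() is a typical versioned-TLV decoder.  The
 * length prefix fixes pool_end up front, every "ev >= N" block consumes
 * fields added in encoding version N, and the final "*p = pool_end"
 * skips anything newer than this kernel knows about, so old kernels can
 * still parse maps from newer clusters.
 */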
880
881 static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
882 {
883 struct ceph_pg_pool_info *pi;
884 u32 num, len;
885 u64 pool;
886
887 ceph_decode_32_safe(p, end, num, bad);
888 dout(" %d pool names\n", num);
889 while (num--) {
890 ceph_decode_64_safe(p, end, pool, bad);
891 ceph_decode_32_safe(p, end, len, bad);
892 dout(" pool %llu len %d\n", pool, len);
893 ceph_decode_need(p, end, len, bad);
894 pi = __lookup_pg_pool(&map->pg_pools, pool);
895 if (pi) {
896 char *name = kstrndup(*p, len, GFP_NOFS);
897
898 if (!name)
899 return -ENOMEM;
900 kfree(pi->name);
901 pi->name = name;
902 dout(" name is %s\n", pi->name);
903 }
904 *p += len;
905 }
906 return 0;
907
908 bad:
909 return -EINVAL;
910 }
911
912 /*
913 * osd map
914 */
915 struct ceph_osdmap *ceph_osdmap_alloc(void)
916 {
917 struct ceph_osdmap *map;
918
919 map = kzalloc(sizeof(*map), GFP_NOIO);
920 if (!map)
921 return NULL;
922
923 map->pg_pools = RB_ROOT;
924 map->pool_max = -1;
925 map->pg_temp = RB_ROOT;
926 map->primary_temp = RB_ROOT;
927 map->pg_upmap = RB_ROOT;
928 map->pg_upmap_items = RB_ROOT;
929 mutex_init(&map->crush_workspace_mutex);
930
931 return map;
932 }
933
934 void ceph_osdmap_destroy(struct ceph_osdmap *map)
935 {
936 dout("osdmap_destroy %p\n", map);
937 if (map->crush)
938 crush_destroy(map->crush);
939 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
940 struct ceph_pg_mapping *pg =
941 rb_entry(rb_first(&map->pg_temp),
942 struct ceph_pg_mapping, node);
943 erase_pg_mapping(&map->pg_temp, pg);
944 free_pg_mapping(pg);
945 }
946 while (!RB_EMPTY_ROOT(&map->primary_temp)) {
947 struct ceph_pg_mapping *pg =
948 rb_entry(rb_first(&map->primary_temp),
949 struct ceph_pg_mapping, node);
950 erase_pg_mapping(&map->primary_temp, pg);
951 free_pg_mapping(pg);
952 }
953 while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
954 struct ceph_pg_mapping *pg =
955 rb_entry(rb_first(&map->pg_upmap),
956 struct ceph_pg_mapping, node);
957 rb_erase(&pg->node, &map->pg_upmap);
958 kfree(pg);
959 }
960 while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
961 struct ceph_pg_mapping *pg =
962 rb_entry(rb_first(&map->pg_upmap_items),
963 struct ceph_pg_mapping, node);
964 rb_erase(&pg->node, &map->pg_upmap_items);
965 kfree(pg);
966 }
967 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
968 struct ceph_pg_pool_info *pi =
969 rb_entry(rb_first(&map->pg_pools),
970 struct ceph_pg_pool_info, node);
971 __remove_pg_pool(&map->pg_pools, pi);
972 }
973 kfree(map->osd_state);
974 kfree(map->osd_weight);
975 kfree(map->osd_addr);
976 kfree(map->osd_primary_affinity);
977 kfree(map->crush_workspace);
978 kfree(map);
979 }
980
981 /*
982 * Adjust max_osd value, (re)allocate arrays.
983 *
984 * The new elements are properly initialized.
985 */
986 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
987 {
988 u32 *state;
989 u32 *weight;
990 struct ceph_entity_addr *addr;
991 int i;
992
993 state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
994 if (!state)
995 return -ENOMEM;
996 map->osd_state = state;
997
998 weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
999 if (!weight)
1000 return -ENOMEM;
1001 map->osd_weight = weight;
1002
1003 addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
1004 if (!addr)
1005 return -ENOMEM;
1006 map->osd_addr = addr;
1007
1008 for (i = map->max_osd; i < max; i++) {
1009 map->osd_state[i] = 0;
1010 map->osd_weight[i] = CEPH_OSD_OUT;
1011 memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
1012 }
1013
1014 if (map->osd_primary_affinity) {
1015 u32 *affinity;
1016
1017 affinity = krealloc(map->osd_primary_affinity,
1018 max*sizeof(*affinity), GFP_NOFS);
1019 if (!affinity)
1020 return -ENOMEM;
1021 map->osd_primary_affinity = affinity;
1022
1023 for (i = map->max_osd; i < max; i++)
1024 map->osd_primary_affinity[i] =
1025 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1026 }
1027
1028 map->max_osd = max;
1029
1030 return 0;
1031 }
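/*
 * Editor's note: krealloc() preserves the existing entries, so only the
 * new tail [map->max_osd, max) is initialized here.  If one allocation
 * fails midway, the arrays already grown stay grown, which is harmless
 * because map->max_osd itself is only raised after everything succeeds.
 */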
1032
1033 static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
1034 {
1035 void *workspace;
1036 size_t work_size;
1037
1038 if (IS_ERR(crush))
1039 return PTR_ERR(crush);
1040
1041 work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
1042 dout("%s work_size %zu bytes\n", __func__, work_size);
1043 workspace = kmalloc(work_size, GFP_NOIO);
1044 if (!workspace) {
1045 crush_destroy(crush);
1046 return -ENOMEM;
1047 }
1048 crush_init_workspace(crush, workspace);
1049
1050 if (map->crush)
1051 crush_destroy(map->crush);
1052 kfree(map->crush_workspace);
1053 map->crush = crush;
1054 map->crush_workspace = workspace;
1055 return 0;
1056 }
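/*
 * Editor's note: osdmap_set_crush() takes ownership of @crush even on
 * failure, and also accepts an ERR_PTR, which lets callers chain it
 * directly (a sketch of the pattern used later in this file):
 *
 *     err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
 *     if (err)
 *             goto bad;
 */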
1057
1058 #define OSDMAP_WRAPPER_COMPAT_VER 7
1059 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1
1060
1061 /*
1062 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps,
1063 * to struct_v of the client_data section for new (v7 and above)
1064 * osdmaps.
1065 */
1066 static int get_osdmap_client_data_v(void **p, void *end,
1067 const char *prefix, u8 *v)
1068 {
1069 u8 struct_v;
1070
1071 ceph_decode_8_safe(p, end, struct_v, e_inval);
1072 if (struct_v >= 7) {
1073 u8 struct_compat;
1074
1075 ceph_decode_8_safe(p, end, struct_compat, e_inval);
1076 if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
1077 pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
1078 struct_v, struct_compat,
1079 OSDMAP_WRAPPER_COMPAT_VER, prefix);
1080 return -EINVAL;
1081 }
1082 *p += 4; /* ignore wrapper struct_len */
1083
1084 ceph_decode_8_safe(p, end, struct_v, e_inval);
1085 ceph_decode_8_safe(p, end, struct_compat, e_inval);
1086 if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
1087 pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
1088 struct_v, struct_compat,
1089 OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
1090 return -EINVAL;
1091 }
1092 *p += 4; /* ignore client data struct_len */
1093 } else {
1094 u16 version;
1095
1096 *p -= 1;
1097 ceph_decode_16_safe(p, end, version, e_inval);
1098 if (version < 6) {
1099 pr_warn("got v %d < 6 of %s ceph_osdmap\n",
1100 version, prefix);
1101 return -EINVAL;
1102 }
1103
1104 		/* old osdmap encoding */
1105 struct_v = 0;
1106 }
1107
1108 *v = struct_v;
1109 return 0;
1110
1111 e_inval:
1112 return -EINVAL;
1113 }
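/*
 * Editor's note on the header parsed above: v7+ osdmaps carry a wrapper
 * (u8 struct_v, u8 struct_compat, u32 struct_len) followed by a
 * client-data section of the same shape, while pre-v7 maps start with a
 * bare le16 version.  The "*p -= 1" rewinds the u8 already consumed so
 * the old 16-bit version field can be re-read in full.
 */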
1114
1115 static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
1116 bool incremental)
1117 {
1118 u32 n;
1119
1120 ceph_decode_32_safe(p, end, n, e_inval);
1121 while (n--) {
1122 struct ceph_pg_pool_info *pi;
1123 u64 pool;
1124 int ret;
1125
1126 ceph_decode_64_safe(p, end, pool, e_inval);
1127
1128 pi = __lookup_pg_pool(&map->pg_pools, pool);
1129 if (!incremental || !pi) {
1130 pi = kzalloc(sizeof(*pi), GFP_NOFS);
1131 if (!pi)
1132 return -ENOMEM;
1133
1134 pi->id = pool;
1135
1136 ret = __insert_pg_pool(&map->pg_pools, pi);
1137 if (ret) {
1138 kfree(pi);
1139 return ret;
1140 }
1141 }
1142
1143 ret = decode_pool(p, end, pi);
1144 if (ret)
1145 return ret;
1146 }
1147
1148 return 0;
1149
1150 e_inval:
1151 return -EINVAL;
1152 }
1153
1154 static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
1155 {
1156 return __decode_pools(p, end, map, false);
1157 }
1158
1159 static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
1160 {
1161 return __decode_pools(p, end, map, true);
1162 }
1163
1164 typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);
1165
1166 static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
1167 decode_mapping_fn_t fn, bool incremental)
1168 {
1169 u32 n;
1170
1171 WARN_ON(!incremental && !fn);
1172
1173 ceph_decode_32_safe(p, end, n, e_inval);
1174 while (n--) {
1175 struct ceph_pg_mapping *pg;
1176 struct ceph_pg pgid;
1177 int ret;
1178
1179 ret = ceph_decode_pgid(p, end, &pgid);
1180 if (ret)
1181 return ret;
1182
1183 pg = lookup_pg_mapping(mapping_root, &pgid);
1184 if (pg) {
1185 WARN_ON(!incremental);
1186 erase_pg_mapping(mapping_root, pg);
1187 free_pg_mapping(pg);
1188 }
1189
1190 if (fn) {
1191 pg = fn(p, end, incremental);
1192 if (IS_ERR(pg))
1193 return PTR_ERR(pg);
1194
1195 if (pg) {
1196 pg->pgid = pgid; /* struct */
1197 insert_pg_mapping(mapping_root, pg);
1198 }
1199 }
1200 }
1201
1202 return 0;
1203
1204 e_inval:
1205 return -EINVAL;
1206 }
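/*
 * Editor's note: decode_pg_mapping() is the shared driver for the
 * pg_temp/primary_temp/pg_upmap(_items) decoders below.  Three cases
 * fall out of @fn and @incremental:
 *   - fn != NULL, fn returns a mapping: insert it (replacing any old one);
 *   - fn != NULL, fn returns NULL: incremental removal marker;
 *   - fn == NULL: pure removal list (the old_pg_upmap* variants).
 */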
1207
1208 static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
1209 bool incremental)
1210 {
1211 struct ceph_pg_mapping *pg;
1212 u32 len, i;
1213
1214 ceph_decode_32_safe(p, end, len, e_inval);
1215 if (len == 0 && incremental)
1216 return NULL; /* new_pg_temp: [] to remove */
1217 if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
1218 return ERR_PTR(-EINVAL);
1219
1220 ceph_decode_need(p, end, len * sizeof(u32), e_inval);
1221 pg = alloc_pg_mapping(len * sizeof(u32));
1222 if (!pg)
1223 return ERR_PTR(-ENOMEM);
1224
1225 pg->pg_temp.len = len;
1226 for (i = 0; i < len; i++)
1227 pg->pg_temp.osds[i] = ceph_decode_32(p);
1228
1229 return pg;
1230
1231 e_inval:
1232 return ERR_PTR(-EINVAL);
1233 }
1234
1235 static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
1236 {
1237 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
1238 false);
1239 }
1240
1241 static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
1242 {
1243 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
1244 true);
1245 }
1246
1247 static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
1248 bool incremental)
1249 {
1250 struct ceph_pg_mapping *pg;
1251 u32 osd;
1252
1253 ceph_decode_32_safe(p, end, osd, e_inval);
1254 if (osd == (u32)-1 && incremental)
1255 return NULL; /* new_primary_temp: -1 to remove */
1256
1257 pg = alloc_pg_mapping(0);
1258 if (!pg)
1259 return ERR_PTR(-ENOMEM);
1260
1261 pg->primary_temp.osd = osd;
1262 return pg;
1263
1264 e_inval:
1265 return ERR_PTR(-EINVAL);
1266 }
1267
1268 static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
1269 {
1270 return decode_pg_mapping(p, end, &map->primary_temp,
1271 __decode_primary_temp, false);
1272 }
1273
1274 static int decode_new_primary_temp(void **p, void *end,
1275 struct ceph_osdmap *map)
1276 {
1277 return decode_pg_mapping(p, end, &map->primary_temp,
1278 __decode_primary_temp, true);
1279 }
1280
1281 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
1282 {
1283 BUG_ON(osd >= map->max_osd);
1284
1285 if (!map->osd_primary_affinity)
1286 return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1287
1288 return map->osd_primary_affinity[osd];
1289 }
1290
1291 static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
1292 {
1293 BUG_ON(osd >= map->max_osd);
1294
1295 if (!map->osd_primary_affinity) {
1296 int i;
1297
1298 map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
1299 GFP_NOFS);
1300 if (!map->osd_primary_affinity)
1301 return -ENOMEM;
1302
1303 for (i = 0; i < map->max_osd; i++)
1304 map->osd_primary_affinity[i] =
1305 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1306 }
1307
1308 map->osd_primary_affinity[osd] = aff;
1309
1310 return 0;
1311 }
1312
1313 static int decode_primary_affinity(void **p, void *end,
1314 struct ceph_osdmap *map)
1315 {
1316 u32 len, i;
1317
1318 ceph_decode_32_safe(p, end, len, e_inval);
1319 if (len == 0) {
1320 kfree(map->osd_primary_affinity);
1321 map->osd_primary_affinity = NULL;
1322 return 0;
1323 }
1324 if (len != map->max_osd)
1325 goto e_inval;
1326
1327 ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
1328
1329 for (i = 0; i < map->max_osd; i++) {
1330 int ret;
1331
1332 ret = set_primary_affinity(map, i, ceph_decode_32(p));
1333 if (ret)
1334 return ret;
1335 }
1336
1337 return 0;
1338
1339 e_inval:
1340 return -EINVAL;
1341 }
1342
1343 static int decode_new_primary_affinity(void **p, void *end,
1344 struct ceph_osdmap *map)
1345 {
1346 u32 n;
1347
1348 ceph_decode_32_safe(p, end, n, e_inval);
1349 while (n--) {
1350 u32 osd, aff;
1351 int ret;
1352
1353 ceph_decode_32_safe(p, end, osd, e_inval);
1354 ceph_decode_32_safe(p, end, aff, e_inval);
1355
1356 ret = set_primary_affinity(map, osd, aff);
1357 if (ret)
1358 return ret;
1359
1360 pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
1361 }
1362
1363 return 0;
1364
1365 e_inval:
1366 return -EINVAL;
1367 }
1368
1369 static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
1370 bool __unused)
1371 {
1372 return __decode_pg_temp(p, end, false);
1373 }
1374
1375 static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1376 {
1377 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
1378 false);
1379 }
1380
1381 static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1382 {
1383 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
1384 true);
1385 }
1386
1387 static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1388 {
1389 return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
1390 }
1391
1392 static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
1393 bool __unused)
1394 {
1395 struct ceph_pg_mapping *pg;
1396 u32 len, i;
1397
1398 ceph_decode_32_safe(p, end, len, e_inval);
1399 if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
1400 return ERR_PTR(-EINVAL);
1401
1402 ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
1403 pg = alloc_pg_mapping(2 * len * sizeof(u32));
1404 if (!pg)
1405 return ERR_PTR(-ENOMEM);
1406
1407 pg->pg_upmap_items.len = len;
1408 for (i = 0; i < len; i++) {
1409 pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
1410 pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
1411 }
1412
1413 return pg;
1414
1415 e_inval:
1416 return ERR_PTR(-EINVAL);
1417 }
1418
1419 static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
1420 {
1421 return decode_pg_mapping(p, end, &map->pg_upmap_items,
1422 __decode_pg_upmap_items, false);
1423 }
1424
1425 static int decode_new_pg_upmap_items(void **p, void *end,
1426 struct ceph_osdmap *map)
1427 {
1428 return decode_pg_mapping(p, end, &map->pg_upmap_items,
1429 __decode_pg_upmap_items, true);
1430 }
1431
1432 static int decode_old_pg_upmap_items(void **p, void *end,
1433 struct ceph_osdmap *map)
1434 {
1435 return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
1436 }
1437
1438 /*
1439 * decode a full map.
1440 */
1441 static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
1442 {
1443 u8 struct_v;
1444 u32 epoch = 0;
1445 void *start = *p;
1446 u32 max;
1447 u32 len, i;
1448 int err;
1449
1450 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1451
1452 err = get_osdmap_client_data_v(p, end, "full", &struct_v);
1453 if (err)
1454 goto bad;
1455
1456 /* fsid, epoch, created, modified */
1457 ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
1458 sizeof(map->created) + sizeof(map->modified), e_inval);
1459 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
1460 epoch = map->epoch = ceph_decode_32(p);
1461 ceph_decode_copy(p, &map->created, sizeof(map->created));
1462 ceph_decode_copy(p, &map->modified, sizeof(map->modified));
1463
1464 /* pools */
1465 err = decode_pools(p, end, map);
1466 if (err)
1467 goto bad;
1468
1469 /* pool_name */
1470 err = decode_pool_names(p, end, map);
1471 if (err)
1472 goto bad;
1473
1474 ceph_decode_32_safe(p, end, map->pool_max, e_inval);
1475
1476 ceph_decode_32_safe(p, end, map->flags, e_inval);
1477
1478 /* max_osd */
1479 ceph_decode_32_safe(p, end, max, e_inval);
1480
1481 /* (re)alloc osd arrays */
1482 err = osdmap_set_max_osd(map, max);
1483 if (err)
1484 goto bad;
1485
1486 /* osd_state, osd_weight, osd_addrs->client_addr */
1487 ceph_decode_need(p, end, 3*sizeof(u32) +
1488 map->max_osd*((struct_v >= 5 ? sizeof(u32) :
1489 sizeof(u8)) +
1490 sizeof(*map->osd_weight) +
1491 sizeof(*map->osd_addr)), e_inval);
1492
1493 if (ceph_decode_32(p) != map->max_osd)
1494 goto e_inval;
1495
1496 if (struct_v >= 5) {
1497 for (i = 0; i < map->max_osd; i++)
1498 map->osd_state[i] = ceph_decode_32(p);
1499 } else {
1500 for (i = 0; i < map->max_osd; i++)
1501 map->osd_state[i] = ceph_decode_8(p);
1502 }
1503
1504 if (ceph_decode_32(p) != map->max_osd)
1505 goto e_inval;
1506
1507 for (i = 0; i < map->max_osd; i++)
1508 map->osd_weight[i] = ceph_decode_32(p);
1509
1510 if (ceph_decode_32(p) != map->max_osd)
1511 goto e_inval;
1512
1513 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
1514 for (i = 0; i < map->max_osd; i++)
1515 ceph_decode_addr(&map->osd_addr[i]);
1516
1517 /* pg_temp */
1518 err = decode_pg_temp(p, end, map);
1519 if (err)
1520 goto bad;
1521
1522 /* primary_temp */
1523 if (struct_v >= 1) {
1524 err = decode_primary_temp(p, end, map);
1525 if (err)
1526 goto bad;
1527 }
1528
1529 /* primary_affinity */
1530 if (struct_v >= 2) {
1531 err = decode_primary_affinity(p, end, map);
1532 if (err)
1533 goto bad;
1534 } else {
1535 WARN_ON(map->osd_primary_affinity);
1536 }
1537
1538 /* crush */
1539 ceph_decode_32_safe(p, end, len, e_inval);
1540 err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
1541 if (err)
1542 goto bad;
1543
1544 *p += len;
1545 if (struct_v >= 3) {
1546 /* erasure_code_profiles */
1547 ceph_decode_skip_map_of_map(p, end, string, string, string,
1548 e_inval);
1549 }
1550
1551 if (struct_v >= 4) {
1552 err = decode_pg_upmap(p, end, map);
1553 if (err)
1554 goto bad;
1555
1556 err = decode_pg_upmap_items(p, end, map);
1557 if (err)
1558 goto bad;
1559 } else {
1560 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
1561 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
1562 }
1563
1564 /* ignore the rest */
1565 *p = end;
1566
1567 dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1568 return 0;
1569
1570 e_inval:
1571 err = -EINVAL;
1572 bad:
1573 pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1574 err, epoch, (int)(*p - start), *p, start, end);
1575 print_hex_dump(KERN_DEBUG, "osdmap: ",
1576 DUMP_PREFIX_OFFSET, 16, 1,
1577 start, end - start, true);
1578 return err;
1579 }
1580
1581 /*
1582 * Allocate and decode a full map.
1583 */
1584 struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
1585 {
1586 struct ceph_osdmap *map;
1587 int ret;
1588
1589 map = ceph_osdmap_alloc();
1590 if (!map)
1591 return ERR_PTR(-ENOMEM);
1592
1593 ret = osdmap_decode(p, end, map);
1594 if (ret) {
1595 ceph_osdmap_destroy(map);
1596 return ERR_PTR(ret);
1597 }
1598
1599 return map;
1600 }
1601
1602 /*
1603 * Encoding order is (new_up_client, new_state, new_weight). Need to
1604 * apply in the (new_weight, new_state, new_up_client) order, because
1605 * an incremental map may look like e.g.
1606 *
1607 * new_up_client: { osd=6, addr=... } # set osd_state and addr
1608 * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
1609 */
1610 static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
1611 struct ceph_osdmap *map)
1612 {
1613 void *new_up_client;
1614 void *new_state;
1615 void *new_weight_end;
1616 u32 len;
1617
1618 new_up_client = *p;
1619 ceph_decode_32_safe(p, end, len, e_inval);
1620 len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
1621 ceph_decode_need(p, end, len, e_inval);
1622 *p += len;
1623
1624 new_state = *p;
1625 ceph_decode_32_safe(p, end, len, e_inval);
1626 len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
1627 ceph_decode_need(p, end, len, e_inval);
1628 *p += len;
1629
1630 /* new_weight */
1631 ceph_decode_32_safe(p, end, len, e_inval);
1632 while (len--) {
1633 s32 osd;
1634 u32 w;
1635
1636 ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
1637 osd = ceph_decode_32(p);
1638 w = ceph_decode_32(p);
1639 BUG_ON(osd >= map->max_osd);
1640 pr_info("osd%d weight 0x%x %s\n", osd, w,
1641 w == CEPH_OSD_IN ? "(in)" :
1642 (w == CEPH_OSD_OUT ? "(out)" : ""));
1643 map->osd_weight[osd] = w;
1644
1645 /*
1646 * If we are marking in, set the EXISTS, and clear the
1647 * AUTOOUT and NEW bits.
1648 */
1649 if (w) {
1650 map->osd_state[osd] |= CEPH_OSD_EXISTS;
1651 map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
1652 CEPH_OSD_NEW);
1653 }
1654 }
1655 new_weight_end = *p;
1656
1657 /* new_state (up/down) */
1658 *p = new_state;
1659 len = ceph_decode_32(p);
1660 while (len--) {
1661 s32 osd;
1662 u32 xorstate;
1663 int ret;
1664
1665 osd = ceph_decode_32(p);
1666 if (struct_v >= 5)
1667 xorstate = ceph_decode_32(p);
1668 else
1669 xorstate = ceph_decode_8(p);
1670 if (xorstate == 0)
1671 xorstate = CEPH_OSD_UP;
1672 BUG_ON(osd >= map->max_osd);
1673 if ((map->osd_state[osd] & CEPH_OSD_UP) &&
1674 (xorstate & CEPH_OSD_UP))
1675 pr_info("osd%d down\n", osd);
1676 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
1677 (xorstate & CEPH_OSD_EXISTS)) {
1678 pr_info("osd%d does not exist\n", osd);
1679 ret = set_primary_affinity(map, osd,
1680 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
1681 if (ret)
1682 return ret;
1683 memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
1684 map->osd_state[osd] = 0;
1685 } else {
1686 map->osd_state[osd] ^= xorstate;
1687 }
1688 }
1689
1690 /* new_up_client */
1691 *p = new_up_client;
1692 len = ceph_decode_32(p);
1693 while (len--) {
1694 s32 osd;
1695 struct ceph_entity_addr addr;
1696
1697 osd = ceph_decode_32(p);
1698 ceph_decode_copy(p, &addr, sizeof(addr));
1699 ceph_decode_addr(&addr);
1700 BUG_ON(osd >= map->max_osd);
1701 pr_info("osd%d up\n", osd);
1702 map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
1703 map->osd_addr[osd] = addr;
1704 }
1705
1706 *p = new_weight_end;
1707 return 0;
1708
1709 e_inval:
1710 return -EINVAL;
1711 }
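/*
 * Editor's note, a worked example of the xorstate logic: for an osd
 * currently EXISTS|UP, an entry with xorstate == CEPH_OSD_UP logs
 * "osd%d down" and leaves the state as EXISTS, while xorstate ==
 * CEPH_OSD_EXISTS instead wipes the state, address and primary affinity
 * entirely.  A zero xorstate is treated as CEPH_OSD_UP, i.e. a plain
 * up/down toggle, for compatibility with older encodings.
 */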
1712
1713 /*
1714 * decode and apply an incremental map update.
1715 */
1716 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
1717 struct ceph_osdmap *map)
1718 {
1719 struct ceph_fsid fsid;
1720 u32 epoch = 0;
1721 struct ceph_timespec modified;
1722 s32 len;
1723 u64 pool;
1724 __s64 new_pool_max;
1725 __s32 new_flags, max;
1726 void *start = *p;
1727 int err;
1728 u8 struct_v;
1729
1730 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1731
1732 err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
1733 if (err)
1734 goto bad;
1735
1736 /* fsid, epoch, modified, new_pool_max, new_flags */
1737 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
1738 sizeof(u64) + sizeof(u32), e_inval);
1739 ceph_decode_copy(p, &fsid, sizeof(fsid));
1740 epoch = ceph_decode_32(p);
1741 BUG_ON(epoch != map->epoch+1);
1742 ceph_decode_copy(p, &modified, sizeof(modified));
1743 new_pool_max = ceph_decode_64(p);
1744 new_flags = ceph_decode_32(p);
1745
1746 /* full map? */
1747 ceph_decode_32_safe(p, end, len, e_inval);
1748 if (len > 0) {
1749 dout("apply_incremental full map len %d, %p to %p\n",
1750 len, *p, end);
1751 return ceph_osdmap_decode(p, min(*p+len, end));
1752 }
1753
1754 /* new crush? */
1755 ceph_decode_32_safe(p, end, len, e_inval);
1756 if (len > 0) {
1757 err = osdmap_set_crush(map,
1758 crush_decode(*p, min(*p + len, end)));
1759 if (err)
1760 goto bad;
1761 *p += len;
1762 }
1763
1764 /* new flags? */
1765 if (new_flags >= 0)
1766 map->flags = new_flags;
1767 if (new_pool_max >= 0)
1768 map->pool_max = new_pool_max;
1769
1770 /* new max? */
1771 ceph_decode_32_safe(p, end, max, e_inval);
1772 if (max >= 0) {
1773 err = osdmap_set_max_osd(map, max);
1774 if (err)
1775 goto bad;
1776 }
1777
1778 map->epoch++;
1779 map->modified = modified;
1780
1781 /* new_pools */
1782 err = decode_new_pools(p, end, map);
1783 if (err)
1784 goto bad;
1785
1786 /* new_pool_names */
1787 err = decode_pool_names(p, end, map);
1788 if (err)
1789 goto bad;
1790
1791 /* old_pool */
1792 ceph_decode_32_safe(p, end, len, e_inval);
1793 while (len--) {
1794 struct ceph_pg_pool_info *pi;
1795
1796 ceph_decode_64_safe(p, end, pool, e_inval);
1797 pi = __lookup_pg_pool(&map->pg_pools, pool);
1798 if (pi)
1799 __remove_pg_pool(&map->pg_pools, pi);
1800 }
1801
1802 /* new_up_client, new_state, new_weight */
1803 err = decode_new_up_state_weight(p, end, struct_v, map);
1804 if (err)
1805 goto bad;
1806
1807 /* new_pg_temp */
1808 err = decode_new_pg_temp(p, end, map);
1809 if (err)
1810 goto bad;
1811
1812 /* new_primary_temp */
1813 if (struct_v >= 1) {
1814 err = decode_new_primary_temp(p, end, map);
1815 if (err)
1816 goto bad;
1817 }
1818
1819 /* new_primary_affinity */
1820 if (struct_v >= 2) {
1821 err = decode_new_primary_affinity(p, end, map);
1822 if (err)
1823 goto bad;
1824 }
1825
1826 if (struct_v >= 3) {
1827 /* new_erasure_code_profiles */
1828 ceph_decode_skip_map_of_map(p, end, string, string, string,
1829 e_inval);
1830 /* old_erasure_code_profiles */
1831 ceph_decode_skip_set(p, end, string, e_inval);
1832 }
1833
1834 if (struct_v >= 4) {
1835 err = decode_new_pg_upmap(p, end, map);
1836 if (err)
1837 goto bad;
1838
1839 err = decode_old_pg_upmap(p, end, map);
1840 if (err)
1841 goto bad;
1842
1843 err = decode_new_pg_upmap_items(p, end, map);
1844 if (err)
1845 goto bad;
1846
1847 err = decode_old_pg_upmap_items(p, end, map);
1848 if (err)
1849 goto bad;
1850 }
1851
1852 /* ignore the rest */
1853 *p = end;
1854
1855 dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1856 return map;
1857
1858 e_inval:
1859 err = -EINVAL;
1860 bad:
1861 pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1862 err, epoch, (int)(*p - start), *p, start, end);
1863 print_hex_dump(KERN_DEBUG, "osdmap: ",
1864 DUMP_PREFIX_OFFSET, 16, 1,
1865 start, end - start, true);
1866 return ERR_PTR(err);
1867 }
1868
1869 void ceph_oloc_copy(struct ceph_object_locator *dest,
1870 const struct ceph_object_locator *src)
1871 {
1872 ceph_oloc_destroy(dest);
1873
1874 dest->pool = src->pool;
1875 if (src->pool_ns)
1876 dest->pool_ns = ceph_get_string(src->pool_ns);
1877 else
1878 dest->pool_ns = NULL;
1879 }
1880 EXPORT_SYMBOL(ceph_oloc_copy);
1881
1882 void ceph_oloc_destroy(struct ceph_object_locator *oloc)
1883 {
1884 ceph_put_string(oloc->pool_ns);
1885 }
1886 EXPORT_SYMBOL(ceph_oloc_destroy);
1887
1888 void ceph_oid_copy(struct ceph_object_id *dest,
1889 const struct ceph_object_id *src)
1890 {
1891 ceph_oid_destroy(dest);
1892
1893 if (src->name != src->inline_name) {
1894 /* very rare, see ceph_object_id definition */
1895 dest->name = kmalloc(src->name_len + 1,
1896 GFP_NOIO | __GFP_NOFAIL);
1897 } else {
1898 dest->name = dest->inline_name;
1899 }
1900 memcpy(dest->name, src->name, src->name_len + 1);
1901 dest->name_len = src->name_len;
1902 }
1903 EXPORT_SYMBOL(ceph_oid_copy);
1904
1905 static __printf(2, 0)
1906 int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
1907 {
1908 int len;
1909
1910 WARN_ON(!ceph_oid_empty(oid));
1911
1912 len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
1913 if (len >= sizeof(oid->inline_name))
1914 return len;
1915
1916 oid->name_len = len;
1917 return 0;
1918 }
1919
1920 /*
1921 * If oid doesn't fit into inline buffer, BUG.
1922 */
1923 void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
1924 {
1925 va_list ap;
1926
1927 va_start(ap, fmt);
1928 BUG_ON(oid_printf_vargs(oid, fmt, ap));
1929 va_end(ap);
1930 }
1931 EXPORT_SYMBOL(ceph_oid_printf);
1932
1933 static __printf(3, 0)
1934 int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
1935 const char *fmt, va_list ap)
1936 {
1937 va_list aq;
1938 int len;
1939
1940 va_copy(aq, ap);
1941 len = oid_printf_vargs(oid, fmt, aq);
1942 va_end(aq);
1943
1944 if (len) {
1945 char *external_name;
1946
1947 external_name = kmalloc(len + 1, gfp);
1948 if (!external_name)
1949 return -ENOMEM;
1950
1951 oid->name = external_name;
1952 WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
1953 oid->name_len = len;
1954 }
1955
1956 return 0;
1957 }
1958
1959 /*
1960 * If oid doesn't fit into inline buffer, allocate.
1961 */
1962 int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
1963 const char *fmt, ...)
1964 {
1965 va_list ap;
1966 int ret;
1967
1968 va_start(ap, fmt);
1969 ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
1970 va_end(ap);
1971
1972 return ret;
1973 }
1974 EXPORT_SYMBOL(ceph_oid_aprintf);
1975
1976 void ceph_oid_destroy(struct ceph_object_id *oid)
1977 {
1978 if (oid->name != oid->inline_name)
1979 kfree(oid->name);
1980 }
1981 EXPORT_SYMBOL(ceph_oid_destroy);
1982
1983 /*
1984 * osds only
1985 */
1986 static bool __osds_equal(const struct ceph_osds *lhs,
1987 const struct ceph_osds *rhs)
1988 {
1989 if (lhs->size == rhs->size &&
1990 !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
1991 return true;
1992
1993 return false;
1994 }
1995
1996 /*
1997 * osds + primary
1998 */
1999 static bool osds_equal(const struct ceph_osds *lhs,
2000 const struct ceph_osds *rhs)
2001 {
2002 if (__osds_equal(lhs, rhs) &&
2003 lhs->primary == rhs->primary)
2004 return true;
2005
2006 return false;
2007 }
2008
2009 static bool osds_valid(const struct ceph_osds *set)
2010 {
2011 /* non-empty set */
2012 if (set->size > 0 && set->primary >= 0)
2013 return true;
2014
2015 /* empty can_shift_osds set */
2016 if (!set->size && set->primary == -1)
2017 return true;
2018
2019 /* empty !can_shift_osds set - all NONE */
2020 if (set->size > 0 && set->primary == -1) {
2021 int i;
2022
2023 for (i = 0; i < set->size; i++) {
2024 if (set->osds[i] != CRUSH_ITEM_NONE)
2025 break;
2026 }
2027 if (i == set->size)
2028 return true;
2029 }
2030
2031 return false;
2032 }
2033
2034 void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
2035 {
2036 memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
2037 dest->size = src->size;
2038 dest->primary = src->primary;
2039 }
2040
2041 bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
2042 u32 new_pg_num)
2043 {
2044 int old_bits = calc_bits_of(old_pg_num);
2045 int old_mask = (1 << old_bits) - 1;
2046 int n;
2047
2048 WARN_ON(pgid->seed >= old_pg_num);
2049 if (new_pg_num <= old_pg_num)
2050 return false;
2051
2052 for (n = 1; ; n++) {
2053 int next_bit = n << (old_bits - 1);
2054 u32 s = next_bit | pgid->seed;
2055
2056 if (s < old_pg_num || s == pgid->seed)
2057 continue;
2058 if (s >= new_pg_num)
2059 break;
2060
2061 s = ceph_stable_mod(s, old_pg_num, old_mask);
2062 if (s == pgid->seed)
2063 return true;
2064 }
2065
2066 return false;
2067 }
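/*
 * Editor's note, a worked example: with old_pg_num = 4 (old_bits = 3,
 * old_mask = 7), new_pg_num = 8 and seed = 1, the n = 1 iteration gives
 * s = (1 << 2) | 1 = 5; since 5 < 8 and ceph_stable_mod(5, 4, 7) folds
 * back to 1, PG seed 1 is split (child 5 appears when pg_num grows to 8).
 */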
2068
2069 bool ceph_is_new_interval(const struct ceph_osds *old_acting,
2070 const struct ceph_osds *new_acting,
2071 const struct ceph_osds *old_up,
2072 const struct ceph_osds *new_up,
2073 int old_size,
2074 int new_size,
2075 int old_min_size,
2076 int new_min_size,
2077 u32 old_pg_num,
2078 u32 new_pg_num,
2079 bool old_sort_bitwise,
2080 bool new_sort_bitwise,
2081 const struct ceph_pg *pgid)
2082 {
2083 return !osds_equal(old_acting, new_acting) ||
2084 !osds_equal(old_up, new_up) ||
2085 old_size != new_size ||
2086 old_min_size != new_min_size ||
2087 ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
2088 old_sort_bitwise != new_sort_bitwise;
2089 }
2090
2091 static int calc_pg_rank(int osd, const struct ceph_osds *acting)
2092 {
2093 int i;
2094
2095 for (i = 0; i < acting->size; i++) {
2096 if (acting->osds[i] == osd)
2097 return i;
2098 }
2099
2100 return -1;
2101 }
2102
2103 static bool primary_changed(const struct ceph_osds *old_acting,
2104 const struct ceph_osds *new_acting)
2105 {
2106 if (!old_acting->size && !new_acting->size)
2107 return false; /* both still empty */
2108
2109 if (!old_acting->size ^ !new_acting->size)
2110 return true; /* was empty, now not, or vice versa */
2111
2112 if (old_acting->primary != new_acting->primary)
2113 return true; /* primary changed */
2114
2115 if (calc_pg_rank(old_acting->primary, old_acting) !=
2116 calc_pg_rank(new_acting->primary, new_acting))
2117 return true;
2118
2119 	return false;   /* same primary (though replicas may have changed) */
2120 }
2121
2122 bool ceph_osds_changed(const struct ceph_osds *old_acting,
2123 const struct ceph_osds *new_acting,
2124 bool any_change)
2125 {
2126 if (primary_changed(old_acting, new_acting))
2127 return true;
2128
2129 if (any_change && !__osds_equal(old_acting, new_acting))
2130 return true;
2131
2132 return false;
2133 }
2134
2135 /*
2136  * calculate the file mapping for a given offset and length:
2137  * fill in the correct object number, and the object extent
2138  * offset and length.
2139 *
2140 * for now, we write only a single su, until we can
2141 * pass a stride back to the caller.
2142 */
2143 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
2144 u64 off, u64 len,
2145 u64 *ono,
2146 u64 *oxoff, u64 *oxlen)
2147 {
2148 u32 osize = layout->object_size;
2149 u32 su = layout->stripe_unit;
2150 u32 sc = layout->stripe_count;
2151 u32 bl, stripeno, stripepos, objsetno;
2152 u32 su_per_object;
2153 u64 t, su_offset;
2154
2155 dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
2156 osize, su);
2157 if (su == 0 || sc == 0)
2158 goto invalid;
2159 su_per_object = osize / su;
2160 if (su_per_object == 0)
2161 goto invalid;
2162 dout("osize %u / su %u = su_per_object %u\n", osize, su,
2163 su_per_object);
2164
2165 if ((su & ~PAGE_MASK) != 0)
2166 goto invalid;
2167
2168 /* bl = *off / su; */
2169 t = off;
2170 do_div(t, su);
2171 bl = t;
2172 dout("off %llu / su %u = bl %u\n", off, su, bl);
2173
2174 stripeno = bl / sc;
2175 stripepos = bl % sc;
2176 objsetno = stripeno / su_per_object;
2177
2178 *ono = objsetno * sc + stripepos;
2179 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
2180
2181 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
2182 t = off;
2183 su_offset = do_div(t, su);
2184 *oxoff = su_offset + (stripeno % su_per_object) * su;
2185
2186 /*
2187 * Calculate the length of the extent being written to the selected
2188 * object. This is the minimum of the full length requested (len) or
2189 * the remainder of the current stripe being written to.
2190 */
2191 *oxlen = min_t(u64, len, su - su_offset);
2192
2193 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
2194 return 0;
2195
2196 invalid:
2197 dout(" invalid layout\n");
2198 *ono = 0;
2199 *oxoff = 0;
2200 *oxlen = 0;
2201 return -EINVAL;
2202 }
2203 EXPORT_SYMBOL(ceph_calc_file_object_mapping);
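/*
 * Editor's note, a worked example (values are hypothetical): with
 * object_size = 4M, stripe_unit = 1M, stripe_count = 2, a write at
 * off = 5M maps as bl = 5, stripeno = 2, stripepos = 1, objsetno = 0,
 * so *ono = 1, *oxoff = 0 + (2 % 4) * 1M = 2M, and a 2M request is
 * clipped to *oxlen = 1M (the remainder of the current stripe unit);
 * the caller must loop for the rest, as the comment above says.
 */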
2204
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				const struct ceph_object_id *oid,
				const struct ceph_object_locator *oloc,
				struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf)) {
			buf = kmalloc(total, GFP_NOIO);
			if (!buf)
				return -ENOMEM;
		}
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';	/* 0x1f separates namespace and name */
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
	return 0;
}

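/*
 * For example (illustrative values): an object "obj" in namespace "ns1"
 * is hashed over the 7-byte key "ns1\037obj", so the same object name
 * in different namespaces yields different seeds and therefore,
 * typically, different PGs.
 */
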
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	return __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  This is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}

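/*
 * Worked example (illustrative, assuming pgp_num = 12 and so a mask of
 * 15 in each pool): ceph_stable_mod(x, b, bmask) returns x & bmask if
 * that is < b, else x & (bmask >> 1), so seed 13 maps to 13 & 7 = 5
 * while seed 5 stays 5.  In the legacy (non-HASHPSPOOL) case above,
 * pps collisions across pools are then direct: seed 5 in pool 0, seed
 * 4 in pool 1 and seed 3 in pool 2 all yield pps 5 - exactly the
 * 0.5 == 1.4 == 2.3 overlap noted in the comment.
 */
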
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    u64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);

	/*
	 * crush_do_rule() needs scratch space; a single workspace is
	 * preallocated per map, so serialize access to it.
	 */
	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}

static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set nonexistent devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}

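/*
 * For example (illustrative set): CRUSH output [1,7,3] where osd.7 no
 * longer exists becomes [1,3] (size 2) for a replicated pool, but
 * [1,NONE,3] for an erasure-coded pool, where positions carry shard
 * identity and therefore must not shift.
 */
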
/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size,
		       ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}

/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		return;
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}
				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}

			/* apply this item and keep processing the rest */
			if (!exists && pos >= 0)
				raw->osds[pos] = to;
		}
	}
}

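/*
 * For example (illustrative values): pg_upmap_items [[1,4],[2,5]]
 * applied to a raw set [0,1,2] remaps osd.1 -> osd.4 and osd.2 ->
 * osd.5, giving [0,4,5]; an item is skipped if its target already
 * appears in the set or is marked out (weight 0).
 */
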
/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/nonexistent devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}

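/*
 * For example (illustrative set): raw [4,2,9] with osd.2 down becomes
 * up [4,9] with primary 4 for a replicated pool.  For an erasure-coded
 * pool it becomes [4,NONE,9]; the backwards walk leaves ->primary set
 * to the first (lowest-index) OSD that is up, again 4.
 */
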
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}

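/*
 * Numerically (assuming the usual 16.16 fixed-point affinity range,
 * with CEPH_OSD_MAX_PRIMARY_AFFINITY == 0x10000 == 1.0): an OSD with
 * affinity 0x8000 (0.5) keeps the primary role whenever the top 16
 * bits of crush_hash32_2(pps, osd) fall below 0x8000, i.e. for roughly
 * half of its PGs; the rest fall through to the next OSD in the up
 * set, with the first candidate kept as a fallback.
 */
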
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}

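/*
 * For example (illustrative values): a pg_temp of [1,2] with osd.1
 * down yields @temp = [2] with primary 2 for a replicated pool, or
 * [NONE,2] with primary 2 for an erasure-coded one.  A primary_temp
 * entry then overrides ->primary regardless of what pg_temp produced.
 */
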
/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

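/*
 * For example (illustrative values): during backfill a pg_temp entry
 * might pin the acting set to [1,2,0] with primary 1 while the up set
 * is [0,1,2] with primary 0; I/O is then directed at osd.1 until the
 * temp mapping is removed and the two sets converge again.
 */
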
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid;	/* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid;	/* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

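/*
 * For example (illustrative values): for an erasure-coded PG whose
 * acting set is [NONE,4,7] with primary 4, the primary's shard id is
 * its position in the set, so spgid->shard = 1.  Replicated pools
 * always get CEPH_SPG_NOSHARD, since their positions are fungible.
 */
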
/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
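
/*
 * Minimal usage sketch (not part of this file's API): chain the
 * exported helpers above to find the acting primary for a named
 * object.  example_object_to_primary() is a hypothetical caller;
 * error handling is abbreviated.
 */
#if 0	/* illustrative only */
static int example_object_to_primary(struct ceph_osdmap *osdmap,
				     const struct ceph_object_id *oid,
				     const struct ceph_object_locator *oloc)
{
	struct ceph_pg raw_pgid;
	int ret;

	/* object name + locator -> raw PG (pool id, full-precision seed) */
	ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
	if (ret)
		return ret;

	/* raw PG -> up/acting sets -> acting primary (or -1 if none) */
	return ceph_pg_to_acting_primary(osdmap, &raw_pgid);
}
#endif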