/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
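
/*
 * For example, converting between the block layer's sector units and
 * byte offsets is just a shift; rbd_request_fn() below does exactly
 * this when it turns a request's starting sector into an image byte
 * offset:
 *
 *	u64 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
 *	u64 length = (u64) blk_rq_bytes(rq);
 */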

/* It might be useful to have these defined elsewhere */

#define	U8_MAX	((u8) (~0U))
#define	U16_MAX	((u16) (~0U))
#define	U32_MAX	((u32) (~0U))
#define	U64_MAX	((u64) (~0ULL))

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	1

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_ALL	(0)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
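
/*
 * A quick sanity check of that bound: an n-byte int has at most
 * ceil(8n * log10(2)) ~= 2.41n decimal digits, and 5n/2 = 2.5n
 * over-approximates that; the +1 leaves room for a sign or NUL.
 * With 4-byte ints the macro yields 11, comfortably covering the
 * 10 digits of INT_MAX (2147483647).
 */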

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 obj_version;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	char		*pool_name;

	char		*image_id;
	char		*image_name;

	u64		snap_id;
	char		*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */

	struct rbd_img_request	*img_request;
	struct list_head	links;		/* img_request->obj_requests */
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	u64			version;
	s32			result;
	atomic_t		done;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

struct rbd_img_request {
	struct request		*rq;
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	bool			write_request;	/* false for read */
	union {
		struct ceph_snap_context *snapc;	/* for writes */
		u64		snap_id;		/* for reads */
	};
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &ireq->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &ireq->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &ireq->obj_requests, links)
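
/*
 * A typical traversal, as rbd_img_request_submit() does below, is a
 * minimal sketch of how these iterators are meant to be used:
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		(void) rbd_obj_request_submit(osdc, obj_request);
 *
 * The _safe variant walks the list in reverse and tolerates removal
 * of the current entry, which is what the teardown paths need.
 */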

struct rbd_snap {
	struct device		dev;
	const char		*name;
	u64			size;
	struct list_head	node;
	u64			id;
	u64			features;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_remove_snap_dev(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("rbd_client_create\n");
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);

	dout("rbd_client_create created %p\n", rbdc);
	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	return ERR_PTR(ret);
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			kref_get(&client_node->kref);
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
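
/*
 * This function is handed to the option parser as a callback; each
 * comma-separated token in the options field ends up here.  As a
 * hedged sketch (the exact plumbing lives in rbd_add(), later in
 * this file), mapping an image read-only might look like:
 *
 *	# echo "1.2.3.4:6789 name=admin,ro rbd myimage -" \
 *		> /sys/bus/rbd/add
 *
 * where the "ro" token reaches parse_rbd_opts_token() and sets
 * rbd_opts->read_only.  See Documentation/ABI/testing/sysfs-bus-rbd
 * for the authoritative format.
 */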

/*
 * Get a ceph client with specific addr and configuration; if one
 * does not exist, create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * The client list lock is taken here to remove the client from the
 * list, so the caller must not hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("rbd_release_client %p\n", rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Create a new header structure, translating the header format from
 * the on-disk header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			return -EIO;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}

static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}

static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
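
/*
 * A worked example of the segment math above, assuming the common
 * default object order of 22 (4 MiB objects): an image I/O of 4 MiB
 * starting at image offset 6 MiB has
 *
 *	segment            = 6 MiB >> 22         = 1
 *	rbd_segment_offset = 6 MiB & (4 MiB - 1) = 2 MiB
 *	rbd_segment_length = 4 MiB - 2 MiB       = 2 MiB
 *
 * so the first object request covers the tail of object 1, and the
 * remaining 2 MiB spills into object 2 on the next loop iteration
 * in rbd_img_request_fill_bio().
 */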

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
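
/*
 * The in-out convention makes carving one request's bio chain into
 * per-object pieces a simple loop.  A minimal sketch (this is what
 * rbd_img_request_fill_bio() does below, with real segment lengths):
 *
 *	struct bio *bio_list = rq->bio;
 *	unsigned int offset = 0;
 *
 *	while (resid) {
 *		struct bio *clone;
 *
 *		clone = bio_chain_clone_range(&bio_list, &offset,
 *					      seg_len, GFP_ATOMIC);
 *		...
 *	}
 *
 * After each call, bio_list and offset already point at the first
 * un-cloned byte, ready for the next segment.
 */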

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	rbd_obj_request_get(obj_request);
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
{
	struct ceph_osd_req_op *op;
	va_list args;
	size_t size;

	op = kzalloc(sizeof (*op), GFP_NOIO);
	if (!op)
		return NULL;
	op->op = opcode;
	va_start(args, opcode);
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		/* rbd_osd_req_op_create(READ, offset, length) */
		/* rbd_osd_req_op_create(WRITE, offset, length) */
		op->extent.offset = va_arg(args, u64);
		op->extent.length = va_arg(args, u64);
		if (opcode == CEPH_OSD_OP_WRITE)
			op->payload_len = op->extent.length;
		break;
	case CEPH_OSD_OP_CALL:
		/* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
		op->cls.class_name = va_arg(args, char *);
		size = strlen(op->cls.class_name);
		rbd_assert(size <= (size_t) U8_MAX);
		op->cls.class_len = size;
		op->payload_len = size;

		op->cls.method_name = va_arg(args, char *);
		size = strlen(op->cls.method_name);
		rbd_assert(size <= (size_t) U8_MAX);
		op->cls.method_len = size;
		op->payload_len += size;

		op->cls.argc = 0;
		op->cls.indata = va_arg(args, void *);
		size = va_arg(args, size_t);
		rbd_assert(size <= (size_t) U32_MAX);
		op->cls.indata_len = (u32) size;
		op->payload_len += size;
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		/* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
		/* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
		op->watch.cookie = va_arg(args, u64);
		op->watch.ver = va_arg(args, u64);
		op->watch.ver = cpu_to_le64(op->watch.ver);
		if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
			op->watch.flag = (u8) 1;
		break;
	default:
		rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
		kfree(op);
		op = NULL;
		break;
	}
	va_end(args);

	return op;
}
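
/*
 * Because the argument list is interpreted per opcode, callers must
 * match the shapes documented in the switch above.  The two patterns
 * used later in this file look like:
 *
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ,
 *				   (u64) offset, (u64) length);
 *
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, "class", "method",
 *				   outbound, outbound_size);
 *
 * Note the types: va_arg() extracts exactly the type written in the
 * switch, so passing, say, an int where a u64 is expected would be
 * read incorrectly.
 */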

static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
{
	kfree(op);
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return wait_for_completion_interruptible(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	atomic_set(&obj_request->done, 1);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

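/*
 * A read of an object that doesn't exist comes back -ENOENT.  For
 * rbd this simply means the object has never been written, so the
 * callback below substitutes zeros for the missing data and reports
 * success.  Similarly, a short read has its tail zeroed and is
 * rounded up to the requested length, because the block layer
 * expects the full buffer to be accounted for.
 */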
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	u64 xferred;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	xferred = le64_to_cpu(op->extent.length);
	rbd_assert(xferred < (u64) UINT_MAX);
	if (obj_request->result == (s32) -ENOENT) {
		zero_bio_chain(obj_request->bio_list, 0);
		obj_request->result = 0;
	} else if (xferred < obj_request->length && !obj_request->result) {
		zero_bio_chain(obj_request->bio_list, xferred);
		xferred = obj_request->length;
	}
	obj_request->xferred = xferred;
	atomic_set(&obj_request->done, 1);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	obj_request->xferred = le64_to_cpu(op->extent.length);
	atomic_set(&obj_request->done, 1);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct ceph_osd_reply_head *reply_head;
	struct ceph_osd_op *op;
	u32 num_ops;
	u16 opcode;

	rbd_assert(osd_req == obj_request->osd_req);
	rbd_assert(!!obj_request->img_request ^
				(obj_request->which == BAD_WHICH));

	obj_request->xferred = le32_to_cpu(msg->hdr.data_len);
	reply_head = msg->front.iov_base;
	obj_request->result = (s32) le32_to_cpu(reply_head->result);
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	num_ops = le32_to_cpu(reply_head->num_ops);
	WARN_ON(num_ops != 1);	/* For now */

	op = &reply_head->ops[0];
	opcode = le16_to_cpu(op->op);
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request, op);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (atomic_read(&obj_request->done))
		rbd_obj_request_complete(obj_request);
}

static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request,
					struct ceph_osd_req_op *op)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	struct timespec now;
	struct timespec *mtime;
	u64 snap_id = CEPH_NOSNAP;
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;

	if (img_request) {
		rbd_assert(img_request->write_request == write_request);
		if (img_request->write_request)
			snapc = img_request->snapc;
		else
			snap_id = img_request->snap_id;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		rbd_assert(obj_request->bio_list != NULL);
		osd_req->r_bio = obj_request->bio_list;
		/* osd client requires "num pages" even for bio */
		osd_req->r_num_pages = calc_pages_for(offset, length);
		break;
	case OBJ_REQUEST_PAGES:
		osd_req->r_pages = obj_request->pages;
		osd_req->r_num_pages = obj_request->page_count;
		osd_req->r_page_alignment = offset & ~PAGE_MASK;
		break;
	}

	if (write_request) {
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
		now = CURRENT_TIME;
		mtime = &now;
	} else {
		osd_req->r_flags = CEPH_OSD_FLAG_READ;
		mtime = NULL;	/* not needed for reads */
		offset = 0;	/* These are not used... */
		length = 0;	/* ...for osd read requests */
	}

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	/* osd_req will get its own reference to snapc (if non-null) */

	ceph_osdc_build_request(osd_req, offset, length, 1, op,
				snapc, snap_id, mtime);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	atomic_set(&obj_request->done, 0);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
struct rbd_img_request *rbd_img_request_create(struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
		if (WARN_ON(!snapc)) {
			kfree(img_request);
			return NULL;	/* Shouldn't happen */
		}
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->write_request = write_request;
	if (write_request)
		img_request->snapc = snapc;
	else
		img_request->snap_id = rbd_dev->spec->snap_id;
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	return img_request;
}
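
/*
 * The expected lifecycle, as driven by rbd_request_fn() below, is
 * roughly this sketch:
 *
 *	img_request = rbd_img_request_create(rbd_dev, offset, length,
 *					     write_request);
 *	img_request->rq = rq;
 *	result = rbd_img_request_fill_bio(img_request, rq->bio);
 *	if (!result)
 *		result = rbd_img_request_submit(img_request);
 *	if (result)
 *		rbd_img_request_put(img_request);
 *
 * On success the final reference is dropped by
 * rbd_img_request_complete() once every object request has finished.
 */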

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request->write_request)
		ceph_put_snap_context(img_request->snapc);

	kfree(img_request);
}

static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
					struct bio *bio_list)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	unsigned int bio_offset;
	u64 image_offset;
	u64 resid;
	u16 opcode;

	opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
					      : CEPH_OSD_OP_READ;
	bio_offset = 0;
	image_offset = img_request->offset;
	rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
	resid = img_request->length;
	while (resid) {
		const char *object_name;
		unsigned int clone_size;
		struct ceph_osd_req_op *op;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, image_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, image_offset);
		length = rbd_segment_length(rbd_dev, image_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length,
						OBJ_REQUEST_BIO);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		rbd_assert(length <= (u64) UINT_MAX);
		clone_size = (unsigned int) length;
		obj_request->bio_list = bio_chain_clone_range(&bio_list,
						&bio_offset, clone_size,
						GFP_ATOMIC);
		if (!obj_request->bio_list)
			goto out_partial;

		/*
		 * Build up the op to use in building the osd
		 * request.  Note that the contents of the op are
		 * copied by rbd_osd_req_create().
		 */
		op = rbd_osd_req_op_create(opcode, offset, length);
		if (!op)
			goto out_partial;
		obj_request->osd_req = rbd_osd_req_create(rbd_dev,
						img_request->write_request,
						obj_request, op);
		rbd_osd_req_op_destroy(op);
		if (!obj_request->osd_req)
			goto out_partial;
		/* status and version are initially zero-filled */

		rbd_img_obj_request_add(img_request, obj_request);

		image_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}

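/*
 * Object requests may complete in any order, but blk_end_request()
 * must consume a request's bytes from the front.  The callback below
 * therefore only advances past requests that are both done and
 * contiguous with next_completion; an out-of-order completion merely
 * marks itself done, and whichever completion fills the gap ends the
 * whole run.
 */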
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->rq != NULL);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		unsigned int xferred;
		int result;

		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!atomic_read(&obj_request->done))
			break;

		rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
		xferred = (unsigned int) obj_request->xferred;
		result = (int) obj_request->result;
		if (result)
			rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
				img_request->write_request ? "write" : "read",
				result, xferred);

		more = blk_end_request(img_request->rq, result, xferred);
		which++;
	}
	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;

	for_each_obj_request(img_request, obj_request) {
		int ret;

		obj_request->callback = rbd_img_obj_callback;
		ret = rbd_obj_request_submit(osdc, obj_request);
		if (ret)
			return ret;
		/*
		 * The image request has its own reference to each
		 * of its object requests, so we can safely drop the
		 * initial one here.
		 */
		rbd_obj_request_put(obj_request);
	}

	return 0;
}

static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
				   u64 ver, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op *op;
	struct ceph_osd_client *osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	obj_request->callback = rbd_obj_request_put;
	ret = rbd_obj_request_submit(osdc, obj_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	u64 hver;
	int rc;

	if (!rbd_dev)
		return;

	dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
		rbd_dev->header_name, (unsigned long long) notify_id,
		(unsigned int) opcode);
	rc = rbd_dev_refresh(rbd_dev, &hver);
	if (rc)
		rbd_warn(rbd_dev, "got notification but failed to "
			   "update snaps: %d\n", rc);

	rbd_obj_notify_ack(rbd_dev, hver, notify_id);
}

/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op *op;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie,
				rbd_dev->header.obj_version, start);
	if (!op)
		goto out_cancel;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
							obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}

/*
 * Synchronous osd object method call
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     char *inbound,
			     size_t inbound_size,
			     u64 *version)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	struct ceph_osd_req_op *op;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations but they
	 * don't involve object data (so no offset or length).
	 * The result should be placed into the inbound buffer
	 * provided.  They also supply outbound data--parameters for
	 * the object method.  Currently if this is present it will
	 * be a snapshot id.
	 */
	page_count = (u32) calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
					method_name, outbound, outbound_size);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
	ret = ceph_copy_from_page_vector(pages, inbound, 0,
					obj_request->xferred);
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
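
/*
 * As a hedged usage sketch (the format 2 image code later in this
 * file makes calls of this shape), fetching an image's size for a
 * given snapshot might look like:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  (char *) &snapid, sizeof (snapid),
 *				  (char *) &size_buf, sizeof (size_buf),
 *				  NULL);
 *
 * where "rbd" and "get_size" name the class and method served by
 * the OSD's rbd class plugin.
 */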

static void rbd_request_fn(struct request_queue *q)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		result = -EINVAL;
		if (WARN_ON(offset && length > U64_MAX - offset + 1))
			goto end_request;	/* Shouldn't happen */

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill_bio(img_request, rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "obj_request %s result %d\n",
				write_request ? "write" : "read", result);
			__blk_end_request_all(rq, result);
		}
	}
}

/*
 * a queue callback. Makes sure that we don't create a bio that spans across
 * multiple osd objects. One exception would be with single-page bios,
 * which we handle later at bio_chain_clone_range()
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Convert the partition-relative bio start sector into an
	 * offset relative to the enclosing device, then find how
	 * far into its rbd object that offset lies.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
1994
1995 static void rbd_free_disk(struct rbd_device *rbd_dev)
1996 {
1997 struct gendisk *disk = rbd_dev->disk;
1998
1999 if (!disk)
2000 return;
2001
2002 if (disk->flags & GENHD_FL_UP)
2003 del_gendisk(disk);
2004 if (disk->queue)
2005 blk_cleanup_queue(disk->queue);
2006 put_disk(disk);
2007 }
2008
2009 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2010 const char *object_name,
2011 u64 offset, u64 length,
char *buf, u64 *version)
{
2015 struct ceph_osd_req_op *op;
2016 struct rbd_obj_request *obj_request;
2017 struct ceph_osd_client *osdc;
2018 struct page **pages = NULL;
2019 u32 page_count;
2020 int ret;
2021
2022 page_count = (u32) calc_pages_for(offset, length);
2023 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);	/* don't fall through with an error pointer */
2026
2027 ret = -ENOMEM;
2028 obj_request = rbd_obj_request_create(object_name, offset, length,
2029 OBJ_REQUEST_PAGES);
2030 if (!obj_request)
2031 goto out;
2032
2033 obj_request->pages = pages;
2034 obj_request->page_count = page_count;
2035
2036 op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
2037 if (!op)
2038 goto out;
2039 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2040 obj_request, op);
2041 rbd_osd_req_op_destroy(op);
2042 if (!obj_request->osd_req)
2043 goto out;
2044
2045 osdc = &rbd_dev->rbd_client->client->osdc;
2046 ret = rbd_obj_request_submit(osdc, obj_request);
2047 if (ret)
2048 goto out;
2049 ret = rbd_obj_request_wait(obj_request);
2050 if (ret)
2051 goto out;
2052
2053 ret = obj_request->result;
2054 if (ret < 0)
2055 goto out;
2056 ret = ceph_copy_from_page_vector(pages, buf, 0, obj_request->xferred);
2057 if (version)
2058 *version = obj_request->version;
2059 out:
2060 if (obj_request)
2061 rbd_obj_request_put(obj_request);
2062 else
2063 ceph_release_page_vector(pages, page_count);
2064
2065 return ret;
2066 }
2067
2068 /*
2069 * Read the complete header for the given rbd device.
2070 *
2071 * Returns a pointer to a dynamically-allocated buffer containing
2072 * the complete and validated header. Caller can pass the address
2073 * of a variable that will be filled in with the version of the
2074 * header object at the time it was read.
2075 *
2076 * Returns a pointer-coded errno if a failure occurs.
2077 */
2078 static struct rbd_image_header_ondisk *
2079 rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
2080 {
2081 struct rbd_image_header_ondisk *ondisk = NULL;
2082 u32 snap_count = 0;
2083 u64 names_size = 0;
2084 u32 want_count;
2085 int ret;
2086
2087 /*
2088 * The complete header will include an array of its 64-bit
2089 * snapshot ids, followed by the names of those snapshots as
2090 * a contiguous block of NUL-terminated strings. Note that
2091 * the number of snapshots could change by the time we read
2092 * it in, in which case we re-read it.
2093 */
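/*
 * For example, the first pass (snap_count == 0) allocates just
 * sizeof (*ondisk) bytes; if that read reports three snapshots
 * with, say, 24 bytes of names, the loop frees the buffer and
 * retries with room for three struct rbd_image_snap_ondisk
 * entries plus the name block, exiting once two successive reads
 * agree on the snapshot count.  (Counts here are illustrative
 * only.)
 */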
2094 do {
2095 size_t size;
2096
2097 kfree(ondisk);
2098
2099 size = sizeof (*ondisk);
2100 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2101 size += names_size;
2102 ondisk = kmalloc(size, GFP_KERNEL);
2103 if (!ondisk)
2104 return ERR_PTR(-ENOMEM);
2105
2106 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2107 0, size,
2108 (char *) ondisk, version);
2109
2110 if (ret < 0)
2111 goto out_err;
if (WARN_ON((size_t) ret < size)) {
rbd_warn(rbd_dev, "short header read (want %zd got %d)",
size, ret);
ret = -ENXIO;
goto out_err;
}
2118 if (!rbd_dev_ondisk_valid(ondisk)) {
2119 ret = -ENXIO;
2120 rbd_warn(rbd_dev, "invalid header");
2121 goto out_err;
2122 }
2123
2124 names_size = le64_to_cpu(ondisk->snap_names_len);
2125 want_count = snap_count;
2126 snap_count = le32_to_cpu(ondisk->snap_count);
2127 } while (snap_count != want_count);
2128
2129 return ondisk;
2130
2131 out_err:
2132 kfree(ondisk);
2133
2134 return ERR_PTR(ret);
2135 }
2136
/*
 * Re-read the on-disk header and convert it to the in-memory form.
 */
2140 static int rbd_read_header(struct rbd_device *rbd_dev,
2141 struct rbd_image_header *header)
2142 {
2143 struct rbd_image_header_ondisk *ondisk;
2144 u64 ver = 0;
2145 int ret;
2146
2147 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
2148 if (IS_ERR(ondisk))
2149 return PTR_ERR(ondisk);
2150 ret = rbd_header_from_disk(header, ondisk);
2151 if (ret >= 0)
2152 header->obj_version = ver;
2153 kfree(ondisk);
2154
2155 return ret;
2156 }
2157
2158 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
2159 {
2160 struct rbd_snap *snap;
2161 struct rbd_snap *next;
2162
2163 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
2164 rbd_remove_snap_dev(snap);
2165 }
2166
2167 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2168 {
2169 sector_t size;
2170
2171 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
2172 return;
2173
size = (sector_t) (rbd_dev->header.image_size / SECTOR_SIZE);
2175 dout("setting size to %llu sectors", (unsigned long long) size);
2176 rbd_dev->mapping.size = (u64) size;
2177 set_capacity(rbd_dev->disk, size);
2178 }
2179
/*
 * Refresh the whole v1 header, including the snapshot context,
 * and update the in-memory copy under the header semaphore.
 */
2183 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
2184 {
2185 int ret;
2186 struct rbd_image_header h;
2187
2188 ret = rbd_read_header(rbd_dev, &h);
2189 if (ret < 0)
2190 return ret;
2191
2192 down_write(&rbd_dev->header_rwsem);
2193
2194 /* Update image size, and check for resize of mapped image */
2195 rbd_dev->header.image_size = h.image_size;
2196 rbd_update_mapping_size(rbd_dev);
2197
2198 /* rbd_dev->header.object_prefix shouldn't change */
2199 kfree(rbd_dev->header.snap_sizes);
2200 kfree(rbd_dev->header.snap_names);
2201 /* osd requests may still refer to snapc */
2202 ceph_put_snap_context(rbd_dev->header.snapc);
2203
2204 if (hver)
2205 *hver = h.obj_version;
2206 rbd_dev->header.obj_version = h.obj_version;
2207 rbd_dev->header.image_size = h.image_size;
2208 rbd_dev->header.snapc = h.snapc;
2209 rbd_dev->header.snap_names = h.snap_names;
2210 rbd_dev->header.snap_sizes = h.snap_sizes;
2211 /* Free the extra copy of the object prefix */
2212 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
2213 kfree(h.object_prefix);
2214
2215 ret = rbd_dev_snaps_update(rbd_dev);
2216 if (!ret)
2217 ret = rbd_dev_snaps_register(rbd_dev);
2218
2219 up_write(&rbd_dev->header_rwsem);
2220
2221 return ret;
2222 }
2223
2224 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
2225 {
2226 int ret;
2227
2228 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
2229 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2230 if (rbd_dev->image_format == 1)
2231 ret = rbd_dev_v1_refresh(rbd_dev, hver);
2232 else
2233 ret = rbd_dev_v2_refresh(rbd_dev, hver);
2234 mutex_unlock(&ctl_mutex);
2235
2236 return ret;
2237 }
2238
2239 static int rbd_init_disk(struct rbd_device *rbd_dev)
2240 {
2241 struct gendisk *disk;
2242 struct request_queue *q;
2243 u64 segment_size;
2244
2245 /* create gendisk info */
2246 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
2247 if (!disk)
2248 return -ENOMEM;
2249
2250 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
2251 rbd_dev->dev_id);
2252 disk->major = rbd_dev->major;
2253 disk->first_minor = 0;
2254 disk->fops = &rbd_bd_ops;
2255 disk->private_data = rbd_dev;
2256
2257 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
2258 if (!q)
2259 goto out_disk;
2260
2261 /* We use the default size, but let's be explicit about it. */
2262 blk_queue_physical_block_size(q, SECTOR_SIZE);
2263
2264 /* set io sizes to object size */
2265 segment_size = rbd_obj_bytes(&rbd_dev->header);
2266 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
2267 blk_queue_max_segment_size(q, segment_size);
2268 blk_queue_io_min(q, segment_size);
2269 blk_queue_io_opt(q, segment_size);
2270
2271 blk_queue_merge_bvec(q, rbd_merge_bvec);
2272 disk->queue = q;
2273
2274 q->queuedata = rbd_dev;
2275
2276 rbd_dev->disk = disk;
2277
2278 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
2279
2280 return 0;
2281 out_disk:
2282 put_disk(disk);
2283
2284 return -ENOMEM;
2285 }
2286
2287 /*
2288 sysfs
2289 */
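/*
 * Each mapped image appears under /sys/bus/rbd/devices/<id>/.
 * Illustrative session (values are examples only):
 *
 *   # cat /sys/bus/rbd/devices/0/size
 *   1073741824
 *   # echo 1 > /sys/bus/rbd/devices/0/refresh
 */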
2290
2291 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
2292 {
2293 return container_of(dev, struct rbd_device, dev);
2294 }
2295
2296 static ssize_t rbd_size_show(struct device *dev,
2297 struct device_attribute *attr, char *buf)
2298 {
2299 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2300 sector_t size;
2301
2302 down_read(&rbd_dev->header_rwsem);
2303 size = get_capacity(rbd_dev->disk);
2304 up_read(&rbd_dev->header_rwsem);
2305
2306 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
2307 }
2308
2309 /*
2310 * Note this shows the features for whatever's mapped, which is not
2311 * necessarily the base image.
2312 */
2313 static ssize_t rbd_features_show(struct device *dev,
2314 struct device_attribute *attr, char *buf)
2315 {
2316 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2317
2318 return sprintf(buf, "0x%016llx\n",
2319 (unsigned long long) rbd_dev->mapping.features);
2320 }
2321
2322 static ssize_t rbd_major_show(struct device *dev,
2323 struct device_attribute *attr, char *buf)
2324 {
2325 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2326
2327 return sprintf(buf, "%d\n", rbd_dev->major);
2328 }
2329
2330 static ssize_t rbd_client_id_show(struct device *dev,
2331 struct device_attribute *attr, char *buf)
2332 {
2333 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2334
2335 return sprintf(buf, "client%lld\n",
2336 ceph_client_id(rbd_dev->rbd_client->client));
2337 }
2338
2339 static ssize_t rbd_pool_show(struct device *dev,
2340 struct device_attribute *attr, char *buf)
2341 {
2342 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2343
2344 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
2345 }
2346
2347 static ssize_t rbd_pool_id_show(struct device *dev,
2348 struct device_attribute *attr, char *buf)
2349 {
2350 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2351
2352 return sprintf(buf, "%llu\n",
2353 (unsigned long long) rbd_dev->spec->pool_id);
2354 }
2355
2356 static ssize_t rbd_name_show(struct device *dev,
2357 struct device_attribute *attr, char *buf)
2358 {
2359 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2360
2361 if (rbd_dev->spec->image_name)
2362 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
2363
2364 return sprintf(buf, "(unknown)\n");
2365 }
2366
2367 static ssize_t rbd_image_id_show(struct device *dev,
2368 struct device_attribute *attr, char *buf)
2369 {
2370 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2371
2372 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
2373 }
2374
2375 /*
2376 * Shows the name of the currently-mapped snapshot (or
2377 * RBD_SNAP_HEAD_NAME for the base image).
2378 */
2379 static ssize_t rbd_snap_show(struct device *dev,
2380 struct device_attribute *attr,
2381 char *buf)
2382 {
2383 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2384
2385 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
2386 }
2387
2388 /*
2389 * For an rbd v2 image, shows the pool id, image id, and snapshot id
2390 * for the parent image. If there is no parent, simply shows
2391 * "(no parent image)".
2392 */
2393 static ssize_t rbd_parent_show(struct device *dev,
2394 struct device_attribute *attr,
2395 char *buf)
2396 {
2397 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2398 struct rbd_spec *spec = rbd_dev->parent_spec;
2399 int count;
2400 char *bufp = buf;
2401
2402 if (!spec)
2403 return sprintf(buf, "(no parent image)\n");
2404
2405 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
2406 (unsigned long long) spec->pool_id, spec->pool_name);
2407 if (count < 0)
2408 return count;
2409 bufp += count;
2410
2411 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
2412 spec->image_name ? spec->image_name : "(unknown)");
2413 if (count < 0)
2414 return count;
2415 bufp += count;
2416
2417 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
2418 (unsigned long long) spec->snap_id, spec->snap_name);
2419 if (count < 0)
2420 return count;
2421 bufp += count;
2422
2423 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
2424 if (count < 0)
2425 return count;
2426 bufp += count;
2427
2428 return (ssize_t) (bufp - buf);
2429 }
2430
2431 static ssize_t rbd_image_refresh(struct device *dev,
2432 struct device_attribute *attr,
2433 const char *buf,
2434 size_t size)
2435 {
2436 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2437 int ret;
2438
2439 ret = rbd_dev_refresh(rbd_dev, NULL);
2440
2441 return ret < 0 ? ret : size;
2442 }
2443
2444 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
2445 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
2446 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
2447 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
2448 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
2449 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
2450 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
2451 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
2452 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
2453 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
2454 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
2455
2456 static struct attribute *rbd_attrs[] = {
2457 &dev_attr_size.attr,
2458 &dev_attr_features.attr,
2459 &dev_attr_major.attr,
2460 &dev_attr_client_id.attr,
2461 &dev_attr_pool.attr,
2462 &dev_attr_pool_id.attr,
2463 &dev_attr_name.attr,
2464 &dev_attr_image_id.attr,
2465 &dev_attr_current_snap.attr,
2466 &dev_attr_parent.attr,
2467 &dev_attr_refresh.attr,
2468 NULL
2469 };
2470
2471 static struct attribute_group rbd_attr_group = {
2472 .attrs = rbd_attrs,
2473 };
2474
2475 static const struct attribute_group *rbd_attr_groups[] = {
2476 &rbd_attr_group,
2477 NULL
2478 };
2479
2480 static void rbd_sysfs_dev_release(struct device *dev)
2481 {
2482 }
2483
2484 static struct device_type rbd_device_type = {
2485 .name = "rbd",
2486 .groups = rbd_attr_groups,
2487 .release = rbd_sysfs_dev_release,
2488 };
2489
2490
2491 /*
2492 sysfs - snapshots
2493 */
2494
2495 static ssize_t rbd_snap_size_show(struct device *dev,
2496 struct device_attribute *attr,
2497 char *buf)
2498 {
2499 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2500
2501 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
2502 }
2503
2504 static ssize_t rbd_snap_id_show(struct device *dev,
2505 struct device_attribute *attr,
2506 char *buf)
2507 {
2508 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2509
2510 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
2511 }
2512
2513 static ssize_t rbd_snap_features_show(struct device *dev,
2514 struct device_attribute *attr,
2515 char *buf)
2516 {
2517 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2518
2519 return sprintf(buf, "0x%016llx\n",
2520 (unsigned long long) snap->features);
2521 }
2522
2523 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
2524 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
2525 static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
2526
2527 static struct attribute *rbd_snap_attrs[] = {
2528 &dev_attr_snap_size.attr,
2529 &dev_attr_snap_id.attr,
2530 &dev_attr_snap_features.attr,
2531 NULL,
2532 };
2533
2534 static struct attribute_group rbd_snap_attr_group = {
2535 .attrs = rbd_snap_attrs,
2536 };
2537
2538 static void rbd_snap_dev_release(struct device *dev)
2539 {
2540 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2541 kfree(snap->name);
2542 kfree(snap);
2543 }
2544
2545 static const struct attribute_group *rbd_snap_attr_groups[] = {
2546 &rbd_snap_attr_group,
2547 NULL
2548 };
2549
2550 static struct device_type rbd_snap_device_type = {
2551 .groups = rbd_snap_attr_groups,
2552 .release = rbd_snap_dev_release,
2553 };
2554
2555 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
2556 {
2557 kref_get(&spec->kref);
2558
2559 return spec;
2560 }
2561
2562 static void rbd_spec_free(struct kref *kref);
2563 static void rbd_spec_put(struct rbd_spec *spec)
2564 {
2565 if (spec)
2566 kref_put(&spec->kref, rbd_spec_free);
2567 }
2568
2569 static struct rbd_spec *rbd_spec_alloc(void)
2570 {
2571 struct rbd_spec *spec;
2572
2573 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
2574 if (!spec)
2575 return NULL;
2576 kref_init(&spec->kref);
2577
2578 rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
2579
2580 return spec;
2581 }
2582
2583 static void rbd_spec_free(struct kref *kref)
2584 {
2585 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
2586
2587 kfree(spec->pool_name);
2588 kfree(spec->image_id);
2589 kfree(spec->image_name);
2590 kfree(spec->snap_name);
2591 kfree(spec);
2592 }
2593
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
struct rbd_spec *spec)
2596 {
2597 struct rbd_device *rbd_dev;
2598
2599 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
2600 if (!rbd_dev)
2601 return NULL;
2602
2603 spin_lock_init(&rbd_dev->lock);
2604 rbd_dev->flags = 0;
2605 INIT_LIST_HEAD(&rbd_dev->node);
2606 INIT_LIST_HEAD(&rbd_dev->snaps);
2607 init_rwsem(&rbd_dev->header_rwsem);
2608
2609 rbd_dev->spec = spec;
2610 rbd_dev->rbd_client = rbdc;
2611
2612 /* Initialize the layout used for all rbd requests */
2613
2614 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2615 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
2616 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2617 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
2618
2619 return rbd_dev;
2620 }
2621
2622 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
2623 {
2624 rbd_spec_put(rbd_dev->parent_spec);
2625 kfree(rbd_dev->header_name);
2626 rbd_put_client(rbd_dev->rbd_client);
2627 rbd_spec_put(rbd_dev->spec);
2628 kfree(rbd_dev);
2629 }
2630
2631 static bool rbd_snap_registered(struct rbd_snap *snap)
2632 {
2633 bool ret = snap->dev.type == &rbd_snap_device_type;
2634 bool reg = device_is_registered(&snap->dev);
2635
2636 rbd_assert(!ret ^ reg);
2637
2638 return ret;
2639 }
2640
2641 static void rbd_remove_snap_dev(struct rbd_snap *snap)
2642 {
2643 list_del(&snap->node);
2644 if (device_is_registered(&snap->dev))
2645 device_unregister(&snap->dev);
2646 }
2647
2648 static int rbd_register_snap_dev(struct rbd_snap *snap,
2649 struct device *parent)
2650 {
2651 struct device *dev = &snap->dev;
2652 int ret;
2653
2654 dev->type = &rbd_snap_device_type;
2655 dev->parent = parent;
2656 dev->release = rbd_snap_dev_release;
2657 dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
2658 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2659
2660 ret = device_register(dev);
2661
2662 return ret;
2663 }
2664
2665 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2666 const char *snap_name,
2667 u64 snap_id, u64 snap_size,
2668 u64 snap_features)
2669 {
2670 struct rbd_snap *snap;
2671 int ret;
2672
2673 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2674 if (!snap)
2675 return ERR_PTR(-ENOMEM);
2676
2677 ret = -ENOMEM;
2678 snap->name = kstrdup(snap_name, GFP_KERNEL);
2679 if (!snap->name)
2680 goto err;
2681
2682 snap->id = snap_id;
2683 snap->size = snap_size;
2684 snap->features = snap_features;
2685
2686 return snap;
2687
2688 err:
2689 kfree(snap->name);
2690 kfree(snap);
2691
2692 return ERR_PTR(ret);
2693 }
2694
2695 static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
2696 u64 *snap_size, u64 *snap_features)
2697 {
2698 char *snap_name;
2699
2700 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
2701
2702 *snap_size = rbd_dev->header.snap_sizes[which];
2703 *snap_features = 0; /* No features for v1 */
2704
2705 /* Skip over names until we find the one we are looking for */
2706
2707 snap_name = rbd_dev->header.snap_names;
2708 while (which--)
2709 snap_name += strlen(snap_name) + 1;
2710
2711 return snap_name;
2712 }
2713
2714 /*
2715 * Get the size and object order for an image snapshot, or if
2716 * snap_id is CEPH_NOSNAP, gets this information for the base
2717 * image.
2718 */
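/*
 * The reply is a packed pair: one byte of object order followed
 * by a little-endian 64-bit byte count.  As a sketch, order 22
 * with size 0x40000000 would describe a 1 GiB image striped over
 * 4 MiB objects (values illustrative; they come back from the
 * "get_size" class method).
 */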
2719 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
2720 u8 *order, u64 *snap_size)
2721 {
2722 __le64 snapid = cpu_to_le64(snap_id);
2723 int ret;
2724 struct {
2725 u8 order;
2726 __le64 size;
2727 } __attribute__ ((packed)) size_buf = { 0 };
2728
2729 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2730 "rbd", "get_size",
2731 (char *) &snapid, sizeof (snapid),
2732 (char *) &size_buf, sizeof (size_buf), NULL);
2733 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2734 if (ret < 0)
2735 return ret;
2736
2737 *order = size_buf.order;
2738 *snap_size = le64_to_cpu(size_buf.size);
2739
2740 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
2741 (unsigned long long) snap_id, (unsigned int) *order,
2742 (unsigned long long) *snap_size);
2743
2744 return 0;
2745 }
2746
2747 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2748 {
2749 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2750 &rbd_dev->header.obj_order,
2751 &rbd_dev->header.image_size);
2752 }
2753
2754 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2755 {
2756 void *reply_buf;
2757 int ret;
2758 void *p;
2759
2760 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2761 if (!reply_buf)
2762 return -ENOMEM;
2763
2764 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2765 "rbd", "get_object_prefix",
2766 NULL, 0,
2767 reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
2768 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2769 if (ret < 0)
2770 goto out;
2771 ret = 0; /* rbd_obj_method_sync() can return positive */
2772
2773 p = reply_buf;
2774 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2775 p + RBD_OBJ_PREFIX_LEN_MAX,
2776 NULL, GFP_NOIO);
2777
2778 if (IS_ERR(rbd_dev->header.object_prefix)) {
2779 ret = PTR_ERR(rbd_dev->header.object_prefix);
2780 rbd_dev->header.object_prefix = NULL;
2781 } else {
2782 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
2783 }
2784
2785 out:
2786 kfree(reply_buf);
2787
2788 return ret;
2789 }
2790
2791 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2792 u64 *snap_features)
2793 {
2794 __le64 snapid = cpu_to_le64(snap_id);
2795 struct {
2796 __le64 features;
2797 __le64 incompat;
2798 } features_buf = { 0 };
2799 u64 incompat;
2800 int ret;
2801
2802 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2803 "rbd", "get_features",
2804 (char *) &snapid, sizeof (snapid),
2805 (char *) &features_buf, sizeof (features_buf),
2806 NULL);
2807 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2808 if (ret < 0)
2809 return ret;
2810
2811 incompat = le64_to_cpu(features_buf.incompat);
2812 if (incompat & ~RBD_FEATURES_ALL)
2813 return -ENXIO;
2814
2815 *snap_features = le64_to_cpu(features_buf.features);
2816
2817 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2818 (unsigned long long) snap_id,
2819 (unsigned long long) *snap_features,
2820 (unsigned long long) le64_to_cpu(features_buf.incompat));
2821
2822 return 0;
2823 }
2824
2825 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2826 {
2827 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2828 &rbd_dev->header.features);
2829 }
2830
2831 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
2832 {
2833 struct rbd_spec *parent_spec;
2834 size_t size;
2835 void *reply_buf = NULL;
2836 __le64 snapid;
2837 void *p;
2838 void *end;
2839 char *image_id;
2840 u64 overlap;
2841 int ret;
2842
2843 parent_spec = rbd_spec_alloc();
2844 if (!parent_spec)
2845 return -ENOMEM;
2846
2847 size = sizeof (__le64) + /* pool_id */
2848 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
2849 sizeof (__le64) + /* snap_id */
2850 sizeof (__le64); /* overlap */
2851 reply_buf = kmalloc(size, GFP_KERNEL);
2852 if (!reply_buf) {
2853 ret = -ENOMEM;
2854 goto out_err;
2855 }
2856
2857 snapid = cpu_to_le64(CEPH_NOSNAP);
2858 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2859 "rbd", "get_parent",
2860 (char *) &snapid, sizeof (snapid),
2861 (char *) reply_buf, size, NULL);
2862 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2863 if (ret < 0)
2864 goto out_err;
2865
2866 ret = -ERANGE;
2867 p = reply_buf;
2868 end = (char *) reply_buf + size;
2869 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
2870 if (parent_spec->pool_id == CEPH_NOPOOL)
2871 goto out; /* No parent? No problem. */
2872
2873 /* The ceph file layout needs to fit pool id in 32 bits */
2874
ret = -EIO;
if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
goto out_err;
2878
2879 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
2880 if (IS_ERR(image_id)) {
2881 ret = PTR_ERR(image_id);
2882 goto out_err;
2883 }
2884 parent_spec->image_id = image_id;
2885 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
2886 ceph_decode_64_safe(&p, end, overlap, out_err);
2887
2888 rbd_dev->parent_overlap = overlap;
2889 rbd_dev->parent_spec = parent_spec;
2890 parent_spec = NULL; /* rbd_dev now owns this */
2891 out:
2892 ret = 0;
2893 out_err:
2894 kfree(reply_buf);
2895 rbd_spec_put(parent_spec);
2896
2897 return ret;
2898 }
2899
2900 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
2901 {
2902 size_t image_id_size;
2903 char *image_id;
2904 void *p;
2905 void *end;
2906 size_t size;
2907 void *reply_buf = NULL;
2908 size_t len = 0;
2909 char *image_name = NULL;
2910 int ret;
2911
2912 rbd_assert(!rbd_dev->spec->image_name);
2913
2914 len = strlen(rbd_dev->spec->image_id);
2915 image_id_size = sizeof (__le32) + len;
2916 image_id = kmalloc(image_id_size, GFP_KERNEL);
2917 if (!image_id)
2918 return NULL;
2919
2920 p = image_id;
2921 end = (char *) image_id + image_id_size;
2922 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
2923
2924 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
2925 reply_buf = kmalloc(size, GFP_KERNEL);
2926 if (!reply_buf)
2927 goto out;
2928
2929 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
2930 "rbd", "dir_get_name",
2931 image_id, image_id_size,
2932 (char *) reply_buf, size, NULL);
2933 if (ret < 0)
2934 goto out;
2935 p = reply_buf;
2936 end = (char *) reply_buf + size;
2937 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
2938 if (IS_ERR(image_name))
2939 image_name = NULL;
2940 else
2941 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
2942 out:
2943 kfree(reply_buf);
2944 kfree(image_id);
2945
2946 return image_name;
2947 }
2948
2949 /*
2950 * When a parent image gets probed, we only have the pool, image,
2951 * and snapshot ids but not the names of any of them. This call
2952 * is made later to fill in those names. It has to be done after
2953 * rbd_dev_snaps_update() has completed because some of the
2954 * information (in particular, snapshot name) is not available
2955 * until then.
2956 */
2957 static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
2958 {
2959 struct ceph_osd_client *osdc;
2960 const char *name;
2961 void *reply_buf = NULL;
2962 int ret;
2963
2964 if (rbd_dev->spec->pool_name)
2965 return 0; /* Already have the names */
2966
2967 /* Look up the pool name */
2968
2969 osdc = &rbd_dev->rbd_client->client->osdc;
2970 name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
2971 if (!name) {
2972 rbd_warn(rbd_dev, "there is no pool with id %llu",
2973 rbd_dev->spec->pool_id); /* Really a BUG() */
2974 return -EIO;
2975 }
2976
2977 rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
2978 if (!rbd_dev->spec->pool_name)
2979 return -ENOMEM;
2980
2981 /* Fetch the image name; tolerate failure here */
2982
2983 name = rbd_dev_image_name(rbd_dev);
2984 if (name)
2985 rbd_dev->spec->image_name = (char *) name;
2986 else
2987 rbd_warn(rbd_dev, "unable to get image name");
2988
2989 /* Look up the snapshot name. */
2990
2991 name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
2992 if (!name) {
2993 rbd_warn(rbd_dev, "no snapshot with id %llu",
2994 rbd_dev->spec->snap_id); /* Really a BUG() */
2995 ret = -EIO;
2996 goto out_err;
2997 }
rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
if (!rbd_dev->spec->snap_name) {
ret = -ENOMEM;
goto out_err;
}
3001
3002 return 0;
3003 out_err:
3004 kfree(reply_buf);
3005 kfree(rbd_dev->spec->pool_name);
3006 rbd_dev->spec->pool_name = NULL;
3007
3008 return ret;
3009 }
3010
3011 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
3012 {
3013 size_t size;
3014 int ret;
3015 void *reply_buf;
3016 void *p;
3017 void *end;
3018 u64 seq;
3019 u32 snap_count;
3020 struct ceph_snap_context *snapc;
3021 u32 i;
3022
3023 /*
3024 * We'll need room for the seq value (maximum snapshot id),
3025 * snapshot count, and array of that many snapshot ids.
3026 * For now we have a fixed upper limit on the number we're
3027 * prepared to receive.
3028 */
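/*
 * With RBD_MAX_SNAP_COUNT at 510 this works out to
 * 8 + 4 + 510 * 8 = 4092 bytes, so the whole reply fits in a
 * single 4 KiB page.
 */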
3029 size = sizeof (__le64) + sizeof (__le32) +
3030 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3031 reply_buf = kzalloc(size, GFP_KERNEL);
3032 if (!reply_buf)
3033 return -ENOMEM;
3034
3035 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3036 "rbd", "get_snapcontext",
3037 NULL, 0,
3038 reply_buf, size, ver);
3039 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3040 if (ret < 0)
3041 goto out;
3042
3043 ret = -ERANGE;
3044 p = reply_buf;
3045 end = (char *) reply_buf + size;
3046 ceph_decode_64_safe(&p, end, seq, out);
3047 ceph_decode_32_safe(&p, end, snap_count, out);
3048
3049 /*
3050 * Make sure the reported number of snapshot ids wouldn't go
3051 * beyond the end of our buffer. But before checking that,
3052 * make sure the computed size of the snapshot context we
3053 * allocate is representable in a size_t.
3054 */
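/*
 * On a 32-bit size_t, for instance, a corrupt snap_count above
 * roughly 500 million would make the size computation below wrap
 * around; the check rejects it before kmalloc() ever sees the
 * bogus value.
 */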
3055 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3056 / sizeof (u64)) {
3057 ret = -EINVAL;
3058 goto out;
3059 }
3060 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3061 goto out;
3062
3063 size = sizeof (struct ceph_snap_context) +
3064 snap_count * sizeof (snapc->snaps[0]);
3065 snapc = kmalloc(size, GFP_KERNEL);
3066 if (!snapc) {
3067 ret = -ENOMEM;
3068 goto out;
3069 }
3070
3071 atomic_set(&snapc->nref, 1);
3072 snapc->seq = seq;
3073 snapc->num_snaps = snap_count;
3074 for (i = 0; i < snap_count; i++)
3075 snapc->snaps[i] = ceph_decode_64(&p);
3076
3077 rbd_dev->header.snapc = snapc;
3078
3079 dout(" snap context seq = %llu, snap_count = %u\n",
3080 (unsigned long long) seq, (unsigned int) snap_count);
ret = 0;
out:
kfree(reply_buf);

return ret;
3086 }
3087
3088 static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3089 {
3090 size_t size;
3091 void *reply_buf;
3092 __le64 snap_id;
3093 int ret;
3094 void *p;
3095 void *end;
3096 char *snap_name;
3097
3098 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3099 reply_buf = kmalloc(size, GFP_KERNEL);
3100 if (!reply_buf)
3101 return ERR_PTR(-ENOMEM);
3102
3103 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3104 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3105 "rbd", "get_snapshot_name",
3106 (char *) &snap_id, sizeof (snap_id),
3107 reply_buf, size, NULL);
3108 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3109 if (ret < 0)
3110 goto out;
3111
3112 p = reply_buf;
3113 end = (char *) reply_buf + size;
3114 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3115 if (IS_ERR(snap_name)) {
3116 ret = PTR_ERR(snap_name);
3117 goto out;
3118 } else {
3119 dout(" snap_id 0x%016llx snap_name = %s\n",
3120 (unsigned long long) le64_to_cpu(snap_id), snap_name);
3121 }
3122 kfree(reply_buf);
3123
3124 return snap_name;
3125 out:
3126 kfree(reply_buf);
3127
3128 return ERR_PTR(ret);
3129 }
3130
3131 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3132 u64 *snap_size, u64 *snap_features)
3133 {
3134 u64 snap_id;
3135 u8 order;
3136 int ret;
3137
3138 snap_id = rbd_dev->header.snapc->snaps[which];
3139 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
3140 if (ret)
3141 return ERR_PTR(ret);
3142 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
3143 if (ret)
3144 return ERR_PTR(ret);
3145
3146 return rbd_dev_v2_snap_name(rbd_dev, which);
3147 }
3148
3149 static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3150 u64 *snap_size, u64 *snap_features)
3151 {
3152 if (rbd_dev->image_format == 1)
3153 return rbd_dev_v1_snap_info(rbd_dev, which,
3154 snap_size, snap_features);
3155 if (rbd_dev->image_format == 2)
3156 return rbd_dev_v2_snap_info(rbd_dev, which,
3157 snap_size, snap_features);
3158 return ERR_PTR(-EINVAL);
3159 }
3160
3161 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
3162 {
3163 int ret;
3164 __u8 obj_order;
3165
3166 down_write(&rbd_dev->header_rwsem);
3167
3168 /* Grab old order first, to see if it changes */
3169
obj_order = rbd_dev->header.obj_order;
3171 ret = rbd_dev_v2_image_size(rbd_dev);
3172 if (ret)
3173 goto out;
3174 if (rbd_dev->header.obj_order != obj_order) {
3175 ret = -EIO;
3176 goto out;
3177 }
3178 rbd_update_mapping_size(rbd_dev);
3179
3180 ret = rbd_dev_v2_snap_context(rbd_dev, hver);
3181 dout("rbd_dev_v2_snap_context returned %d\n", ret);
3182 if (ret)
3183 goto out;
3184 ret = rbd_dev_snaps_update(rbd_dev);
3185 dout("rbd_dev_snaps_update returned %d\n", ret);
3186 if (ret)
3187 goto out;
3188 ret = rbd_dev_snaps_register(rbd_dev);
3189 dout("rbd_dev_snaps_register returned %d\n", ret);
3190 out:
3191 up_write(&rbd_dev->header_rwsem);
3192
3193 return ret;
3194 }
3195
3196 /*
3197 * Scan the rbd device's current snapshot list and compare it to the
3198 * newly-received snapshot context. Remove any existing snapshots
3199 * not present in the new snapshot context. Add a new snapshot for
* any snapshots in the snapshot context not in the current list.
3201 * And verify there are no changes to snapshots we already know
3202 * about.
3203 *
3204 * Assumes the snapshots in the snapshot context are sorted by
3205 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
3206 * are also maintained in that order.)
3207 */
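/*
 * Worked example (hypothetical snapshot ids): with the current
 * list holding ids 8 and 5 and a new context holding 10, 8, 4,
 * one pass inserts 10 ahead of 8, keeps 8, removes 5, and
 * appends 4 at the tail, leaving the list as 10, 8, 4 (highest
 * id first throughout).
 */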
3208 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
3209 {
3210 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3211 const u32 snap_count = snapc->num_snaps;
3212 struct list_head *head = &rbd_dev->snaps;
3213 struct list_head *links = head->next;
3214 u32 index = 0;
3215
3216 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
3217 while (index < snap_count || links != head) {
3218 u64 snap_id;
3219 struct rbd_snap *snap;
3220 char *snap_name;
3221 u64 snap_size = 0;
3222 u64 snap_features = 0;
3223
3224 snap_id = index < snap_count ? snapc->snaps[index]
3225 : CEPH_NOSNAP;
3226 snap = links != head ? list_entry(links, struct rbd_snap, node)
3227 : NULL;
3228 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
3229
3230 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
3231 struct list_head *next = links->next;
3232
3233 /*
3234 * A previously-existing snapshot is not in
3235 * the new snap context.
3236 *
3237 * If the now missing snapshot is the one the
3238 * image is mapped to, clear its exists flag
3239 * so we can avoid sending any more requests
3240 * to it.
3241 */
3242 if (rbd_dev->spec->snap_id == snap->id)
3243 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
dout("%ssnap id %llu has been removed\n",
rbd_dev->spec->snap_id == snap->id ?
"mapped " : "",
(unsigned long long) snap->id);
rbd_remove_snap_dev(snap);	/* may free snap; don't touch it after */
3249
3250 /* Done with this list entry; advance */
3251
3252 links = next;
3253 continue;
3254 }
3255
3256 snap_name = rbd_dev_snap_info(rbd_dev, index,
3257 &snap_size, &snap_features);
3258 if (IS_ERR(snap_name))
3259 return PTR_ERR(snap_name);
3260
dout("entry %u: snap_id = %llu\n", (unsigned int) index,
3262 (unsigned long long) snap_id);
3263 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
3264 struct rbd_snap *new_snap;
3265
3266 /* We haven't seen this snapshot before */
3267
3268 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
3269 snap_id, snap_size, snap_features);
3270 if (IS_ERR(new_snap)) {
3271 int err = PTR_ERR(new_snap);
3272
3273 dout(" failed to add dev, error %d\n", err);
3274
3275 return err;
3276 }
3277
3278 /* New goes before existing, or at end of list */
3279
dout(" added dev%s\n", snap ? "" : " at end");
3281 if (snap)
3282 list_add_tail(&new_snap->node, &snap->node);
3283 else
3284 list_add_tail(&new_snap->node, head);
3285 } else {
3286 /* Already have this one */
3287
3288 dout(" already present\n");
3289
3290 rbd_assert(snap->size == snap_size);
3291 rbd_assert(!strcmp(snap->name, snap_name));
3292 rbd_assert(snap->features == snap_features);
3293
3294 /* Done with this list entry; advance */
3295
3296 links = links->next;
3297 }
3298
3299 /* Advance to the next entry in the snapshot context */
3300
3301 index++;
3302 }
3303 dout("%s: done\n", __func__);
3304
3305 return 0;
3306 }
3307
3308 /*
3309 * Scan the list of snapshots and register the devices for any that
3310 * have not already been registered.
3311 */
3312 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
3313 {
3314 struct rbd_snap *snap;
3315 int ret = 0;
3316
3317 dout("%s called\n", __func__);
3318 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
3319 return -EIO;
3320
3321 list_for_each_entry(snap, &rbd_dev->snaps, node) {
3322 if (!rbd_snap_registered(snap)) {
3323 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
3324 if (ret < 0)
3325 break;
3326 }
3327 }
3328 dout("%s: returning %d\n", __func__, ret);
3329
3330 return ret;
3331 }
3332
3333 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
3334 {
3335 struct device *dev;
3336 int ret;
3337
3338 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3339
3340 dev = &rbd_dev->dev;
3341 dev->bus = &rbd_bus_type;
3342 dev->type = &rbd_device_type;
3343 dev->parent = &rbd_root_dev;
3344 dev->release = rbd_dev_release;
3345 dev_set_name(dev, "%d", rbd_dev->dev_id);
3346 ret = device_register(dev);
3347
3348 mutex_unlock(&ctl_mutex);
3349
3350 return ret;
3351 }
3352
3353 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
3354 {
3355 device_unregister(&rbd_dev->dev);
3356 }
3357
3358 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
3359
3360 /*
3361 * Get a unique rbd identifier for the given new rbd_dev, and add
3362 * the rbd_dev to the global list. The minimum rbd id is 1.
3363 */
3364 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
3365 {
3366 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
3367
3368 spin_lock(&rbd_dev_list_lock);
3369 list_add_tail(&rbd_dev->node, &rbd_dev_list);
3370 spin_unlock(&rbd_dev_list_lock);
3371 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
3372 (unsigned long long) rbd_dev->dev_id);
3373 }
3374
3375 /*
3376 * Remove an rbd_dev from the global list, and record that its
3377 * identifier is no longer in use.
3378 */
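/*
 * For example, with ids 1, 2 and 3 in use, putting id 3 makes
 * the backward scan settle on 2 as the new maximum, while
 * putting id 2 returns early because the maximum (3) is
 * unaffected.
 */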
3379 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
3380 {
3381 struct list_head *tmp;
3382 int rbd_id = rbd_dev->dev_id;
3383 int max_id;
3384
3385 rbd_assert(rbd_id > 0);
3386
3387 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
3388 (unsigned long long) rbd_dev->dev_id);
3389 spin_lock(&rbd_dev_list_lock);
3390 list_del_init(&rbd_dev->node);
3391
3392 /*
3393 * If the id being "put" is not the current maximum, there
3394 * is nothing special we need to do.
3395 */
3396 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
3397 spin_unlock(&rbd_dev_list_lock);
3398 return;
3399 }
3400
3401 /*
3402 * We need to update the current maximum id. Search the
3403 * list to find out what it is. We're more likely to find
3404 * the maximum at the end, so search the list backward.
3405 */
3406 max_id = 0;
3407 list_for_each_prev(tmp, &rbd_dev_list) {
3408 struct rbd_device *rbd_dev;
3409
3410 rbd_dev = list_entry(tmp, struct rbd_device, node);
3411 if (rbd_dev->dev_id > max_id)
3412 max_id = rbd_dev->dev_id;
3413 }
3414 spin_unlock(&rbd_dev_list_lock);
3415
3416 /*
3417 * The max id could have been updated by rbd_dev_id_get(), in
3418 * which case it now accurately reflects the new maximum.
3419 * Be careful not to overwrite the maximum value in that
3420 * case.
3421 */
3422 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
3423 dout(" max dev id has been reset\n");
3424 }
3425
3426 /*
3427 * Skips over white space at *buf, and updates *buf to point to the
3428 * first found non-space character (if any). Returns the length of
3429 * the token (string of non-white space characters) found. Note
3430 * that *buf must be terminated with '\0'.
3431 */
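/*
 * Example: with *buf pointing at "  pool image", next_token()
 * advances *buf past the leading spaces to "pool image" and
 * returns 4, the length of "pool".
 */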
3432 static inline size_t next_token(const char **buf)
3433 {
3434 /*
3435 * These are the characters that produce nonzero for
3436 * isspace() in the "C" and "POSIX" locales.
3437 */
3438 const char *spaces = " \f\n\r\t\v";
3439
3440 *buf += strspn(*buf, spaces); /* Find start of token */
3441
3442 return strcspn(*buf, spaces); /* Return token length */
3443 }
3444
3445 /*
3446 * Finds the next token in *buf, and if the provided token buffer is
3447 * big enough, copies the found token into it. The result, if
3448 * copied, is guaranteed to be terminated with '\0'. Note that *buf
3449 * must be terminated with '\0' on entry.
3450 *
3451 * Returns the length of the token found (not including the '\0').
3452 * Return value will be 0 if no token is found, and it will be >=
3453 * token_size if the token would not fit.
3454 *
3455 * The *buf pointer will be updated to point beyond the end of the
3456 * found token. Note that this occurs even if the token buffer is
3457 * too small to hold it.
3458 */
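/*
 * Example: with *buf at "pool x" and token_size 4, the token
 * "pool" (length 4) does not fit, so nothing is copied and 4 is
 * returned; with token_size 5 the token is copied and
 * NUL-terminated.  Either way *buf ends up at " x".
 */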
3459 static inline size_t copy_token(const char **buf,
3460 char *token,
3461 size_t token_size)
3462 {
3463 size_t len;
3464
3465 len = next_token(buf);
3466 if (len < token_size) {
3467 memcpy(token, *buf, len);
3468 *(token + len) = '\0';
3469 }
3470 *buf += len;
3471
3472 return len;
3473 }
3474
3475 /*
3476 * Finds the next token in *buf, dynamically allocates a buffer big
3477 * enough to hold a copy of it, and copies the token into the new
3478 * buffer. The copy is guaranteed to be terminated with '\0'. Note
3479 * that a duplicate buffer is created even for a zero-length token.
3480 *
3481 * Returns a pointer to the newly-allocated duplicate, or a null
3482 * pointer if memory for the duplicate was not available. If
3483 * the lenp argument is a non-null pointer, the length of the token
3484 * (not including the '\0') is returned in *lenp.
3485 *
3486 * If successful, the *buf pointer will be updated to point beyond
3487 * the end of the found token.
3488 *
3489 * Note: uses GFP_KERNEL for allocation.
3490 */
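/*
 * Example: with *buf at "rbd foo", dup_token() returns a
 * kmalloc'd "rbd" (reporting length 3 via *lenp, if supplied)
 * and leaves *buf pointing at " foo".
 */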
3491 static inline char *dup_token(const char **buf, size_t *lenp)
3492 {
3493 char *dup;
3494 size_t len;
3495
3496 len = next_token(buf);
3497 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
3498 if (!dup)
3499 return NULL;
3500 *(dup + len) = '\0';
3501 *buf += len;
3502
3503 if (lenp)
3504 *lenp = len;
3505
3506 return dup;
3507 }
3508
3509 /*
3510 * Parse the options provided for an "rbd add" (i.e., rbd image
3511 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
3512 * and the data written is passed here via a NUL-terminated buffer.
3513 * Returns 0 if successful or an error code otherwise.
3514 *
3515 * The information extracted from these options is recorded in
3516 * the other parameters which return dynamically-allocated
3517 * structures:
3518 * ceph_opts
3519 * The address of a pointer that will refer to a ceph options
3520 * structure. Caller must release the returned pointer using
3521 * ceph_destroy_options() when it is no longer needed.
3522 * rbd_opts
3523 * Address of an rbd options pointer. Fully initialized by
3524 * this function; caller must release with kfree().
3525 * spec
3526 * Address of an rbd image specification pointer. Fully
3527 * initialized by this function based on parsed options.
3528 * Caller must release with rbd_spec_put().
3529 *
3530 * The options passed take this form:
* <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
3532 * where:
3533 * <mon_addrs>
3534 * A comma-separated list of one or more monitor addresses.
3535 * A monitor address is an ip address, optionally followed
3536 * by a port number (separated by a colon).
3537 * I.e.: ip1[:port1][,ip2[:port2]...]
3538 * <options>
3539 * A comma-separated list of ceph and/or rbd options.
3540 * <pool_name>
3541 * The name of the rados pool containing the rbd image.
3542 * <image_name>
3543 * The name of the image in that pool to map.
* <snap_name>
* An optional snapshot name. If provided, the mapping will
* present data from the image as of the time that snapshot
* was created. The image head is used if no snapshot name is
* provided. Snapshot mappings are always read-only.
3549 */
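/*
 * Illustrative mapping request (addresses, names and key are
 * examples only):
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=AQDvyvlR rbd foo -" \
 *         > /sys/bus/rbd/add
 *
 * maps the head of image "foo" from pool "rbd" via the monitor
 * at 1.2.3.4:6789, authenticating as client.admin.
 */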
3550 static int rbd_add_parse_args(const char *buf,
3551 struct ceph_options **ceph_opts,
3552 struct rbd_options **opts,
3553 struct rbd_spec **rbd_spec)
3554 {
3555 size_t len;
3556 char *options;
3557 const char *mon_addrs;
3558 size_t mon_addrs_size;
3559 struct rbd_spec *spec = NULL;
3560 struct rbd_options *rbd_opts = NULL;
3561 struct ceph_options *copts;
3562 int ret;
3563
3564 /* The first four tokens are required */
3565
3566 len = next_token(&buf);
3567 if (!len) {
3568 rbd_warn(NULL, "no monitor address(es) provided");
3569 return -EINVAL;
3570 }
3571 mon_addrs = buf;
3572 mon_addrs_size = len + 1;
3573 buf += len;
3574
3575 ret = -EINVAL;
3576 options = dup_token(&buf, NULL);
3577 if (!options)
3578 return -ENOMEM;
3579 if (!*options) {
3580 rbd_warn(NULL, "no options provided");
3581 goto out_err;
3582 }
3583
3584 spec = rbd_spec_alloc();
3585 if (!spec)
3586 goto out_mem;
3587
3588 spec->pool_name = dup_token(&buf, NULL);
3589 if (!spec->pool_name)
3590 goto out_mem;
3591 if (!*spec->pool_name) {
3592 rbd_warn(NULL, "no pool name provided");
3593 goto out_err;
3594 }
3595
3596 spec->image_name = dup_token(&buf, NULL);
3597 if (!spec->image_name)
3598 goto out_mem;
3599 if (!*spec->image_name) {
3600 rbd_warn(NULL, "no image name provided");
3601 goto out_err;
3602 }
3603
3604 /*
3605 * Snapshot name is optional; default is to use "-"
3606 * (indicating the head/no snapshot).
3607 */
3608 len = next_token(&buf);
3609 if (!len) {
3610 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
3611 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
3612 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
3613 ret = -ENAMETOOLONG;
3614 goto out_err;
3615 }
3616 spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
3617 if (!spec->snap_name)
3618 goto out_mem;
3619 *(spec->snap_name + len) = '\0';
3620
3621 /* Initialize all rbd options to the defaults */
3622
3623 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
3624 if (!rbd_opts)
3625 goto out_mem;
3626
3627 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
3628
3629 copts = ceph_parse_options(options, mon_addrs,
3630 mon_addrs + mon_addrs_size - 1,
3631 parse_rbd_opts_token, rbd_opts);
3632 if (IS_ERR(copts)) {
3633 ret = PTR_ERR(copts);
3634 goto out_err;
3635 }
3636 kfree(options);
3637
3638 *ceph_opts = copts;
3639 *opts = rbd_opts;
3640 *rbd_spec = spec;
3641
3642 return 0;
3643 out_mem:
3644 ret = -ENOMEM;
3645 out_err:
3646 kfree(rbd_opts);
3647 rbd_spec_put(spec);
3648 kfree(options);
3649
3650 return ret;
3651 }
3652
3653 /*
3654 * An rbd format 2 image has a unique identifier, distinct from the
3655 * name given to it by the user. Internally, that identifier is
3656 * what's used to specify the names of objects related to the image.
3657 *
3658 * A special "rbd id" object is used to map an rbd image name to its
3659 * id. If that object doesn't exist, then there is no v2 rbd image
3660 * with the supplied name.
3661 *
3662 * This function will record the given rbd_dev's image_id field if
3663 * it can be determined, and in that case will return 0. If any
3664 * errors occur a negative errno will be returned and the rbd_dev's
3665 * image_id field will be unchanged (and should be NULL).
3666 */
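/*
 * As a sketch: for an image named "foo" the id object is
 * "rbd_id.foo" (RBD_ID_PREFIX followed by the image name), and
 * its "get_id" class method returns the persistent id as an
 * encoded string.
 */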
3667 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
3668 {
3669 int ret;
3670 size_t size;
3671 char *object_name;
3672 void *response;
3673 void *p;
3674
3675 /*
3676 * When probing a parent image, the image id is already
3677 * known (and the image name likely is not). There's no
3678 * need to fetch the image id again in this case.
3679 */
3680 if (rbd_dev->spec->image_id)
3681 return 0;
3682
3683 /*
3684 * First, see if the format 2 image id file exists, and if
3685 * so, get the image's persistent id from it.
3686 */
3687 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
3688 object_name = kmalloc(size, GFP_NOIO);
3689 if (!object_name)
3690 return -ENOMEM;
3691 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
3692 dout("rbd id object name is %s\n", object_name);
3693
3694 /* Response will be an encoded string, which includes a length */
3695
3696 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
3697 response = kzalloc(size, GFP_NOIO);
3698 if (!response) {
3699 ret = -ENOMEM;
3700 goto out;
3701 }
3702
3703 ret = rbd_obj_method_sync(rbd_dev, object_name,
3704 "rbd", "get_id",
3705 NULL, 0,
3706 response, RBD_IMAGE_ID_LEN_MAX, NULL);
3707 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3708 if (ret < 0)
3709 goto out;
3710 ret = 0; /* rbd_obj_method_sync() can return positive */
3711
3712 p = response;
3713 rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
3714 p + RBD_IMAGE_ID_LEN_MAX,
3715 NULL, GFP_NOIO);
3716 if (IS_ERR(rbd_dev->spec->image_id)) {
3717 ret = PTR_ERR(rbd_dev->spec->image_id);
3718 rbd_dev->spec->image_id = NULL;
3719 } else {
3720 dout("image_id is %s\n", rbd_dev->spec->image_id);
3721 }
3722 out:
3723 kfree(response);
3724 kfree(object_name);
3725
3726 return ret;
3727 }
3728
3729 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
3730 {
3731 int ret;
3732 size_t size;
3733
3734 /* Version 1 images have no id; empty string is used */
3735
3736 rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
3737 if (!rbd_dev->spec->image_id)
3738 return -ENOMEM;
3739
3740 /* Record the header object name for this rbd image. */
3741
3742 size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
3743 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3744 if (!rbd_dev->header_name) {
3745 ret = -ENOMEM;
3746 goto out_err;
3747 }
3748 sprintf(rbd_dev->header_name, "%s%s",
3749 rbd_dev->spec->image_name, RBD_SUFFIX);
3750
3751 /* Populate rbd image metadata */
3752
3753 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
3754 if (ret < 0)
3755 goto out_err;
3756
3757 /* Version 1 images have no parent (no layering) */
3758
3759 rbd_dev->parent_spec = NULL;
3760 rbd_dev->parent_overlap = 0;
3761
3762 rbd_dev->image_format = 1;
3763
3764 dout("discovered version 1 image, header name is %s\n",
3765 rbd_dev->header_name);
3766
3767 return 0;
3768
3769 out_err:
3770 kfree(rbd_dev->header_name);
3771 rbd_dev->header_name = NULL;
3772 kfree(rbd_dev->spec->image_id);
3773 rbd_dev->spec->image_id = NULL;
3774
3775 return ret;
3776 }
3777
3778 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
3779 {
3780 size_t size;
3781 int ret;
3782 u64 ver = 0;
3783
3784 /*
3785 * Image id was filled in by the caller. Record the header
3786 * object name for this rbd image.
3787 */
3788 size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
3789 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3790 if (!rbd_dev->header_name)
3791 return -ENOMEM;
3792 sprintf(rbd_dev->header_name, "%s%s",
3793 RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
3794
3795 /* Get the size and object order for the image */
3796
3797 ret = rbd_dev_v2_image_size(rbd_dev);
3798 if (ret < 0)
3799 goto out_err;
3800
3801 /* Get the object prefix (a.k.a. block_name) for the image */
3802
3803 ret = rbd_dev_v2_object_prefix(rbd_dev);
3804 if (ret < 0)
3805 goto out_err;
3806
/* Get and check the features for the image */
3808
3809 ret = rbd_dev_v2_features(rbd_dev);
3810 if (ret < 0)
3811 goto out_err;
3812
3813 /* If the image supports layering, get the parent info */
3814
3815 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
3816 ret = rbd_dev_v2_parent_info(rbd_dev);
3817 if (ret < 0)
3818 goto out_err;
3819 }
3820
3821 /* crypto and compression type aren't (yet) supported for v2 images */
3822
3823 rbd_dev->header.crypt_type = 0;
3824 rbd_dev->header.comp_type = 0;
3825
3826 /* Get the snapshot context, plus the header version */
3827
3828 ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
3829 if (ret)
3830 goto out_err;
3831 rbd_dev->header.obj_version = ver;
3832
3833 rbd_dev->image_format = 2;
3834
3835 dout("discovered version 2 image, header name is %s\n",
3836 rbd_dev->header_name);
3837
3838 return 0;
3839 out_err:
3840 rbd_dev->parent_overlap = 0;
3841 rbd_spec_put(rbd_dev->parent_spec);
3842 rbd_dev->parent_spec = NULL;
3843 kfree(rbd_dev->header_name);
3844 rbd_dev->header_name = NULL;
3845 kfree(rbd_dev->header.object_prefix);
3846 rbd_dev->header.object_prefix = NULL;
3847
3848 return ret;
3849 }
3850
3851 static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
3852 {
3853 int ret;
3854
3855 /* no need to lock here, as rbd_dev is not registered yet */
3856 ret = rbd_dev_snaps_update(rbd_dev);
3857 if (ret)
3858 return ret;
3859
3860 ret = rbd_dev_probe_update_spec(rbd_dev);
3861 if (ret)
3862 goto err_out_snaps;
3863
3864 ret = rbd_dev_set_mapping(rbd_dev);
3865 if (ret)
3866 goto err_out_snaps;
3867
3868 /* generate unique id: find highest unique id, add one */
3869 rbd_dev_id_get(rbd_dev);
3870
3871 /* Fill in the device name, now that we have its id. */
3872 BUILD_BUG_ON(DEV_NAME_LEN
3873 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
3874 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
3875
3876 /* Get our block major device number. */
3877
3878 ret = register_blkdev(0, rbd_dev->name);
3879 if (ret < 0)
3880 goto err_out_id;
3881 rbd_dev->major = ret;
3882
3883 /* Set up the blkdev mapping. */
3884
3885 ret = rbd_init_disk(rbd_dev);
3886 if (ret)
3887 goto err_out_blkdev;
3888
3889 ret = rbd_bus_add_dev(rbd_dev);
3890 if (ret)
3891 goto err_out_disk;
3892
3893 /*
3894 * At this point cleanup in the event of an error is the job
3895 * of the sysfs code (initiated by rbd_bus_del_dev()).
3896 */
3897 down_write(&rbd_dev->header_rwsem);
3898 ret = rbd_dev_snaps_register(rbd_dev);
3899 up_write(&rbd_dev->header_rwsem);
3900 if (ret)
3901 goto err_out_bus;
3902
3903 ret = rbd_dev_header_watch_sync(rbd_dev, 1);
3904 if (ret)
3905 goto err_out_bus;
3906
3907 /* Everything's ready. Announce the disk to the world. */
3908
3909 add_disk(rbd_dev->disk);
3910
3911 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
3912 (unsigned long long) rbd_dev->mapping.size);
3913
3914 return ret;
3915 err_out_bus:
3916 /* this will also clean up rest of rbd_dev stuff */
3917
3918 rbd_bus_del_dev(rbd_dev);
3919
3920 return ret;
3921 err_out_disk:
3922 rbd_free_disk(rbd_dev);
3923 err_out_blkdev:
3924 unregister_blkdev(rbd_dev->major, rbd_dev->name);
3925 err_out_id:
3926 rbd_dev_id_put(rbd_dev);
3927 err_out_snaps:
3928 rbd_remove_all_snaps(rbd_dev);
3929
3930 return ret;
3931 }
3932
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);

		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);

	return ret;
}

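/*
 * Format detection above rests entirely on rbd_dev_image_id(): a
 * format 2 image has a separate image id object while a format 1
 * image does not, so a lookup failure selects the v1 probe.  Whatever
 * the chosen probe returns is final; there is no second fallback.
 */
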
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;	/* rc still holds the non-negative pool id here */
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t) rc;
}

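/*
 * rbd_add() backs the /sys/bus/rbd/add attribute; the spec string it
 * parses is described in Documentation/ABI/testing/sysfs-bus-rbd.  A
 * sketch of driving it from user space follows; rbd_map_example() is
 * illustrative only (and user-space code, hence kept under #if 0),
 * with error handling pared down to the minimum.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int rbd_map_example(const char *spec)
{
	/* spec is e.g. "<mon addrs> <options> <pool name> <image name>" */
	int fd = open("/sys/bus/rbd/add", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, spec, strlen(spec));
	close(fd);

	return n < 0 ? -1 : 0;
}
#endif
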
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

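/*
 * For comparison, the same lookup written with list_for_each_entry(),
 * which folds the list_entry() step into the iterator.  This is a
 * sketch only (under #if 0); behavior is identical to __rbd_get_dev()
 * above.
 */
#if 0
static struct rbd_device *__rbd_get_dev_alt(unsigned long dev_id)
{
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}
#endif
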
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}

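/*
 * rbd_dev_release() is the struct device release callback, run by the
 * driver core once the last reference on the device is dropped (the
 * path initiated by rbd_bus_del_dev()).  It undoes rbd_add() and
 * rbd_dev_probe_finish() step by step: header watch, disk, blkdev
 * major, header fields, device id, the rbd_dev itself, and finally
 * the module reference taken in rbd_add().
 */
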
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = kstrtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

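/*
 * rbd_remove() backs /sys/bus/rbd/remove and is the inverse of
 * rbd_add(): user space writes the numeric device id (the N in
 * /dev/rbdN).  A mapping that is still open is refused with -EBUSY
 * rather than torn down under its users; otherwise the
 * RBD_DEV_FLAG_REMOVING bit is set under rbd_dev->lock so the open
 * path can reject new users while the device is dismantled.
 */
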
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

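/*
 * Teardown mirrors rbd_sysfs_init() in reverse: the bus is
 * unregistered first, then the root device it hangs off of.  The init
 * error path uses the same ordering, unwinding the already-registered
 * root device when bus_register() fails.
 */
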
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");