/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256	/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	1

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_ALL	(0)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
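/*
 * An editorial aside, not part of the original source: the formula
 * above over-approximates the decimal digits an int can need.  Each
 * byte contributes at most log10(256) ~= 2.41 digits, which
 * (5 * sizeof (int)) / 2 rounds up to 2.5 digits per byte; the +1
 * leaves room for a leading '-'.  With a 4-byte int this yields a
 * width of 11, enough for "-2147483648" (11 characters).
 */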

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These five fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 obj_version;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64 pool_id;
	char *pool_name;

	char *image_id;
	char *image_name;

	u64 snap_id;
	char *snap_name;

	struct kref kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client *client;
	struct kref kref;
	struct list_head node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

struct rbd_obj_request {
	const char *object_name;
	u64 offset;		/* object start byte */
	u64 length;		/* bytes from offset */

	struct rbd_img_request *img_request;
	struct list_head links;	/* img_request->obj_requests */
	u32 which;		/* posn in image request list */

	enum obj_request_type type;
	union {
		struct bio *bio_list;
		struct {
			struct page **pages;
			u32 page_count;
		};
	};

	struct ceph_osd_request *osd_req;

	u64 xferred;		/* bytes transferred */
	u64 version;
	int result;
	atomic_t done;

	rbd_obj_callback_t callback;
	struct completion completion;

	struct kref kref;
};

struct rbd_img_request {
	struct request *rq;
	struct rbd_device *rbd_dev;
	u64 offset;		/* starting image byte offset */
	u64 length;		/* byte count from offset */
	bool write_request;	/* false for read */
	union {
		struct ceph_snap_context *snapc;	/* for writes */
		u64 snap_id;				/* for reads */
	};
	spinlock_t completion_lock;	/* protects next_completion */
	u32 next_completion;
	rbd_img_callback_t callback;

	u32 obj_request_count;
	struct list_head obj_requests;	/* rbd_obj_request structs */

	struct kref kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_snap {
	struct device dev;
	const char *name;
	u64 size;
	struct list_head node;
	u64 id;
	u64 features;
};

struct rbd_mapping {
	u64 size;
	u64 features;
	bool read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int dev_id;		/* blkdev unique id */

	int major;		/* blkdev assigned major */
	struct gendisk *disk;	/* blkdev's gendisk and rq */

	u32 image_format;	/* Either 1 or 2 */
	struct rbd_client *rbd_client;

	char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t lock;	/* queue, flags, open_count */

	struct rbd_image_header header;
	unsigned long flags;	/* possibly lock protected */
	struct rbd_spec *spec;

	char *header_name;

	struct ceph_file_layout layout;

	struct ceph_osd_event *watch_event;
	struct rbd_obj_request *watch_request;

	struct rbd_spec *parent_spec;
	u64 parent_overlap;

	/* protects updating the header */
	struct rw_semaphore header_rwsem;

	struct rbd_mapping mapping;

	struct list_head node;

	/* list of snapshots */
	struct list_head snaps;

	/* sysfs related */
	struct device dev;
	unsigned long open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_remove_snap_dev(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};
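/*
 * Usage sketch (an editorial note; the authoritative syntax is in
 * Documentation/ABI/testing/sysfs-bus-rbd): images are mapped and
 * unmapped from userspace by writing to the bus attributes above,
 * roughly:
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
 *       > /sys/bus/rbd/add
 *   # echo 0 > /sys/bus/rbd/remove	(0 is the rbd device id)
 */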

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			kref_get(&client_node->kref);
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
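/*
 * For illustration (an editorial note, not in the original source):
 * the option string handed in through /sys/bus/rbd/add is split on
 * commas, and each token the ceph option parser does not recognize
 * is fed through parse_rbd_opts_token().  So a mapping written as,
 * say, "... name=admin,ro rbd myimage -" reaches the Opt_read_only
 * case above via the "ro" alternate spelling and sets
 * rbd_opts->read_only = true.
 */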

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client.  Called via kref_put(); removes the client
 * from rbd_client_list, taking rbd_client_list_lock itself.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;
	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
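/*
 * A rough back-of-the-envelope (editorial, not from the original):
 * with 8-byte snapshot ids the check above caps snap_count near
 * SIZE_MAX / 8.  On 64-bit that is astronomically large, but on a
 * 32-bit kernel (SIZE_MAX == 2^32 - 1) it works out to roughly half
 * a billion snapshots, so a corrupt or hostile on-disk header cannot
 * push the allocation sizes computed below into overflow.
 */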

/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			return -EIO;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}

static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}

static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
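/*
 * Worked example (added for illustration, not in the original): with
 * the common obj_order of 22 (4 MiB objects), an image byte offset
 * of 9 MiB and a 2 MiB request give:
 *
 *	segment            = 0x900000 >> 22		= 2
 *	rbd_segment_name() = "<object_prefix>.000000000002"
 *	rbd_segment_offset = 0x900000 & 0x3fffff	= 1 MiB
 *	rbd_segment_length = min(2 MiB, 4 MiB - 1 MiB)	= 2 MiB
 *
 * A 4 MiB request at the same offset would instead be trimmed to
 * 3 MiB, the bytes remaining in that object.
 */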

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
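/*
 * Illustrative usage (an editorial sketch, not from the original
 * source): to split one source chain into per-object chains, call
 * this in a loop, e.g.
 *
 *	struct bio *bi = rq->bio;
 *	unsigned int off = 0;
 *
 *	chain = bio_chain_clone_range(&bi, &off, obj_bytes, GFP_NOIO);
 *
 * After each call, bi and off point at the first un-cloned byte, so
 * the next call picks up exactly where the previous clone ended.
 */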

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	rbd_obj_request_get(obj_request);
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}

static void obj_request_done_init(struct rbd_obj_request *obj_request)
{
	atomic_set(&obj_request->done, 0);
	smp_wmb();
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	int done;

	done = atomic_inc_return(&obj_request->done);
	if (done > 1) {
		struct rbd_img_request *img_request = obj_request->img_request;
		struct rbd_device *rbd_dev;

		rbd_dev = img_request ? img_request->rbd_dev : NULL;
		rbd_warn(rbd_dev, "obj_request %p was already done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return atomic_read(&obj_request->done) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
	if (obj_request->result == -ENOENT) {
		zero_bio_chain(obj_request->bio_list, 0);
		obj_request->result = 0;
		obj_request->xferred = obj_request->length;
	} else if (obj_request->xferred < obj_request->length &&
			!obj_request->result) {
		zero_bio_chain(obj_request->bio_list, obj_request->xferred);
		obj_request->xferred = obj_request->length;
	}
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
		obj_request->result, obj_request->xferred, obj_request->length);
	if (obj_request->img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.
	 * Our xferred value is the number of bytes transferred
	 * back.  Set it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	rbd_assert(!!obj_request->img_request ^
				(obj_request->which == BAD_WHICH));

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	WARN_ON(osd_req->r_num_ops != 1);	/* For now */

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64) UINT_MAX);
	opcode = osd_req->r_request_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request,
					struct ceph_osd_req_op *op)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_data *osd_data;
	struct timespec now;
	struct timespec *mtime;
	u64 snap_id = CEPH_NOSNAP;
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;

	if (img_request) {
		rbd_assert(img_request->write_request == write_request);
		if (img_request->write_request)
			snapc = img_request->snapc;
		else
			snap_id = img_request->snap_id;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */
	osd_data = write_request ? &osd_req->r_data_out : &osd_req->r_data_in;

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		rbd_assert(obj_request->bio_list != NULL);
		osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
		osd_data->bio = obj_request->bio_list;
		break;
	case OBJ_REQUEST_PAGES:
		osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
		osd_data->pages = obj_request->pages;
		osd_data->length = obj_request->length;
		osd_data->alignment = offset & ~PAGE_MASK;
		osd_data->pages_from_pool = false;
		osd_data->own_pages = false;
		break;
	}

	if (write_request) {
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
		now = CURRENT_TIME;
		mtime = &now;
	} else {
		osd_req->r_flags = CEPH_OSD_FLAG_READ;
		mtime = NULL;	/* not needed for reads */
		offset = 0;	/* These are not used... */
		length = 0;	/* ...for osd read requests */
	}

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	/* osd_req will get its own reference to snapc (if non-null) */

	ceph_osdc_build_request(osd_req, offset, 1, op,
				snapc, snap_id, mtime);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	obj_request_done_init(obj_request);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
		if (WARN_ON(!snapc)) {
			kfree(img_request);
			return NULL;	/* Shouldn't happen */
		}
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->write_request = write_request;
	if (write_request)
		img_request->snapc = snapc;
	else
		img_request->snap_id = rbd_dev->spec->snap_id;
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request->write_request)
		ceph_put_snap_context(img_request->snapc);

	kfree(img_request);
}

static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
					struct bio *bio_list)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	unsigned int bio_offset;
	u64 image_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p bio %p\n", __func__, img_request, bio_list);

	opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
					      : CEPH_OSD_OP_READ;
	bio_offset = 0;
	image_offset = img_request->offset;
	rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
	resid = img_request->length;
	rbd_assert(resid > 0);
	while (resid) {
		const char *object_name;
		unsigned int clone_size;
		struct ceph_osd_req_op op;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, image_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, image_offset);
		length = rbd_segment_length(rbd_dev, image_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length,
						OBJ_REQUEST_BIO);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		rbd_assert(length <= (u64) UINT_MAX);
		clone_size = (unsigned int) length;
		obj_request->bio_list = bio_chain_clone_range(&bio_list,
						&bio_offset, clone_size,
						GFP_ATOMIC);
		if (!obj_request->bio_list)
			goto out_partial;

		/*
		 * Build up the op to use in building the osd
		 * request.  Note that the contents of the op are
		 * copied by rbd_osd_req_create().
		 */
		osd_req_op_extent_init(&op, opcode, offset, length, 0, 0);
		obj_request->osd_req = rbd_osd_req_create(rbd_dev,
						img_request->write_request,
						obj_request, &op);
		if (!obj_request->osd_req)
			goto out_partial;
		/* status and version are initially zero-filled */

		rbd_img_obj_request_add(img_request, obj_request);

		image_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->rq != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		unsigned int xferred;
		int result;

		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;

		rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
		xferred = (unsigned int) obj_request->xferred;
		result = (int) obj_request->result;
		if (result)
			rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
				img_request->write_request ? "write" : "read",
				result, xferred);

		more = blk_end_request(img_request->rq, result, xferred);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		obj_request->callback = rbd_img_obj_callback;
		ret = rbd_obj_request_submit(osdc, obj_request);
		if (ret)
			return ret;
		/*
		 * The image request has its own reference to each
		 * of its object requests, so we can safely drop the
		 * initial one here.
		 */
		rbd_obj_request_put(obj_request);
	}

	return 0;
}

static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
				   u64 ver, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op op;
	struct ceph_osd_client *osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	osd_req_op_watch_init(&op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0);
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, &op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	obj_request->callback = rbd_obj_request_put;
	ret = rbd_obj_request_submit(osdc, obj_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	u64 hver;
	int rc;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long) notify_id,
		(unsigned int) opcode);
	rc = rbd_dev_refresh(rbd_dev, &hver);
	if (rc)
		rbd_warn(rbd_dev, "got notification but failed to "
			 "update snaps: %d\n", rc);

	rbd_obj_notify_ack(rbd_dev, hver, notify_id);
}

/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op op;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	osd_req_op_watch_init(&op, CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie,
				rbd_dev->header.obj_version, start);
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
							obj_request, &op);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}

/*
 * Synchronous osd object method call
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     char *inbound,
			     size_t inbound_size,
			     u64 *version)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	struct ceph_osd_req_op op;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations but they
	 * don't involve object data (so no offset or length).
	 * The result should be placed into the inbound buffer
1845 * provided. They also supply outbound data--parameters for
1846 * the object method. Currently if this is present it will
1847 * be a snapshot id.
1848 */
1849 page_count = (u32) calc_pages_for(0, inbound_size);
1850 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
1851 if (IS_ERR(pages))
1852 return PTR_ERR(pages);
1853
1854 ret = -ENOMEM;
1855 obj_request = rbd_obj_request_create(object_name, 0, 0,
1856 OBJ_REQUEST_PAGES);
1857 if (!obj_request)
1858 goto out;
1859
1860 obj_request->pages = pages;
1861 obj_request->page_count = page_count;
1862
1863 osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, class_name, method_name,
1864 outbound, outbound_size);
1865 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
1866 obj_request, &op);
1867 if (!obj_request->osd_req)
1868 goto out;
1869
1870 osdc = &rbd_dev->rbd_client->client->osdc;
1871 ret = rbd_obj_request_submit(osdc, obj_request);
1872 if (ret)
1873 goto out;
1874 ret = rbd_obj_request_wait(obj_request);
1875 if (ret)
1876 goto out;
1877
1878 ret = obj_request->result;
1879 if (ret < 0)
1880 goto out;
1881 ret = 0;
1882 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
1883 if (version)
1884 *version = obj_request->version;
1885 out:
1886 if (obj_request)
1887 rbd_obj_request_put(obj_request);
1888 else
1889 ceph_release_page_vector(pages, page_count);
1890
1891 return ret;
1892 }
1893
1894 static void rbd_request_fn(struct request_queue *q)
1895 __releases(q->queue_lock) __acquires(q->queue_lock)
1896 {
1897 struct rbd_device *rbd_dev = q->queuedata;
1898 bool read_only = rbd_dev->mapping.read_only;
1899 struct request *rq;
1900 int result;
1901
1902 while ((rq = blk_fetch_request(q))) {
1903 bool write_request = rq_data_dir(rq) == WRITE;
1904 struct rbd_img_request *img_request;
1905 u64 offset;
1906 u64 length;
1907
1908 /* Ignore any non-FS requests that filter through. */
1909
1910 if (rq->cmd_type != REQ_TYPE_FS) {
1911 dout("%s: non-fs request type %d\n", __func__,
1912 (int) rq->cmd_type);
1913 __blk_end_request_all(rq, 0);
1914 continue;
1915 }
1916
1917 /* Ignore/skip any zero-length requests */
1918
1919 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
1920 length = (u64) blk_rq_bytes(rq);
1921
1922 if (!length) {
1923 dout("%s: zero-length request\n", __func__);
1924 __blk_end_request_all(rq, 0);
1925 continue;
1926 }
1927
1928 spin_unlock_irq(q->queue_lock);
1929
1930 /* Disallow writes to a read-only device */
1931
1932 if (write_request) {
1933 result = -EROFS;
1934 if (read_only)
1935 goto end_request;
1936 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
1937 }
1938
1939 /*
1940 * Quit early if the mapped snapshot no longer
1941 * exists. It's still possible the snapshot will
1942 * have disappeared by the time our request arrives
1943 * at the osd, but there's no sense in sending it if
1944 * we already know.
1945 */
1946 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
1947 dout("request for non-existent snapshot");
1948 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
1949 result = -ENXIO;
1950 goto end_request;
1951 }
1952
1953 result = -EINVAL;
1954 if (WARN_ON(offset && length > U64_MAX - offset + 1))
1955 goto end_request; /* Shouldn't happen */
1956
1957 result = -ENOMEM;
1958 img_request = rbd_img_request_create(rbd_dev, offset, length,
1959 write_request);
1960 if (!img_request)
1961 goto end_request;
1962
1963 img_request->rq = rq;
1964
1965 result = rbd_img_request_fill_bio(img_request, rq->bio);
1966 if (!result)
1967 result = rbd_img_request_submit(img_request);
1968 if (result)
1969 rbd_img_request_put(img_request);
1970 end_request:
1971 spin_lock_irq(q->queue_lock);
1972 if (result < 0) {
1973 rbd_warn(rbd_dev, "obj_request %s result %d",
1974 write_request ? "write" : "read", result);
1975 __blk_end_request_all(rq, result);
1976 }
1977 }
1978 }
1979
1980 /*
1981 * A queue callback: makes sure we don't create a bio spanning
1982 * multiple osd objects. One exception would be with single-page bios,
1983 * which we handle later at bio_chain_clone_range(). (Worked example below.)
1984 */
1985 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
1986 struct bio_vec *bvec)
1987 {
1988 struct rbd_device *rbd_dev = q->queuedata;
1989 sector_t sector_offset;
1990 sector_t sectors_per_obj;
1991 sector_t obj_sector_offset;
1992 int ret;
1993
1994 /*
1995 * Find how far into its rbd object the bio's start sector
1996 * falls. The bio start sector is partition-relative, so first
1997 * convert it to an offset relative to the enclosing device.
1998 */
1999 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2000 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2001 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2002
2003 /*
2004 * Compute the number of bytes from that offset to the end
2005 * of the object. Account for what's already used by the bio.
2006 */
2007 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2008 if (ret > bmd->bi_size)
2009 ret -= bmd->bi_size;
2010 else
2011 ret = 0;
2012
2013 /*
2014 * Don't send back more than was asked for. And if the bio
2015 * was empty, let the whole thing through because: "Note
2016 * that a block device *must* allow a single page to be
2017 * added to an empty bio."
2018 */
2019 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2020 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2021 ret = (int) bvec->bv_len;
2022
2023 return ret;
2024 }
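/*
 * Worked example for the callback above (all numbers illustrative):
 * with a 4 MB object size (obj_order 22), sectors_per_obj is
 * 1 << (22 - 9) = 8192.  For a bio starting at device sector 12000,
 * obj_sector_offset is 12000 & 8191 = 3808, so 8192 - 3808 = 4384
 * sectors (2244608 bytes) remain in the object.  If the bio already
 * holds 2240512 bytes, only 4096 more bytes fit, so a 4096-byte
 * bvec is accepted whole and anything larger is clipped to 4096.
 */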
2025
2026 static void rbd_free_disk(struct rbd_device *rbd_dev)
2027 {
2028 struct gendisk *disk = rbd_dev->disk;
2029
2030 if (!disk)
2031 return;
2032
2033 if (disk->flags & GENHD_FL_UP)
2034 del_gendisk(disk);
2035 if (disk->queue)
2036 blk_cleanup_queue(disk->queue);
2037 put_disk(disk);
2038 }
2039
2040 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2041 const char *object_name,
2042 u64 offset, u64 length,
2043 char *buf, u64 *version)
2045 {
2046 struct ceph_osd_req_op op;
2047 struct rbd_obj_request *obj_request;
2048 struct ceph_osd_client *osdc;
2049 struct page **pages = NULL;
2050 u32 page_count;
2051 size_t size;
2052 int ret;
2053
2054 page_count = (u32) calc_pages_for(offset, length);
2055 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2056 if (IS_ERR(pages))
2057 return PTR_ERR(pages);
2058
2059 ret = -ENOMEM;
2060 obj_request = rbd_obj_request_create(object_name, offset, length,
2061 OBJ_REQUEST_PAGES);
2062 if (!obj_request)
2063 goto out;
2064
2065 obj_request->pages = pages;
2066 obj_request->page_count = page_count;
2067
2068 osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0);
2069 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2070 obj_request, &op);
2071 if (!obj_request->osd_req)
2072 goto out;
2073
2074 osdc = &rbd_dev->rbd_client->client->osdc;
2075 ret = rbd_obj_request_submit(osdc, obj_request);
2076 if (ret)
2077 goto out;
2078 ret = rbd_obj_request_wait(obj_request);
2079 if (ret)
2080 goto out;
2081
2082 ret = obj_request->result;
2083 if (ret < 0)
2084 goto out;
2085
2086 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2087 size = (size_t) obj_request->xferred;
2088 ceph_copy_from_page_vector(pages, buf, 0, size);
2089 rbd_assert(size <= (size_t) INT_MAX);
2090 ret = (int) size;
2091 if (version)
2092 *version = obj_request->version;
2093 out:
2094 if (obj_request)
2095 rbd_obj_request_put(obj_request);
2096 else
2097 ceph_release_page_vector(pages, page_count);
2098
2099 return ret;
2100 }
2101
2102 /*
2103 * Read the complete header for the given rbd device.
2104 *
2105 * Returns a pointer to a dynamically-allocated buffer containing
2106 * the complete and validated header. Caller can pass the address
2107 * of a variable that will be filled in with the version of the
2108 * header object at the time it was read.
2109 *
2110 * Returns a pointer-coded errno if a failure occurs.
2111 */
2112 static struct rbd_image_header_ondisk *
2113 rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
2114 {
2115 struct rbd_image_header_ondisk *ondisk = NULL;
2116 u32 snap_count = 0;
2117 u64 names_size = 0;
2118 u32 want_count;
2119 int ret;
2120
2121 /*
2122 * The complete header will include an array of its 64-bit
2123 * snapshot ids, followed by the names of those snapshots as
2124 * a contiguous block of NUL-terminated strings. Note that
2125 * the number of snapshots could change by the time we read
2126 * it in, in which case we re-read it.
2127 */
2128 do {
2129 size_t size;
2130
2131 kfree(ondisk);
2132
2133 size = sizeof (*ondisk);
2134 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2135 size += names_size;
2136 ondisk = kmalloc(size, GFP_KERNEL);
2137 if (!ondisk)
2138 return ERR_PTR(-ENOMEM);
2139
2140 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2141 0, size,
2142 (char *) ondisk, version);
2143 if (ret < 0)
2144 goto out_err;
2145 if (WARN_ON((size_t) ret < size)) {
2146 rbd_warn(rbd_dev, "short header read (want %zu got %d)",
2147 size, ret);
2148 ret = -ENXIO;
2149 goto out_err;
2150 }
2151 if (!rbd_dev_ondisk_valid(ondisk)) {
2152 ret = -ENXIO;
2153 rbd_warn(rbd_dev, "invalid header");
2154 goto out_err;
2155 }
2156
2157 names_size = le64_to_cpu(ondisk->snap_names_len);
2158 want_count = snap_count;
2159 snap_count = le32_to_cpu(ondisk->snap_count);
2160 } while (snap_count != want_count);
2161
2162 return ondisk;
2163
2164 out_err:
2165 kfree(ondisk);
2166
2167 return ERR_PTR(ret);
2168 }
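/*
 * Illustrative example of the re-read loop above: for an image with
 * two snapshots named "a" and "b1", the header object holds the
 * fixed ondisk header, two struct rbd_image_snap_ondisk entries,
 * and the names blob "a\0b1\0" (names_size = 5).  The first pass
 * reads only sizeof (*ondisk) bytes, learns snap_count and
 * snap_names_len from it, then re-reads with the full size; it
 * loops again only if the snapshot count changed in the meantime.
 */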
2169
2170 /*
2171 * Reload the on-disk header.
2172 */
2173 static int rbd_read_header(struct rbd_device *rbd_dev,
2174 struct rbd_image_header *header)
2175 {
2176 struct rbd_image_header_ondisk *ondisk;
2177 u64 ver = 0;
2178 int ret;
2179
2180 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
2181 if (IS_ERR(ondisk))
2182 return PTR_ERR(ondisk);
2183 ret = rbd_header_from_disk(header, ondisk);
2184 if (ret >= 0)
2185 header->obj_version = ver;
2186 kfree(ondisk);
2187
2188 return ret;
2189 }
2190
2191 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
2192 {
2193 struct rbd_snap *snap;
2194 struct rbd_snap *next;
2195
2196 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
2197 rbd_remove_snap_dev(snap);
2198 }
2199
2200 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2201 {
2202 sector_t size;
2203
2204 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
2205 return;
2206
2207 size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
2208 dout("setting size to %llu sectors", (unsigned long long) size);
2209 rbd_dev->mapping.size = (u64) size;
2210 set_capacity(rbd_dev->disk, size);
2211 }
2212
2213 /*
2214 * only read the first part of the ondisk header, without the snaps info
2215 */
2216 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
2217 {
2218 int ret;
2219 struct rbd_image_header h;
2220
2221 ret = rbd_read_header(rbd_dev, &h);
2222 if (ret < 0)
2223 return ret;
2224
2225 down_write(&rbd_dev->header_rwsem);
2226
2227 /* Update image size, and check for resize of mapped image */
2228 rbd_dev->header.image_size = h.image_size;
2229 rbd_update_mapping_size(rbd_dev);
2230
2231 /* rbd_dev->header.object_prefix shouldn't change */
2232 kfree(rbd_dev->header.snap_sizes);
2233 kfree(rbd_dev->header.snap_names);
2234 /* osd requests may still refer to snapc */
2235 ceph_put_snap_context(rbd_dev->header.snapc);
2236
2237 if (hver)
2238 *hver = h.obj_version;
2239 rbd_dev->header.obj_version = h.obj_version;
2240 rbd_dev->header.image_size = h.image_size;
2241 rbd_dev->header.snapc = h.snapc;
2242 rbd_dev->header.snap_names = h.snap_names;
2243 rbd_dev->header.snap_sizes = h.snap_sizes;
2244 /* Free the extra copy of the object prefix */
2245 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
2246 kfree(h.object_prefix);
2247
2248 ret = rbd_dev_snaps_update(rbd_dev);
2249 if (!ret)
2250 ret = rbd_dev_snaps_register(rbd_dev);
2251
2252 up_write(&rbd_dev->header_rwsem);
2253
2254 return ret;
2255 }
2256
2257 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
2258 {
2259 int ret;
2260
2261 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
2262 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2263 if (rbd_dev->image_format == 1)
2264 ret = rbd_dev_v1_refresh(rbd_dev, hver);
2265 else
2266 ret = rbd_dev_v2_refresh(rbd_dev, hver);
2267 mutex_unlock(&ctl_mutex);
2268
2269 return ret;
2270 }
2271
2272 static int rbd_init_disk(struct rbd_device *rbd_dev)
2273 {
2274 struct gendisk *disk;
2275 struct request_queue *q;
2276 u64 segment_size;
2277
2278 /* create gendisk info */
2279 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
2280 if (!disk)
2281 return -ENOMEM;
2282
2283 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
2284 rbd_dev->dev_id);
2285 disk->major = rbd_dev->major;
2286 disk->first_minor = 0;
2287 disk->fops = &rbd_bd_ops;
2288 disk->private_data = rbd_dev;
2289
2290 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
2291 if (!q)
2292 goto out_disk;
2293
2294 /* We use the default size, but let's be explicit about it. */
2295 blk_queue_physical_block_size(q, SECTOR_SIZE);
2296
2297 /* set io sizes to object size */
2298 segment_size = rbd_obj_bytes(&rbd_dev->header);
2299 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
2300 blk_queue_max_segment_size(q, segment_size);
2301 blk_queue_io_min(q, segment_size);
2302 blk_queue_io_opt(q, segment_size);
2303
2304 blk_queue_merge_bvec(q, rbd_merge_bvec);
2305 disk->queue = q;
2306
2307 q->queuedata = rbd_dev;
2308
2309 rbd_dev->disk = disk;
2310
2311 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
2312
2313 return 0;
2314 out_disk:
2315 put_disk(disk);
2316
2317 return -ENOMEM;
2318 }
2319
2320 /*
2321 sysfs
2322 */
2323
2324 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
2325 {
2326 return container_of(dev, struct rbd_device, dev);
2327 }
2328
2329 static ssize_t rbd_size_show(struct device *dev,
2330 struct device_attribute *attr, char *buf)
2331 {
2332 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2333 sector_t size;
2334
2335 down_read(&rbd_dev->header_rwsem);
2336 size = get_capacity(rbd_dev->disk);
2337 up_read(&rbd_dev->header_rwsem);
2338
2339 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
2340 }
2341
2342 /*
2343 * Note this shows the features for whatever's mapped, which is not
2344 * necessarily the base image.
2345 */
2346 static ssize_t rbd_features_show(struct device *dev,
2347 struct device_attribute *attr, char *buf)
2348 {
2349 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2350
2351 return sprintf(buf, "0x%016llx\n",
2352 (unsigned long long) rbd_dev->mapping.features);
2353 }
2354
2355 static ssize_t rbd_major_show(struct device *dev,
2356 struct device_attribute *attr, char *buf)
2357 {
2358 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2359
2360 return sprintf(buf, "%d\n", rbd_dev->major);
2361 }
2362
2363 static ssize_t rbd_client_id_show(struct device *dev,
2364 struct device_attribute *attr, char *buf)
2365 {
2366 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2367
2368 return sprintf(buf, "client%lld\n",
2369 ceph_client_id(rbd_dev->rbd_client->client));
2370 }
2371
2372 static ssize_t rbd_pool_show(struct device *dev,
2373 struct device_attribute *attr, char *buf)
2374 {
2375 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2376
2377 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
2378 }
2379
2380 static ssize_t rbd_pool_id_show(struct device *dev,
2381 struct device_attribute *attr, char *buf)
2382 {
2383 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2384
2385 return sprintf(buf, "%llu\n",
2386 (unsigned long long) rbd_dev->spec->pool_id);
2387 }
2388
2389 static ssize_t rbd_name_show(struct device *dev,
2390 struct device_attribute *attr, char *buf)
2391 {
2392 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2393
2394 if (rbd_dev->spec->image_name)
2395 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
2396
2397 return sprintf(buf, "(unknown)\n");
2398 }
2399
2400 static ssize_t rbd_image_id_show(struct device *dev,
2401 struct device_attribute *attr, char *buf)
2402 {
2403 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2404
2405 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
2406 }
2407
2408 /*
2409 * Shows the name of the currently-mapped snapshot (or
2410 * RBD_SNAP_HEAD_NAME for the base image).
2411 */
2412 static ssize_t rbd_snap_show(struct device *dev,
2413 struct device_attribute *attr,
2414 char *buf)
2415 {
2416 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2417
2418 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
2419 }
2420
2421 /*
2422 * For an rbd v2 image, shows the pool id, image id, and snapshot id
2423 * for the parent image. If there is no parent, simply shows
2424 * "(no parent image)".
2425 */
2426 static ssize_t rbd_parent_show(struct device *dev,
2427 struct device_attribute *attr,
2428 char *buf)
2429 {
2430 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2431 struct rbd_spec *spec = rbd_dev->parent_spec;
2432 int count;
2433 char *bufp = buf;
2434
2435 if (!spec)
2436 return sprintf(buf, "(no parent image)\n");
2437
2438 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
2439 (unsigned long long) spec->pool_id, spec->pool_name);
2440 if (count < 0)
2441 return count;
2442 bufp += count;
2443
2444 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
2445 spec->image_name ? spec->image_name : "(unknown)");
2446 if (count < 0)
2447 return count;
2448 bufp += count;
2449
2450 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
2451 (unsigned long long) spec->snap_id, spec->snap_name);
2452 if (count < 0)
2453 return count;
2454 bufp += count;
2455
2456 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
2457 if (count < 0)
2458 return count;
2459 bufp += count;
2460
2461 return (ssize_t) (bufp - buf);
2462 }
2463
2464 static ssize_t rbd_image_refresh(struct device *dev,
2465 struct device_attribute *attr,
2466 const char *buf,
2467 size_t size)
2468 {
2469 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2470 int ret;
2471
2472 ret = rbd_dev_refresh(rbd_dev, NULL);
2473
2474 return ret < 0 ? ret : size;
2475 }
2476
2477 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
2478 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
2479 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
2480 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
2481 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
2482 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
2483 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
2484 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
2485 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
2486 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
2487 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
2488
2489 static struct attribute *rbd_attrs[] = {
2490 &dev_attr_size.attr,
2491 &dev_attr_features.attr,
2492 &dev_attr_major.attr,
2493 &dev_attr_client_id.attr,
2494 &dev_attr_pool.attr,
2495 &dev_attr_pool_id.attr,
2496 &dev_attr_name.attr,
2497 &dev_attr_image_id.attr,
2498 &dev_attr_current_snap.attr,
2499 &dev_attr_parent.attr,
2500 &dev_attr_refresh.attr,
2501 NULL
2502 };
2503
2504 static struct attribute_group rbd_attr_group = {
2505 .attrs = rbd_attrs,
2506 };
2507
2508 static const struct attribute_group *rbd_attr_groups[] = {
2509 &rbd_attr_group,
2510 NULL
2511 };
2512
2513 static void rbd_sysfs_dev_release(struct device *dev)
2514 {
2515 }
2516
2517 static struct device_type rbd_device_type = {
2518 .name = "rbd",
2519 .groups = rbd_attr_groups,
2520 .release = rbd_sysfs_dev_release,
2521 };
2522
2523
2524 /*
2525 sysfs - snapshots
2526 */
2527
2528 static ssize_t rbd_snap_size_show(struct device *dev,
2529 struct device_attribute *attr,
2530 char *buf)
2531 {
2532 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2533
2534 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
2535 }
2536
2537 static ssize_t rbd_snap_id_show(struct device *dev,
2538 struct device_attribute *attr,
2539 char *buf)
2540 {
2541 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2542
2543 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
2544 }
2545
2546 static ssize_t rbd_snap_features_show(struct device *dev,
2547 struct device_attribute *attr,
2548 char *buf)
2549 {
2550 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2551
2552 return sprintf(buf, "0x%016llx\n",
2553 (unsigned long long) snap->features);
2554 }
2555
2556 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
2557 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
2558 static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
2559
2560 static struct attribute *rbd_snap_attrs[] = {
2561 &dev_attr_snap_size.attr,
2562 &dev_attr_snap_id.attr,
2563 &dev_attr_snap_features.attr,
2564 NULL,
2565 };
2566
2567 static struct attribute_group rbd_snap_attr_group = {
2568 .attrs = rbd_snap_attrs,
2569 };
2570
2571 static void rbd_snap_dev_release(struct device *dev)
2572 {
2573 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2574 kfree(snap->name);
2575 kfree(snap);
2576 }
2577
2578 static const struct attribute_group *rbd_snap_attr_groups[] = {
2579 &rbd_snap_attr_group,
2580 NULL
2581 };
2582
2583 static struct device_type rbd_snap_device_type = {
2584 .groups = rbd_snap_attr_groups,
2585 .release = rbd_snap_dev_release,
2586 };
2587
2588 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
2589 {
2590 kref_get(&spec->kref);
2591
2592 return spec;
2593 }
2594
2595 static void rbd_spec_free(struct kref *kref);
2596 static void rbd_spec_put(struct rbd_spec *spec)
2597 {
2598 if (spec)
2599 kref_put(&spec->kref, rbd_spec_free);
2600 }
2601
2602 static struct rbd_spec *rbd_spec_alloc(void)
2603 {
2604 struct rbd_spec *spec;
2605
2606 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
2607 if (!spec)
2608 return NULL;
2609 kref_init(&spec->kref);
2610
2611 rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
2612
2613 return spec;
2614 }
2615
2616 static void rbd_spec_free(struct kref *kref)
2617 {
2618 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
2619
2620 kfree(spec->pool_name);
2621 kfree(spec->image_id);
2622 kfree(spec->image_name);
2623 kfree(spec->snap_name);
2624 kfree(spec);
2625 }
2626
2627 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
2628 struct rbd_spec *spec)
2629 {
2630 struct rbd_device *rbd_dev;
2631
2632 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
2633 if (!rbd_dev)
2634 return NULL;
2635
2636 spin_lock_init(&rbd_dev->lock);
2637 rbd_dev->flags = 0;
2638 INIT_LIST_HEAD(&rbd_dev->node);
2639 INIT_LIST_HEAD(&rbd_dev->snaps);
2640 init_rwsem(&rbd_dev->header_rwsem);
2641
2642 rbd_dev->spec = spec;
2643 rbd_dev->rbd_client = rbdc;
2644
2645 /* Initialize the layout used for all rbd requests */
2646
2647 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2648 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
2649 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2650 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
2651
2652 return rbd_dev;
2653 }
2654
2655 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
2656 {
2657 rbd_spec_put(rbd_dev->parent_spec);
2658 kfree(rbd_dev->header_name);
2659 rbd_put_client(rbd_dev->rbd_client);
2660 rbd_spec_put(rbd_dev->spec);
2661 kfree(rbd_dev);
2662 }
2663
2664 static bool rbd_snap_registered(struct rbd_snap *snap)
2665 {
2666 bool ret = snap->dev.type == &rbd_snap_device_type;
2667 bool reg = device_is_registered(&snap->dev);
2668
2669 rbd_assert(!ret ^ reg);
2670
2671 return ret;
2672 }
2673
2674 static void rbd_remove_snap_dev(struct rbd_snap *snap)
2675 {
2676 list_del(&snap->node);
2677 if (device_is_registered(&snap->dev))
2678 device_unregister(&snap->dev);
2679 }
2680
2681 static int rbd_register_snap_dev(struct rbd_snap *snap,
2682 struct device *parent)
2683 {
2684 struct device *dev = &snap->dev;
2685 int ret;
2686
2687 dev->type = &rbd_snap_device_type;
2688 dev->parent = parent;
2689 dev->release = rbd_snap_dev_release;
2690 dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
2691 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2692
2693 ret = device_register(dev);
2694
2695 return ret;
2696 }
2697
2698 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2699 const char *snap_name,
2700 u64 snap_id, u64 snap_size,
2701 u64 snap_features)
2702 {
2703 struct rbd_snap *snap;
2704 int ret;
2705
2706 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2707 if (!snap)
2708 return ERR_PTR(-ENOMEM);
2709
2710 ret = -ENOMEM;
2711 snap->name = kstrdup(snap_name, GFP_KERNEL);
2712 if (!snap->name)
2713 goto err;
2714
2715 snap->id = snap_id;
2716 snap->size = snap_size;
2717 snap->features = snap_features;
2718
2719 return snap;
2720
2721 err:
2722 kfree(snap->name);
2723 kfree(snap);
2724
2725 return ERR_PTR(ret);
2726 }
2727
2728 static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
2729 u64 *snap_size, u64 *snap_features)
2730 {
2731 char *snap_name;
2732
2733 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
2734
2735 *snap_size = rbd_dev->header.snap_sizes[which];
2736 *snap_features = 0; /* No features for v1 */
2737
2738 /* Skip over names until we find the one we are looking for */
2739
2740 snap_name = rbd_dev->header.snap_names;
2741 while (which--)
2742 snap_name += strlen(snap_name) + 1;
2743
2744 return snap_name;
2745 }
2746
2747 /*
2748 * Get the size and object order for an image snapshot, or if
2749 * snap_id is CEPH_NOSNAP, gets this information for the base
2750 * image.
2751 */
2752 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
2753 u8 *order, u64 *snap_size)
2754 {
2755 __le64 snapid = cpu_to_le64(snap_id);
2756 int ret;
2757 struct {
2758 u8 order;
2759 __le64 size;
2760 } __attribute__ ((packed)) size_buf = { 0 };
2761
2762 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2763 "rbd", "get_size",
2764 (char *) &snapid, sizeof (snapid),
2765 (char *) &size_buf, sizeof (size_buf), NULL);
2766 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2767 if (ret < 0)
2768 return ret;
2769
2770 *order = size_buf.order;
2771 *snap_size = le64_to_cpu(size_buf.size);
2772
2773 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
2774 (unsigned long long) snap_id, (unsigned int) *order,
2775 (unsigned long long) *snap_size);
2776
2777 return 0;
2778 }
2779
2780 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2781 {
2782 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2783 &rbd_dev->header.obj_order,
2784 &rbd_dev->header.image_size);
2785 }
2786
2787 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2788 {
2789 void *reply_buf;
2790 int ret;
2791 void *p;
2792
2793 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2794 if (!reply_buf)
2795 return -ENOMEM;
2796
2797 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2798 "rbd", "get_object_prefix",
2799 NULL, 0,
2800 reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
2801 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2802 if (ret < 0)
2803 goto out;
2804
2805 p = reply_buf;
2806 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2807 p + RBD_OBJ_PREFIX_LEN_MAX,
2808 NULL, GFP_NOIO);
2809
2810 if (IS_ERR(rbd_dev->header.object_prefix)) {
2811 ret = PTR_ERR(rbd_dev->header.object_prefix);
2812 rbd_dev->header.object_prefix = NULL;
2813 } else {
2814 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
2815 }
2816
2817 out:
2818 kfree(reply_buf);
2819
2820 return ret;
2821 }
2822
2823 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2824 u64 *snap_features)
2825 {
2826 __le64 snapid = cpu_to_le64(snap_id);
2827 struct {
2828 __le64 features;
2829 __le64 incompat;
2830 } features_buf = { 0 };
2831 u64 incompat;
2832 int ret;
2833
2834 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2835 "rbd", "get_features",
2836 (char *) &snapid, sizeof (snapid),
2837 (char *) &features_buf, sizeof (features_buf),
2838 NULL);
2839 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2840 if (ret < 0)
2841 return ret;
2842
2843 incompat = le64_to_cpu(features_buf.incompat);
2844 if (incompat & ~RBD_FEATURES_ALL)
2845 return -ENXIO;
2846
2847 *snap_features = le64_to_cpu(features_buf.features);
2848
2849 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2850 (unsigned long long) snap_id,
2851 (unsigned long long) *snap_features,
2852 (unsigned long long) le64_to_cpu(features_buf.incompat));
2853
2854 return 0;
2855 }
2856
2857 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2858 {
2859 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2860 &rbd_dev->header.features);
2861 }
2862
2863 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
2864 {
2865 struct rbd_spec *parent_spec;
2866 size_t size;
2867 void *reply_buf = NULL;
2868 __le64 snapid;
2869 void *p;
2870 void *end;
2871 char *image_id;
2872 u64 overlap;
2873 int ret;
2874
2875 parent_spec = rbd_spec_alloc();
2876 if (!parent_spec)
2877 return -ENOMEM;
2878
2879 size = sizeof (__le64) + /* pool_id */
2880 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
2881 sizeof (__le64) + /* snap_id */
2882 sizeof (__le64); /* overlap */
2883 reply_buf = kmalloc(size, GFP_KERNEL);
2884 if (!reply_buf) {
2885 ret = -ENOMEM;
2886 goto out_err;
2887 }
2888
2889 snapid = cpu_to_le64(CEPH_NOSNAP);
2890 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2891 "rbd", "get_parent",
2892 (char *) &snapid, sizeof (snapid),
2893 (char *) reply_buf, size, NULL);
2894 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2895 if (ret < 0)
2896 goto out_err;
2897
2898 ret = -ERANGE;
2899 p = reply_buf;
2900 end = (char *) reply_buf + size;
2901 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
2902 if (parent_spec->pool_id == CEPH_NOPOOL)
2903 goto out; /* No parent? No problem. */
2904
2905 /* The ceph file layout needs to fit pool id in 32 bits */
2906
2907 ret = -EIO;
2908 if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
2909 goto out;
2910
2911 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
2912 if (IS_ERR(image_id)) {
2913 ret = PTR_ERR(image_id);
2914 goto out_err;
2915 }
2916 parent_spec->image_id = image_id;
2917 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
2918 ceph_decode_64_safe(&p, end, overlap, out_err);
2919
2920 rbd_dev->parent_overlap = overlap;
2921 rbd_dev->parent_spec = parent_spec;
2922 parent_spec = NULL; /* rbd_dev now owns this */
2923 out:
2924 ret = 0;
2925 out_err:
2926 kfree(reply_buf);
2927 rbd_spec_put(parent_spec);
2928
2929 return ret;
2930 }
2931
2932 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
2933 {
2934 size_t image_id_size;
2935 char *image_id;
2936 void *p;
2937 void *end;
2938 size_t size;
2939 void *reply_buf = NULL;
2940 size_t len = 0;
2941 char *image_name = NULL;
2942 int ret;
2943
2944 rbd_assert(!rbd_dev->spec->image_name);
2945
2946 len = strlen(rbd_dev->spec->image_id);
2947 image_id_size = sizeof (__le32) + len;
2948 image_id = kmalloc(image_id_size, GFP_KERNEL);
2949 if (!image_id)
2950 return NULL;
2951
2952 p = image_id;
2953 end = (char *) image_id + image_id_size;
2954 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
2955
2956 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
2957 reply_buf = kmalloc(size, GFP_KERNEL);
2958 if (!reply_buf)
2959 goto out;
2960
2961 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
2962 "rbd", "dir_get_name",
2963 image_id, image_id_size,
2964 (char *) reply_buf, size, NULL);
2965 if (ret < 0)
2966 goto out;
2967 p = reply_buf;
2968 end = (char *) reply_buf + size;
2969 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
2970 if (IS_ERR(image_name))
2971 image_name = NULL;
2972 else
2973 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
2974 out:
2975 kfree(reply_buf);
2976 kfree(image_id);
2977
2978 return image_name;
2979 }
2980
2981 /*
2982 * When a parent image gets probed, we only have the pool, image,
2983 * and snapshot ids but not the names of any of them. This call
2984 * is made later to fill in those names. It has to be done after
2985 * rbd_dev_snaps_update() has completed because some of the
2986 * information (in particular, snapshot name) is not available
2987 * until then.
2988 */
2989 static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
2990 {
2991 struct ceph_osd_client *osdc;
2992 const char *name;
2993 void *reply_buf = NULL;
2994 int ret;
2995
2996 if (rbd_dev->spec->pool_name)
2997 return 0; /* Already have the names */
2998
2999 /* Look up the pool name */
3000
3001 osdc = &rbd_dev->rbd_client->client->osdc;
3002 name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
3003 if (!name) {
3004 rbd_warn(rbd_dev, "there is no pool with id %llu",
3005 rbd_dev->spec->pool_id); /* Really a BUG() */
3006 return -EIO;
3007 }
3008
3009 rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
3010 if (!rbd_dev->spec->pool_name)
3011 return -ENOMEM;
3012
3013 /* Fetch the image name; tolerate failure here */
3014
3015 name = rbd_dev_image_name(rbd_dev);
3016 if (name)
3017 rbd_dev->spec->image_name = (char *) name;
3018 else
3019 rbd_warn(rbd_dev, "unable to get image name");
3020
3021 /* Look up the snapshot name. */
3022
3023 name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
3024 if (!name) {
3025 rbd_warn(rbd_dev, "no snapshot with id %llu",
3026 rbd_dev->spec->snap_id); /* Really a BUG() */
3027 ret = -EIO;
3028 goto out_err;
3029 }
3030 ret = -ENOMEM;
3031 rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
3032 if (!rbd_dev->spec->snap_name)
		goto out_err;
3033
3034 return 0;
3035 out_err:
3036 kfree(reply_buf);
3037 kfree(rbd_dev->spec->pool_name);
3038 rbd_dev->spec->pool_name = NULL;
3039
3040 return ret;
3041 }
3042
3043 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
3044 {
3045 size_t size;
3046 int ret;
3047 void *reply_buf;
3048 void *p;
3049 void *end;
3050 u64 seq;
3051 u32 snap_count;
3052 struct ceph_snap_context *snapc;
3053 u32 i;
3054
3055 /*
3056 * We'll need room for the seq value (maximum snapshot id),
3057 * snapshot count, and array of that many snapshot ids.
3058 * For now we have a fixed upper limit on the number we're
3059 * prepared to receive.
3060 */
3061 size = sizeof (__le64) + sizeof (__le32) +
3062 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3063 reply_buf = kzalloc(size, GFP_KERNEL);
3064 if (!reply_buf)
3065 return -ENOMEM;
3066
3067 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3068 "rbd", "get_snapcontext",
3069 NULL, 0,
3070 reply_buf, size, ver);
3071 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3072 if (ret < 0)
3073 goto out;
3074
3075 ret = -ERANGE;
3076 p = reply_buf;
3077 end = (char *) reply_buf + size;
3078 ceph_decode_64_safe(&p, end, seq, out);
3079 ceph_decode_32_safe(&p, end, snap_count, out);
3080
3081 /*
3082 * Make sure the reported number of snapshot ids wouldn't go
3083 * beyond the end of our buffer. But before checking that,
3084 * make sure the computed size of the snapshot context we
3085 * allocate is representable in a size_t.
3086 */
3087 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3088 / sizeof (u64)) {
3089 ret = -EINVAL;
3090 goto out;
3091 }
3092 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3093 goto out;
3094
3095 size = sizeof (struct ceph_snap_context) +
3096 snap_count * sizeof (snapc->snaps[0]);
3097 snapc = kmalloc(size, GFP_KERNEL);
3098 if (!snapc) {
3099 ret = -ENOMEM;
3100 goto out;
3101 }
3102
3103 atomic_set(&snapc->nref, 1);
3104 snapc->seq = seq;
3105 snapc->num_snaps = snap_count;
3106 for (i = 0; i < snap_count; i++)
3107 snapc->snaps[i] = ceph_decode_64(&p);
3108
3109 rbd_dev->header.snapc = snapc;
	ret = 0;
3110
3111 dout(" snap context seq = %llu, snap_count = %u\n",
3112 (unsigned long long) seq, (unsigned int) snap_count);
3113
3114 out:
3115 kfree(reply_buf);
3116
3117 return ret;
3118 }
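/*
 * Sketch of the get_snapcontext reply decoded above (values are
 * illustrative): a context with seq 5 and snapshots 4 and 2 arrives
 * as
 *
 *	le64 seq        = 5
 *	le32 snap_count = 2
 *	le64 snaps[0]   = 4
 *	le64 snaps[1]   = 2
 *
 * that is, the maximum snapshot id, a count, and that many ids,
 * highest first, matching the struct ceph_snap_context built here.
 */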
3119
3120 static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3121 {
3122 size_t size;
3123 void *reply_buf;
3124 __le64 snap_id;
3125 int ret;
3126 void *p;
3127 void *end;
3128 char *snap_name;
3129
3130 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3131 reply_buf = kmalloc(size, GFP_KERNEL);
3132 if (!reply_buf)
3133 return ERR_PTR(-ENOMEM);
3134
3135 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3136 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3137 "rbd", "get_snapshot_name",
3138 (char *) &snap_id, sizeof (snap_id),
3139 reply_buf, size, NULL);
3140 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3141 if (ret < 0)
3142 goto out;
3143
3144 p = reply_buf;
3145 end = (char *) reply_buf + size;
3146 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3147 if (IS_ERR(snap_name)) {
3148 ret = PTR_ERR(snap_name);
3149 goto out;
3150 } else {
3151 dout(" snap_id 0x%016llx snap_name = %s\n",
3152 (unsigned long long) le64_to_cpu(snap_id), snap_name);
3153 }
3154 kfree(reply_buf);
3155
3156 return snap_name;
3157 out:
3158 kfree(reply_buf);
3159
3160 return ERR_PTR(ret);
3161 }
3162
3163 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3164 u64 *snap_size, u64 *snap_features)
3165 {
3166 u64 snap_id;
3167 u8 order;
3168 int ret;
3169
3170 snap_id = rbd_dev->header.snapc->snaps[which];
3171 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
3172 if (ret)
3173 return ERR_PTR(ret);
3174 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
3175 if (ret)
3176 return ERR_PTR(ret);
3177
3178 return rbd_dev_v2_snap_name(rbd_dev, which);
3179 }
3180
3181 static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3182 u64 *snap_size, u64 *snap_features)
3183 {
3184 if (rbd_dev->image_format == 1)
3185 return rbd_dev_v1_snap_info(rbd_dev, which,
3186 snap_size, snap_features);
3187 if (rbd_dev->image_format == 2)
3188 return rbd_dev_v2_snap_info(rbd_dev, which,
3189 snap_size, snap_features);
3190 return ERR_PTR(-EINVAL);
3191 }
3192
3193 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
3194 {
3195 int ret;
3196 __u8 obj_order;
3197
3198 down_write(&rbd_dev->header_rwsem);
3199
3200 /* Grab old order first, to see if it changes */
3201
3202 obj_order = rbd_dev->header.obj_order,
3203 ret = rbd_dev_v2_image_size(rbd_dev);
3204 if (ret)
3205 goto out;
3206 if (rbd_dev->header.obj_order != obj_order) {
3207 ret = -EIO;
3208 goto out;
3209 }
3210 rbd_update_mapping_size(rbd_dev);
3211
3212 ret = rbd_dev_v2_snap_context(rbd_dev, hver);
3213 dout("rbd_dev_v2_snap_context returned %d\n", ret);
3214 if (ret)
3215 goto out;
3216 ret = rbd_dev_snaps_update(rbd_dev);
3217 dout("rbd_dev_snaps_update returned %d\n", ret);
3218 if (ret)
3219 goto out;
3220 ret = rbd_dev_snaps_register(rbd_dev);
3221 dout("rbd_dev_snaps_register returned %d\n", ret);
3222 out:
3223 up_write(&rbd_dev->header_rwsem);
3224
3225 return ret;
3226 }
3227
3228 /*
3229 * Scan the rbd device's current snapshot list and compare it to the
3230 * newly-received snapshot context. Remove any existing snapshots
3231 * not present in the new snapshot context. Add a new snapshot for
3232 * any snaphots in the snapshot context not in the current list.
3233 * And verify there are no changes to snapshots we already know
3234 * about.
3235 *
3236 * Assumes the snapshots in the snapshot context are sorted by
3237 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
3238 * are also maintained in that order.)
3239 */
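/*
 * Worked example (ids illustrative): current list 12, 8, 5; new
 * context 12, 9, 5.  The scan keeps 12 (present in both), adds a
 * device for 9 ahead of 8, removes 8 (absent from the new context),
 * then keeps 5, leaving 12, 9, 5, all in a single pass over the
 * two sorted sequences.
 */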
3240 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
3241 {
3242 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3243 const u32 snap_count = snapc->num_snaps;
3244 struct list_head *head = &rbd_dev->snaps;
3245 struct list_head *links = head->next;
3246 u32 index = 0;
3247
3248 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
3249 while (index < snap_count || links != head) {
3250 u64 snap_id;
3251 struct rbd_snap *snap;
3252 char *snap_name;
3253 u64 snap_size = 0;
3254 u64 snap_features = 0;
3255
3256 snap_id = index < snap_count ? snapc->snaps[index]
3257 : CEPH_NOSNAP;
3258 snap = links != head ? list_entry(links, struct rbd_snap, node)
3259 : NULL;
3260 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
3261
3262 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
3263 struct list_head *next = links->next;
3264
3265 /*
3266 * A previously-existing snapshot is not in
3267 * the new snap context.
3268 *
3269 * If the now missing snapshot is the one the
3270 * image is mapped to, clear its exists flag
3271 * so we can avoid sending any more requests
3272 * to it.
3273 */
3274 if (rbd_dev->spec->snap_id == snap->id)
3275 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3276 rbd_remove_snap_dev(snap);
3277 dout("%ssnap id %llu has been removed\n",
3278 rbd_dev->spec->snap_id == snap->id ?
3279 "mapped " : "",
3280 (unsigned long long) snap->id);
3281
3282 /* Done with this list entry; advance */
3283
3284 links = next;
3285 continue;
3286 }
3287
3288 snap_name = rbd_dev_snap_info(rbd_dev, index,
3289 &snap_size, &snap_features);
3290 if (IS_ERR(snap_name))
3291 return PTR_ERR(snap_name);
3292
3293 dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
3294 (unsigned long long) snap_id);
3295 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
3296 struct rbd_snap *new_snap;
3297
3298 /* We haven't seen this snapshot before */
3299
3300 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
3301 snap_id, snap_size, snap_features);
3302 if (IS_ERR(new_snap)) {
3303 int err = PTR_ERR(new_snap);
3304
3305 dout(" failed to add dev, error %d\n", err);
3306
3307 return err;
3308 }
3309
3310 /* New goes before existing, or at end of list */
3311
3312 dout(" added dev%s\n", snap ? "" : " at end\n");
3313 if (snap)
3314 list_add_tail(&new_snap->node, &snap->node);
3315 else
3316 list_add_tail(&new_snap->node, head);
3317 } else {
3318 /* Already have this one */
3319
3320 dout(" already present\n");
3321
3322 rbd_assert(snap->size == snap_size);
3323 rbd_assert(!strcmp(snap->name, snap_name));
3324 rbd_assert(snap->features == snap_features);
3325
3326 /* Done with this list entry; advance */
3327
3328 links = links->next;
3329 }
3330
3331 /* Advance to the next entry in the snapshot context */
3332
3333 index++;
3334 }
3335 dout("%s: done\n", __func__);
3336
3337 return 0;
3338 }
3339
3340 /*
3341 * Scan the list of snapshots and register the devices for any that
3342 * have not already been registered.
3343 */
3344 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
3345 {
3346 struct rbd_snap *snap;
3347 int ret = 0;
3348
3349 dout("%s:\n", __func__);
3350 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
3351 return -EIO;
3352
3353 list_for_each_entry(snap, &rbd_dev->snaps, node) {
3354 if (!rbd_snap_registered(snap)) {
3355 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
3356 if (ret < 0)
3357 break;
3358 }
3359 }
3360 dout("%s: returning %d\n", __func__, ret);
3361
3362 return ret;
3363 }
3364
3365 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
3366 {
3367 struct device *dev;
3368 int ret;
3369
3370 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3371
3372 dev = &rbd_dev->dev;
3373 dev->bus = &rbd_bus_type;
3374 dev->type = &rbd_device_type;
3375 dev->parent = &rbd_root_dev;
3376 dev->release = rbd_dev_release;
3377 dev_set_name(dev, "%d", rbd_dev->dev_id);
3378 ret = device_register(dev);
3379
3380 mutex_unlock(&ctl_mutex);
3381
3382 return ret;
3383 }
3384
3385 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
3386 {
3387 device_unregister(&rbd_dev->dev);
3388 }
3389
3390 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
3391
3392 /*
3393 * Get a unique rbd identifier for the given new rbd_dev, and add
3394 * the rbd_dev to the global list. The minimum rbd id is 1.
3395 */
3396 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
3397 {
3398 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
3399
3400 spin_lock(&rbd_dev_list_lock);
3401 list_add_tail(&rbd_dev->node, &rbd_dev_list);
3402 spin_unlock(&rbd_dev_list_lock);
3403 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
3404 (unsigned long long) rbd_dev->dev_id);
3405 }
3406
3407 /*
3408 * Remove an rbd_dev from the global list, and record that its
3409 * identifier is no longer in use.
3410 */
3411 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
3412 {
3413 struct list_head *tmp;
3414 int rbd_id = rbd_dev->dev_id;
3415 int max_id;
3416
3417 rbd_assert(rbd_id > 0);
3418
3419 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
3420 (unsigned long long) rbd_dev->dev_id);
3421 spin_lock(&rbd_dev_list_lock);
3422 list_del_init(&rbd_dev->node);
3423
3424 /*
3425 * If the id being "put" is not the current maximum, there
3426 * is nothing special we need to do.
3427 */
3428 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
3429 spin_unlock(&rbd_dev_list_lock);
3430 return;
3431 }
3432
3433 /*
3434 * We need to update the current maximum id. Search the
3435 * list to find out what it is. We're more likely to find
3436 * the maximum at the end, so search the list backward.
3437 */
3438 max_id = 0;
3439 list_for_each_prev(tmp, &rbd_dev_list) {
3440 struct rbd_device *rbd_dev;
3441
3442 rbd_dev = list_entry(tmp, struct rbd_device, node);
3443 if (rbd_dev->dev_id > max_id)
3444 max_id = rbd_dev->dev_id;
3445 }
3446 spin_unlock(&rbd_dev_list_lock);
3447
3448 /*
3449 * The max id could have been updated by rbd_dev_id_get(), in
3450 * which case it now accurately reflects the new maximum.
3451 * Be careful not to overwrite the maximum value in that
3452 * case.
3453 */
3454 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
3455 dout(" max dev id has been reset\n");
3456 }
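/*
 * Illustrative scenario for the id bookkeeping above: with devices
 * 1, 2 and 3 mapped, rbd_dev_id_max is 3.  Removing device 3 makes
 * the backward scan find 2 as the new maximum.  If another mapping
 * raced in and already bumped rbd_dev_id_max to 4, the cmpxchg
 * (which expects to see 3) fails, preserving the newer, larger
 * value.
 */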
3457
3458 /*
3459 * Skips over white space at *buf, and updates *buf to point to the
3460 * first found non-space character (if any). Returns the length of
3461 * the token (string of non-white space characters) found. Note
3462 * that *buf must be terminated with '\0'.
3463 */
3464 static inline size_t next_token(const char **buf)
3465 {
3466 /*
3467 * These are the characters that produce nonzero for
3468 * isspace() in the "C" and "POSIX" locales.
3469 */
3470 const char *spaces = " \f\n\r\t\v";
3471
3472 *buf += strspn(*buf, spaces); /* Find start of token */
3473
3474 return strcspn(*buf, spaces); /* Return token length */
3475 }
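/*
 * Usage sketch for next_token() (hypothetical helper, not called by
 * the driver): scanning "  rbd foo" skips the leading blanks,
 * leaves *buf at "rbd foo", and returns 3, the token length.
 */
static void __maybe_unused next_token_example(void)
{
	const char *buf = "  rbd foo";
	size_t len;

	len = next_token(&buf);	/* len == 3, buf now at "rbd foo" */
	(void) len;
}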
3476
3477 /*
3478 * Finds the next token in *buf, and if the provided token buffer is
3479 * big enough, copies the found token into it. The result, if
3480 * copied, is guaranteed to be terminated with '\0'. Note that *buf
3481 * must be terminated with '\0' on entry.
3482 *
3483 * Returns the length of the token found (not including the '\0').
3484 * Return value will be 0 if no token is found, and it will be >=
3485 * token_size if the token would not fit.
3486 *
3487 * The *buf pointer will be updated to point beyond the end of the
3488 * found token. Note that this occurs even if the token buffer is
3489 * too small to hold it.
3490 */
3491 static inline size_t copy_token(const char **buf,
3492 char *token,
3493 size_t token_size)
3494 {
3495 size_t len;
3496
3497 len = next_token(buf);
3498 if (len < token_size) {
3499 memcpy(token, *buf, len);
3500 *(token + len) = '\0';
3501 }
3502 *buf += len;
3503
3504 return len;
3505 }
3506
3507 /*
3508 * Finds the next token in *buf, dynamically allocates a buffer big
3509 * enough to hold a copy of it, and copies the token into the new
3510 * buffer. The copy is guaranteed to be terminated with '\0'. Note
3511 * that a duplicate buffer is created even for a zero-length token.
3512 *
3513 * Returns a pointer to the newly-allocated duplicate, or a null
3514 * pointer if memory for the duplicate was not available. If
3515 * the lenp argument is a non-null pointer, the length of the token
3516 * (not including the '\0') is returned in *lenp.
3517 *
3518 * If successful, the *buf pointer will be updated to point beyond
3519 * the end of the found token.
3520 *
3521 * Note: uses GFP_KERNEL for allocation.
3522 */
3523 static inline char *dup_token(const char **buf, size_t *lenp)
3524 {
3525 char *dup;
3526 size_t len;
3527
3528 len = next_token(buf);
3529 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
3530 if (!dup)
3531 return NULL;
3532 *(dup + len) = '\0';
3533 *buf += len;
3534
3535 if (lenp)
3536 *lenp = len;
3537
3538 return dup;
3539 }
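/*
 * Usage sketch for dup_token() (hypothetical helper): pulling two
 * tokens out of "pool image" yields separately-allocated,
 * NUL-terminated copies "pool" and "image", with *buf advanced past
 * each token.  copy_token() behaves the same way but fills a
 * caller-supplied buffer instead of allocating one.
 */
static void __maybe_unused dup_token_example(void)
{
	const char *buf = "pool image";
	char *pool;
	char *image;

	pool = dup_token(&buf, NULL);	/* "pool" */
	image = dup_token(&buf, NULL);	/* "image" */
	kfree(pool);
	kfree(image);
}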
3540
3541 /*
3542 * Parse the options provided for an "rbd add" (i.e., rbd image
3543 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
3544 * and the data written is passed here via a NUL-terminated buffer.
3545 * Returns 0 if successful or an error code otherwise.
3546 *
3547 * The information extracted from these options is recorded in
3548 * the other parameters which return dynamically-allocated
3549 * structures:
3550 * ceph_opts
3551 * The address of a pointer that will refer to a ceph options
3552 * structure. Caller must release the returned pointer using
3553 * ceph_destroy_options() when it is no longer needed.
3554 * rbd_opts
3555 * Address of an rbd options pointer. Fully initialized by
3556 * this function; caller must release with kfree().
3557 * spec
3558 * Address of an rbd image specification pointer. Fully
3559 * initialized by this function based on parsed options.
3560 * Caller must release with rbd_spec_put().
3561 *
3562 * The options passed take this form:
3563 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
3564 * where:
3565 * <mon_addrs>
3566 * A comma-separated list of one or more monitor addresses.
3567 * A monitor address is an ip address, optionally followed
3568 * by a port number (separated by a colon).
3569 * I.e.: ip1[:port1][,ip2[:port2]...]
3570 * <options>
3571 * A comma-separated list of ceph and/or rbd options.
3572 * <pool_name>
3573 * The name of the rados pool containing the rbd image.
3574 * <image_name>
3575 * The name of the image in that pool to map.
3576 * <snap_name>
3577 * An optional snapshot name. If provided, the mapping will
3578 * present data from the image at the time that snapshot was
3579 * created. The image head is used if no snapshot name is
3580 * provided. Snapshot mappings are always read-only.
3581 */
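/*
 * Example of a buffer handled by this function (all values are
 * hypothetical):
 *
 *	1.2.3.4:6789,1.2.3.5:6789 name=admin rbd foo snap1
 *
 * two monitor addresses, a ceph option list, pool "rbd", image
 * "foo", mapped at snapshot "snap1" (and therefore read-only).
 */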
3582 static int rbd_add_parse_args(const char *buf,
3583 struct ceph_options **ceph_opts,
3584 struct rbd_options **opts,
3585 struct rbd_spec **rbd_spec)
3586 {
3587 size_t len;
3588 char *options;
3589 const char *mon_addrs;
3590 size_t mon_addrs_size;
3591 struct rbd_spec *spec = NULL;
3592 struct rbd_options *rbd_opts = NULL;
3593 struct ceph_options *copts;
3594 int ret;
3595
3596 /* The first four tokens are required */
3597
3598 len = next_token(&buf);
3599 if (!len) {
3600 rbd_warn(NULL, "no monitor address(es) provided");
3601 return -EINVAL;
3602 }
3603 mon_addrs = buf;
3604 mon_addrs_size = len + 1;
3605 buf += len;
3606
3607 ret = -EINVAL;
3608 options = dup_token(&buf, NULL);
3609 if (!options)
3610 return -ENOMEM;
3611 if (!*options) {
3612 rbd_warn(NULL, "no options provided");
3613 goto out_err;
3614 }
3615
3616 spec = rbd_spec_alloc();
3617 if (!spec)
3618 goto out_mem;
3619
3620 spec->pool_name = dup_token(&buf, NULL);
3621 if (!spec->pool_name)
3622 goto out_mem;
3623 if (!*spec->pool_name) {
3624 rbd_warn(NULL, "no pool name provided");
3625 goto out_err;
3626 }
3627
3628 spec->image_name = dup_token(&buf, NULL);
3629 if (!spec->image_name)
3630 goto out_mem;
3631 if (!*spec->image_name) {
3632 rbd_warn(NULL, "no image name provided");
3633 goto out_err;
3634 }
3635
3636 /*
3637 * Snapshot name is optional; default is to use "-"
3638 * (indicating the head/no snapshot).
3639 */
3640 len = next_token(&buf);
3641 if (!len) {
3642 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
3643 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
3644 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
3645 ret = -ENAMETOOLONG;
3646 goto out_err;
3647 }
3648 spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
3649 if (!spec->snap_name)
3650 goto out_mem;
3651 *(spec->snap_name + len) = '\0';
3652
3653 /* Initialize all rbd options to the defaults */
3654
3655 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
3656 if (!rbd_opts)
3657 goto out_mem;
3658
3659 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
3660
3661 copts = ceph_parse_options(options, mon_addrs,
3662 mon_addrs + mon_addrs_size - 1,
3663 parse_rbd_opts_token, rbd_opts);
3664 if (IS_ERR(copts)) {
3665 ret = PTR_ERR(copts);
3666 goto out_err;
3667 }
3668 kfree(options);
3669
3670 *ceph_opts = copts;
3671 *opts = rbd_opts;
3672 *rbd_spec = spec;
3673
3674 return 0;
3675 out_mem:
3676 ret = -ENOMEM;
3677 out_err:
3678 kfree(rbd_opts);
3679 rbd_spec_put(spec);
3680 kfree(options);
3681
3682 return ret;
3683 }
3684
3685 /*
3686 * An rbd format 2 image has a unique identifier, distinct from the
3687 * name given to it by the user. Internally, that identifier is
3688 * what's used to specify the names of objects related to the image.
3689 *
3690 * A special "rbd id" object is used to map an rbd image name to its
3691 * id. If that object doesn't exist, then there is no v2 rbd image
3692 * with the supplied name.
3693 *
3694 * This function will record the given rbd_dev's image_id field if
3695 * it can be determined, and in that case will return 0. If any
3696 * errors occur a negative errno will be returned and the rbd_dev's
3697 * image_id field will be unchanged (and should be NULL).
3698 */
3699 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
3700 {
3701 int ret;
3702 size_t size;
3703 char *object_name;
3704 void *response;
3705 void *p;
3706
3707 /*
3708 * When probing a parent image, the image id is already
3709 * known (and the image name likely is not). There's no
3710 * need to fetch the image id again in this case.
3711 */
3712 if (rbd_dev->spec->image_id)
3713 return 0;
3714
3715 /*
3716 * First, see if the format 2 image id file exists, and if
3717 * so, get the image's persistent id from it.
3718 */
3719 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
3720 object_name = kmalloc(size, GFP_NOIO);
3721 if (!object_name)
3722 return -ENOMEM;
3723 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
3724 dout("rbd id object name is %s\n", object_name);
3725
3726 /* Response will be an encoded string, which includes a length */
3727
3728 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
3729 response = kzalloc(size, GFP_NOIO);
3730 if (!response) {
3731 ret = -ENOMEM;
3732 goto out;
3733 }
3734
3735 ret = rbd_obj_method_sync(rbd_dev, object_name,
3736 "rbd", "get_id",
3737 NULL, 0,
3738 response, RBD_IMAGE_ID_LEN_MAX, NULL);
3739 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3740 if (ret < 0)
3741 goto out;
3742
3743 p = response;
3744 rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
3745 p + RBD_IMAGE_ID_LEN_MAX,
3746 NULL, GFP_NOIO);
3747 if (IS_ERR(rbd_dev->spec->image_id)) {
3748 ret = PTR_ERR(rbd_dev->spec->image_id);
3749 rbd_dev->spec->image_id = NULL;
3750 } else {
3751 dout("image_id is %s\n", rbd_dev->spec->image_id);
3752 }
3753 out:
3754 kfree(response);
3755 kfree(object_name);
3756
3757 return ret;
3758 }
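/*
 * Example of the lookup above (names hypothetical): for an image
 * named "foo", the id object is "rbd_id.foo" (RBD_ID_PREFIX plus
 * the image name), and a successful "get_id" reply decodes to an
 * id string such as "1014b2ae8944a", which becomes spec->image_id.
 */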
3759
3760 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
3761 {
3762 int ret;
3763 size_t size;
3764
3765 /* Version 1 images have no id; empty string is used */
3766
3767 rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
3768 if (!rbd_dev->spec->image_id)
3769 return -ENOMEM;
3770
3771 /* Record the header object name for this rbd image. */
3772
3773 size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
3774 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3775 if (!rbd_dev->header_name) {
3776 ret = -ENOMEM;
3777 goto out_err;
3778 }
3779 sprintf(rbd_dev->header_name, "%s%s",
3780 rbd_dev->spec->image_name, RBD_SUFFIX);
3781
3782 /* Populate rbd image metadata */
3783
3784 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
3785 if (ret < 0)
3786 goto out_err;
3787
3788 /* Version 1 images have no parent (no layering) */
3789
3790 rbd_dev->parent_spec = NULL;
3791 rbd_dev->parent_overlap = 0;
3792
3793 rbd_dev->image_format = 1;
3794
3795 dout("discovered version 1 image, header name is %s\n",
3796 rbd_dev->header_name);
3797
3798 return 0;
3799
3800 out_err:
3801 kfree(rbd_dev->header_name);
3802 rbd_dev->header_name = NULL;
3803 kfree(rbd_dev->spec->image_id);
3804 rbd_dev->spec->image_id = NULL;
3805
3806 return ret;
3807 }
3808
3809 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
3810 {
3811 size_t size;
3812 int ret;
3813 u64 ver = 0;
3814
3815 /*
3816 * Image id was filled in by the caller. Record the header
3817 * object name for this rbd image.
3818 */
3819 size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
3820 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3821 if (!rbd_dev->header_name)
3822 return -ENOMEM;
3823 sprintf(rbd_dev->header_name, "%s%s",
3824 RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
3825
3826 /* Get the size and object order for the image */
3827
3828 ret = rbd_dev_v2_image_size(rbd_dev);
3829 if (ret < 0)
3830 goto out_err;
3831
3832 /* Get the object prefix (a.k.a. block_name) for the image */
3833
3834 ret = rbd_dev_v2_object_prefix(rbd_dev);
3835 if (ret < 0)
3836 goto out_err;
3837
3838 /* Get and check the features for the image */
3839
3840 ret = rbd_dev_v2_features(rbd_dev);
3841 if (ret < 0)
3842 goto out_err;
3843
3844 /* If the image supports layering, get the parent info */
3845
3846 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
3847 ret = rbd_dev_v2_parent_info(rbd_dev);
3848 if (ret < 0)
3849 goto out_err;
3850 }
3851
3852 /* crypto and compression type aren't (yet) supported for v2 images */
3853
3854 rbd_dev->header.crypt_type = 0;
3855 rbd_dev->header.comp_type = 0;
3856
3857 /* Get the snapshot context, plus the header version */
3858
3859 ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
3860 if (ret)
3861 goto out_err;
3862 rbd_dev->header.obj_version = ver;
3863
3864 rbd_dev->image_format = 2;
3865
3866 dout("discovered version 2 image, header name is %s\n",
3867 rbd_dev->header_name);
3868
3869 return 0;
3870 out_err:
3871 rbd_dev->parent_overlap = 0;
3872 rbd_spec_put(rbd_dev->parent_spec);
3873 rbd_dev->parent_spec = NULL;
3874 kfree(rbd_dev->header_name);
3875 rbd_dev->header_name = NULL;
3876 kfree(rbd_dev->header.object_prefix);
3877 rbd_dev->header.object_prefix = NULL;
3878
3879 return ret;
3880 }
3881
3882 static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
3883 {
3884 int ret;
3885
3886 /* no need to lock here, as rbd_dev is not registered yet */
3887 ret = rbd_dev_snaps_update(rbd_dev);
3888 if (ret)
3889 return ret;
3890
3891 ret = rbd_dev_probe_update_spec(rbd_dev);
3892 if (ret)
3893 goto err_out_snaps;
3894
3895 ret = rbd_dev_set_mapping(rbd_dev);
3896 if (ret)
3897 goto err_out_snaps;
3898
3899 /* generate unique id: find highest unique id, add one */
3900 rbd_dev_id_get(rbd_dev);
3901
3902 /* Fill in the device name, now that we have its id. */
3903 BUILD_BUG_ON(DEV_NAME_LEN
3904 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
3905 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
3906
3907 /* Get our block major device number. */
3908
3909 ret = register_blkdev(0, rbd_dev->name);
3910 if (ret < 0)
3911 goto err_out_id;
3912 rbd_dev->major = ret;
3913
3914 /* Set up the blkdev mapping. */
3915
3916 ret = rbd_init_disk(rbd_dev);
3917 if (ret)
3918 goto err_out_blkdev;
3919
3920 ret = rbd_bus_add_dev(rbd_dev);
3921 if (ret)
3922 goto err_out_disk;
3923
3924 /*
3925 * At this point cleanup in the event of an error is the job
3926 * of the sysfs code (initiated by rbd_bus_del_dev()).
3927 */
3928 down_write(&rbd_dev->header_rwsem);
3929 ret = rbd_dev_snaps_register(rbd_dev);
3930 up_write(&rbd_dev->header_rwsem);
3931 if (ret)
3932 goto err_out_bus;
3933
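	/*
	 * Arm a watch on the header object so the OSD will notify us
	 * when that object changes (for example after a resize or a
	 * snapshot operation) and the mapping can be refreshed.
	 */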
3934 ret = rbd_dev_header_watch_sync(rbd_dev, 1);
3935 if (ret)
3936 goto err_out_bus;
3937
3938 /* Everything's ready. Announce the disk to the world. */
3939
3940 add_disk(rbd_dev->disk);
3941
3942 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
3943 (unsigned long long) rbd_dev->mapping.size);
3944
3945 return ret;
3946 err_out_bus:
3947 /* this will also clean up the rest of the rbd_dev state */
3948
3949 rbd_bus_del_dev(rbd_dev);
3950
3951 return ret;
3952 err_out_disk:
3953 rbd_free_disk(rbd_dev);
3954 err_out_blkdev:
3955 unregister_blkdev(rbd_dev->major, rbd_dev->name);
3956 err_out_id:
3957 rbd_dev_id_put(rbd_dev);
3958 err_out_snaps:
3959 rbd_remove_all_snaps(rbd_dev);
3960
3961 return ret;
3962 }
3963
3964 /*
3965 * Probe for the existence of the header object for the given rbd
3966 * device. For format 2 images this includes determining the image
3967 * id.
3968 */
3969 static int rbd_dev_probe(struct rbd_device *rbd_dev)
3970 {
3971 int ret;
3972
3973 /*
3974 * Get the id from the image id object. If it's not a
3975 * format 2 image, we'll get ENOENT back, and we'll assume
3976 * it's a format 1 image.
3977 */
3978 ret = rbd_dev_image_id(rbd_dev);
3979 if (ret)
3980 ret = rbd_dev_v1_probe(rbd_dev);
3981 else
3982 ret = rbd_dev_v2_probe(rbd_dev);
3983 if (ret) {
3984 dout("probe failed, returning %d\n", ret);
3985
3986 return ret;
3987 }
3988
3989 ret = rbd_dev_probe_finish(rbd_dev);
3990 if (ret)
3991 rbd_header_free(&rbd_dev->header);
3992
3993 return ret;
3994 }
3995
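/*
 * Handle a write to the sysfs "add" control file (/sys/bus/rbd/add).
 * The buffer carries a mapping specification, roughly:
 *
 *	<mon addrs> <options> <pool name> <image name> [<snap name>]
 *
 * so, as an illustration only (the address and names below are
 * placeholders, not defaults):
 *
 *	# echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * On success the whole write is consumed and a new rbd<id> device
 * exists; on failure everything acquired here is released in the
 * reverse of the order it was set up.
 */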
3996 static ssize_t rbd_add(struct bus_type *bus,
3997 const char *buf,
3998 size_t count)
3999 {
4000 struct rbd_device *rbd_dev = NULL;
4001 struct ceph_options *ceph_opts = NULL;
4002 struct rbd_options *rbd_opts = NULL;
4003 struct rbd_spec *spec = NULL;
4004 struct rbd_client *rbdc;
4005 struct ceph_osd_client *osdc;
4006 int rc = -ENOMEM;
4007
4008 if (!try_module_get(THIS_MODULE))
4009 return -ENODEV;
4010
4011 /* parse add command */
4012 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4013 if (rc < 0)
4014 goto err_out_module;
4015
4016 rbdc = rbd_get_client(ceph_opts);
4017 if (IS_ERR(rbdc)) {
4018 rc = PTR_ERR(rbdc);
4019 goto err_out_args;
4020 }
4021 ceph_opts = NULL; /* rbd_dev client now owns this */
4022
4023 /* pick the pool */
4024 osdc = &rbdc->client->osdc;
4025 rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
4026 if (rc < 0)
4027 goto err_out_client;
4028 spec->pool_id = (u64) rc;
4029
4030 /* The ceph file layout needs the pool id to fit in 32 bits */
4031
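	/*
	 * (The limit exists because the on-wire ceph_file_layout
	 * carries the pool in a 32-bit field; a pool id that cannot
	 * be represented there cannot be used, hence the -EIO.)
	 */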
4032 if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
4033 rc = -EIO;
4034 goto err_out_client;
4035 }
4036
4037 rbd_dev = rbd_dev_create(rbdc, spec);
4038 if (!rbd_dev) {
	rc = -ENOMEM;	/* rc still holds the nonnegative pool id here */
4039 	goto err_out_client;
	}
4040 rbdc = NULL; /* rbd_dev now owns this */
4041 spec = NULL; /* rbd_dev now owns this */
4042
4043 rbd_dev->mapping.read_only = rbd_opts->read_only;
4044 kfree(rbd_opts);
4045 rbd_opts = NULL; /* done with this */
4046
4047 rc = rbd_dev_probe(rbd_dev);
4048 if (rc < 0)
4049 goto err_out_rbd_dev;
4050
4051 return count;
4052 err_out_rbd_dev:
4053 rbd_dev_destroy(rbd_dev);
4054 err_out_client:
4055 rbd_put_client(rbdc);
4056 err_out_args:
4057 if (ceph_opts)
4058 ceph_destroy_options(ceph_opts);
4059 kfree(rbd_opts);
4060 rbd_spec_put(spec);
4061 err_out_module:
4062 module_put(THIS_MODULE);
4063
4064 dout("Error adding device %s\n", buf);
4065
4066 return (ssize_t) rc;
4067 }
4068
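/*
 * Find a mapped device by id.  The device list is walked under
 * rbd_dev_list_lock, but no reference is taken on the result: the
 * caller (rbd_remove()) holds ctl_mutex, which is what keeps the
 * returned device from disappearing after the lock is dropped.
 */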
4069 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
4070 {
4071 struct list_head *tmp;
4072 struct rbd_device *rbd_dev;
4073
4074 spin_lock(&rbd_dev_list_lock);
4075 list_for_each(tmp, &rbd_dev_list) {
4076 rbd_dev = list_entry(tmp, struct rbd_device, node);
4077 if (rbd_dev->dev_id == dev_id) {
4078 spin_unlock(&rbd_dev_list_lock);
4079 return rbd_dev;
4080 }
4081 }
4082 spin_unlock(&rbd_dev_list_lock);
4083 return NULL;
4084 }
4085
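/*
 * Release callback for the struct device embedded in an rbd_device,
 * run by the driver core once the last reference is gone (teardown
 * begun by rbd_bus_del_dev() ends up here).  This undoes what
 * rbd_dev_probe_finish() set up: the watch, disk, major number,
 * header fields, device id, and the module reference.
 */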
4086 static void rbd_dev_release(struct device *dev)
4087 {
4088 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4089
4090 if (rbd_dev->watch_event)
4091 rbd_dev_header_watch_sync(rbd_dev, 0);
4092
4093 /* clean up and free blkdev */
4094 rbd_free_disk(rbd_dev);
4095 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4096
4097 /* release allocated disk header fields */
4098 rbd_header_free(&rbd_dev->header);
4099
4100 /* done with the id, and with the rbd_dev */
4101 rbd_dev_id_put(rbd_dev);
4102 rbd_assert(rbd_dev->rbd_client != NULL);
4103 rbd_dev_destroy(rbd_dev);
4104
4105 /* release module ref */
4106 module_put(THIS_MODULE);
4107 }
4108
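/*
 * Handle a write to the sysfs "remove" control file.  The buffer
 * holds the decimal id of the device to tear down, e.g.
 *
 *	# echo 2 > /sys/bus/rbd/remove
 *
 * would remove rbd2.  Removal is refused with -EBUSY while the
 * block device is open, and -ENOENT is returned for an unknown id.
 */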
4109 static ssize_t rbd_remove(struct bus_type *bus,
4110 const char *buf,
4111 size_t count)
4112 {
4113 struct rbd_device *rbd_dev = NULL;
4114 int target_id, rc;
4115 unsigned long ul;
4116 int ret = count;
4117
4118 rc = strict_strtoul(buf, 10, &ul);
4119 if (rc)
4120 return rc;
4121
4122 /* convert to int; abort if we lost anything in the conversion */
4123 target_id = (int) ul;
4124 if (target_id != ul)
4125 return -EINVAL;
4126
4127 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4128
4129 rbd_dev = __rbd_get_dev(target_id);
4130 if (!rbd_dev) {
4131 ret = -ENOENT;
4132 goto done;
4133 }
4134
4135 spin_lock_irq(&rbd_dev->lock);
4136 if (rbd_dev->open_count)
4137 ret = -EBUSY;
4138 else
4139 set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
4140 spin_unlock_irq(&rbd_dev->lock);
4141 if (ret < 0)
4142 goto done;
4143
4144 rbd_remove_all_snaps(rbd_dev);
4145 rbd_bus_del_dev(rbd_dev);
4146
4147 done:
4148 mutex_unlock(&ctl_mutex);
4149
4150 return ret;
4151 }
4152
4153 /*
4154 * create control files in sysfs
4155 * /sys/bus/rbd/...
4156 */
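/*
 * The add and remove control files themselves come from attributes
 * attached to rbd_bus_type earlier in this file; this function just
 * registers the root device and bus that anchor them, unwinding the
 * root device if bus registration fails.
 */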
4157 static int rbd_sysfs_init(void)
4158 {
4159 int ret;
4160
4161 ret = device_register(&rbd_root_dev);
4162 if (ret < 0)
4163 return ret;
4164
4165 ret = bus_register(&rbd_bus_type);
4166 if (ret < 0)
4167 device_unregister(&rbd_root_dev);
4168
4169 return ret;
4170 }
4171
4172 static void rbd_sysfs_cleanup(void)
4173 {
4174 bus_unregister(&rbd_bus_type);
4175 device_unregister(&rbd_root_dev);
4176 }
4177
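/*
 * Module init: check that the loaded libceph is one this driver can
 * work with, then create the sysfs control files.  No rbd device
 * exists until one is mapped through /sys/bus/rbd/add.
 */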
4178 static int __init rbd_init(void)
4179 {
4180 int rc;
4181
4182 if (!libceph_compatible(NULL)) {
4183 rbd_warn(NULL, "libceph incompatibility (quitting)");
4184
4185 return -EINVAL;
4186 }
4187 rc = rbd_sysfs_init();
4188 if (rc)
4189 return rc;
4190 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
4191 return 0;
4192 }
4193
4194 static void __exit rbd_exit(void)
4195 {
4196 rbd_sysfs_cleanup();
4197 }
4198
4199 module_init(rbd_init);
4200 module_exit(rbd_exit);
4201
4202 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
4203 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
4204 MODULE_DESCRIPTION("rados block device");
4205
4206 /* following authorship retained from original osdblk.c */
4207 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
4208
4209 MODULE_LICENSE("GPL");