/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
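/*
 * Usage sketch (illustration only, not part of the driver): these two
 * helpers build a saturating reference count on top of atomic_t.
 * Assuming a counter initialized to 1, a typical get/put pair is:
 *
 *	if (atomic_inc_return_safe(&ref) < 0)	// already 0, or poisoned
 *		return -EINVAL;			// do not resurrect
 *	...
 *	if (atomic_dec_return_safe(&ref) == 0)
 *		cleanup();			// last reference dropped
 *
 * Once the counter has reached 0 (or has saturated at INT_MAX),
 * further increments fail rather than reviving the object.
 */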
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
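/*
 * Worked example (illustration only): each byte of an integer adds at
 * most log10(256) ~= 2.41 decimal digits, and the formula above allows
 * 5/2 = 2.5 digits per byte plus one character for a sign.  For a
 * 4-byte int that is (5 * 4) / 2 + 1 = 11 characters, which covers the
 * worst case "-2147483648".
 */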
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
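/*
 * Illustration (not driver code): with RBD_SINGLE_MAJOR_PART_SHIFT == 4
 * each device reserves 16 minor numbers for its partitions.  Device id
 * 3 maps to first minor 3 << 4 == 48, and any of its minors 48..63 map
 * back to the device via 48 >> 4 == 3.
 */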
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
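/*
 * Illustration of the format 1 layout assumed above (example only):
 * snap_names holds the NUL-terminated names back to back, e.g.
 * "one\0two\0three\0".  Looking up which == 2 skips strlen("one") + 1
 * and then strlen("two") + 1 bytes to land on "three".
 */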
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
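/*
 * Example (illustration only): for snaps[] = { 30, 20, 10 } the
 * reversed comparator makes bsearch() see the array as sorted, so a
 * lookup of id 20 yields index 1.  A conventional ascending comparator
 * would never find anything in this OSD-supplied descending array.
 */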
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
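/*
 * Worked example (illustration only): with the default obj_order of 22
 * (4 MiB objects), image offset 9 MiB lands in segment 9 MiB >> 22 == 2,
 * at offset 9 MiB & (4 MiB - 1) == 1 MiB into that object.  A 5 MiB
 * request starting there is clipped to 4 MiB - 1 MiB == 3 MiB; the
 * remainder becomes a request against the next object.
 */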
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
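/*
 * Example walk-through (illustration only): given a source chain of
 * two 8K bios with *offset == 6K and len == 4K, the first clone covers
 * the last 2K of bio 0 and the second the first 2K of bio 1.  On
 * return *bio_src points at bio 1 and *offset == 2K, ready for the
 * caller to carve out the next object's worth of data.
 */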
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}
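/*
 * Illustration of the race described above (assumed interleaving): the
 * object is created between two STAT checks, so the stale "doesn't
 * exist" reply is processed after the "does exist" reply.  The late
 * obj_request_existence_set(obj_request, false) call only sets KNOWN;
 * the sticky EXISTS bit set by the earlier reply is preserved.
 */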
static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
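/*
 * Note on the which/count invariant (illustration only): requests are
 * added with which == 0, 1, 2, ... in list order, and teardown uses
 * for_each_obj_request_safe(), which walks the list in reverse.  The
 * highest which is therefore removed first, so the assertion
 * which == obj_request_count (after the decrement) holds on every del.
 */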
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return (int)ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}

static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
					unsigned long timeout)
{
	return __rbd_obj_request_wait(obj_request, timeout);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
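/*
 * Illustration (assumed op layout): a plain data write is created with
 * num_ops == 2, r_ops[0] == SETALLOCHINT and r_ops[1] == WRITE, which
 * is why rbd_osd_req_callback() treats SETALLOCHINT in slot 0 as a
 * write completion.  Reads, stats and watch requests carry a single op.
 */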
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
2190 static void rbd_img_request_destroy(struct kref
*kref
)
2192 struct rbd_img_request
*img_request
;
2193 struct rbd_obj_request
*obj_request
;
2194 struct rbd_obj_request
*next_obj_request
;
2196 img_request
= container_of(kref
, struct rbd_img_request
, kref
);
2198 dout("%s: img %p\n", __func__
, img_request
);
2200 for_each_obj_request_safe(img_request
, obj_request
, next_obj_request
)
2201 rbd_img_obj_request_del(img_request
, obj_request
);
2202 rbd_assert(img_request
->obj_request_count
== 0);
2204 if (img_request_layered_test(img_request
)) {
2205 img_request_layered_clear(img_request
);
2206 rbd_dev_parent_put(img_request
->rbd_dev
);
2209 if (img_request_write_test(img_request
) ||
2210 img_request_discard_test(img_request
))
2211 ceph_put_snap_context(img_request
->snapc
);
2213 kmem_cache_free(rbd_img_request_cache
, img_request
);
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}

/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission.  num_ops is the number of osd operations
 * already added to the object request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if ((offset + length == object_size)) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode, 0);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}

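/*
 * Summary of the discard opcode selection above (informational
 * comment, derived directly from the code):
 *
 *	whole object, no live parent data -> CEPH_OSD_OP_DELETE
 *	discard runs to end of object     -> CEPH_OSD_OP_TRUNCATE
 *	discard runs to end of image      -> CEPH_OSD_OP_TRUNCATE
 *	anything else                     -> CEPH_OSD_OP_ZERO
 */
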
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);
	op_type = rbd_img_request_op_type(img_request);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_NOIO);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

		rbd_img_request_get(img_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}

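/*
 * Worked example (assuming the default 4 MiB object size, i.e.
 * obj_order == 22): a 6 MiB write at image offset 3 MiB is split by
 * the loop above into three object requests:
 *
 *	object N   (img_offset 3 MiB): offset 3 MiB, length 1 MiB
 *	object N+1 (img_offset 4 MiB): offset 0,     length 4 MiB
 *	object N+2 (img_offset 8 MiB): offset 0,     length 1 MiB
 *
 * rbd_segment_length() never returns more than the distance to the
 * next object boundary, which is what guarantees that each object
 * request targets a single rados object.
 */
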
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
		obj_request->type == OBJ_REQUEST_NODATA);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}

static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	enum obj_operation_type op_type;
	u32 page_count;
	int img_result;
	u64 parent_length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Add the other op(s) */

	op_type = rbd_img_request_op_type(orig_request->img_request);
	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}

/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}

static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}

static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

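/*
 * Informational summary of img_obj_request_simple(), derived from
 * the tests above: an object request can go straight to the osd
 * (bypassing the copyup machinery) when any of these holds:
 *
 *	- it is a read
 *	- the image is not layered
 *	- it falls entirely beyond the parent overlap
 *	- it overwrites the entire backing object
 *	- the target object is already known to exist
 */
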
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (obj_request_known_test(obj_request))
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}

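/*
 * Illustrative control flow for a layered write that is not
 * "simple" (informational comment; the functions named are the ones
 * defined above):
 *
 *	rbd_img_obj_request_submit()
 *	  -> rbd_img_obj_exists_submit()	    STAT the target object
 *	       -> rbd_img_obj_exists_callback()	    record existence, resubmit
 *	  -> rbd_img_obj_parent_read_full()	    read the parent range
 *	       -> rbd_img_obj_parent_read_full_callback()
 *						    issue copyup + original op(s)
 *	       -> rbd_img_obj_copyup_callback()	    complete as usual
 */
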
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}

static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}

static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}

static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}

/*
 * Send a (un)watch request and wait for the ack.  Return a request
 * with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
						struct rbd_device *rbd_dev,
						bool watch)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_options *opts = osdc->client->options;
	struct rbd_obj_request *obj_request;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
					     OBJ_REQUEST_NODATA);
	if (!obj_request)
		return ERR_PTR(-ENOMEM);

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
						  obj_request);
	if (!obj_request->osd_req) {
		ret = -ENOMEM;
		goto out;
	}

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
			      rbd_dev->watch_event->cookie, 0, watch);
	rbd_osd_req_format_write(obj_request);

	if (watch)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;

	ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret) {
		if (watch)
			rbd_obj_request_end(obj_request);
		goto out;
	}

	return obj_request;

out:
	rbd_obj_request_put(obj_request);
	return ERR_PTR(ret);
}

/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(!rbd_dev->watch_event);
	rbd_assert(!rbd_dev->watch_request);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
				     &rbd_dev->watch_event);
	if (ret < 0)
		return ret;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
	if (IS_ERR(obj_request)) {
		ceph_osdc_cancel_event(rbd_dev->watch_event);
		rbd_dev->watch_event = NULL;
		return PTR_ERR(obj_request);
	}

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to it.
	 * We'll drop that reference after we've unregistered it in
	 * rbd_dev_header_unwatch_sync().
	 */
	rbd_dev->watch_request = obj_request;

	return 0;
}

/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(rbd_dev->watch_event);
	rbd_assert(rbd_dev->watch_request);

	rbd_obj_request_end(rbd_dev->watch_request);
	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
	if (!IS_ERR(obj_request))
		rbd_obj_request_put(obj_request);
	else
		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
			 PTR_ERR(obj_request));

	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
}

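/*
 * Illustrative pairing (not driver code): a header watch is
 * established once when an image is mapped and torn down when it is
 * unmapped, roughly:
 *
 *	ret = rbd_dev_header_watch_sync(rbd_dev);
 *	...		// rbd_watch_cb() fires on header changes
 *	rbd_dev_header_unwatch_sync(rbd_dev);
 */
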
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

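/*
 * Illustrative call (mirrors the real uses further down, e.g.
 * _rbd_dev_v2_snap_size()): invoke an "rbd" class method on the
 * header object, passing a snapshot id and receiving a packed
 * reply; a non-negative return is the number of bytes received:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  &size_buf, sizeof (size_buf));
 */
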
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

	if (rq->cmd_flags & REQ_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (rq->cmd_flags & REQ_WRITE)
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	/* Only reads are allowed to a read-only device */

	if (op_type != OBJ_OP_READ) {
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
		}
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
					     snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
	if (result)
		goto err_img_request;

	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;

	return;

err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, result);
}

static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}

/*
 * A queue callback.  Makes sure that we don't create a bio that
 * spans multiple osd objects.  One exception would be a single-page
 * bio, which we handle later in bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the partition-relative
	 * bio start sector is to offset relative to the enclosing
	 * device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}

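/*
 * Worked example (assuming 4 MiB objects, so sectors_per_obj ==
 * 8192): for a bio starting at device sector 8000 that already
 * holds 100 KiB, obj_sector_offset == 8000, leaving
 * (8192 - 8000) * 512 == 96 KiB to the object boundary; since the
 * bio already exceeds that, no further page may be merged and the
 * function returns 0.
 */
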
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)

{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags =
		BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
	q->limits.discard_zeroes_data = 1;

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

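/*
 * Informational note: with the default 4 MiB object size,
 * segment_size == 4 MiB, so the queue above advertises an
 * 8192-sector maximum I/O, 4 MiB optimal I/O, and 4 MiB discard
 * granularity, keeping requests aligned to rados object boundaries.
 */
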
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};

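/*
 * Illustrative usage (see Documentation/ABI/testing/sysfs-bus-rbd
 * for the authoritative list): once an image is mapped, the
 * attributes above appear under /sys/bus/rbd/devices/<id>/, e.g.:
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 */
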
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

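/*
 * Informational sketch of the "get_parent" reply decoded above
 * (fields in wire order, little-endian):
 *
 *	__le64	pool_id		CEPH_NOPOOL if there is no parent
 *	string	image_id	length-prefixed (__le32 + bytes)
 *	__le64	snap_id
 *	__le64	overlap		bytes shared with the parent
 */
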
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	bool found = false;
	u32 which;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

4483 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4484 * no snapshot by that name is found, or if an error occurs.
4486 static u64
rbd_snap_id_by_name(struct rbd_device
*rbd_dev
, const char *name
)
4488 if (rbd_dev
->image_format
== 1)
4489 return rbd_v1_snap_id_by_name(rbd_dev
, name
);
4491 return rbd_v2_snap_id_by_name(rbd_dev
, name
);
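
/*
 * Note on the two lookups above: a format 1 header keeps every
 * snapshot name in one buffer of consecutive NUL-terminated strings
 * ("snap1\0snap2\0..."), walked in step with snapc->snaps[], while a
 * format 2 lookup asks the OSD for each snapshot's name through
 * rbd_dev_v2_snap_name().
 */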
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
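
/*
 * For example (hypothetical names): mapping "rbd/foo@snap1" reaches
 * this point with snap_name "snap1" and fills spec->snap_id from the
 * lookup above, while mapping "rbd/foo" arrives with the default
 * snap_name "-" (RBD_SNAP_HEAD_NAME) and records CEPH_NOSNAP.
 */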
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
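
/*
 * Buffer arithmetic, spelled out: with RBD_MAX_SNAP_COUNT == 510 the
 * reply buffer allocated above is 8 + 4 + 510 * 8 = 4092 bytes, which
 * is how the largest supported snapshot context fits within 4KB.
 */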
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
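
/*
 * Usage sketch (illustrative, not driver code):
 *
 *	const char *buf = "  rbd foo";
 *	char *pool = dup_token(&buf, NULL);	// pool = "rbd"
 *	char *image = dup_token(&buf, NULL);	// image = "foo"
 *
 * Each call skips leading white space, copies the token, and leaves
 * *buf pointing just past it; both copies must be freed with kfree().
 */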
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
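
/*
 * Example (all values illustrative): mapping image "foo" in pool
 * "rbd" at snapshot "snap1" from a monitor at 1.2.3.4:6789 would be
 * requested with a write such as:
 *
 *	$ echo "1.2.3.4:6789 name=admin rbd foo snap1" > /sys/bus/rbd/add
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the full interface.
 */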
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
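
/*
 * Example (assuming RBD_ID_PREFIX is "rbd_id.", per rbd_types.h):
 * probing an image named "foo" reads the object "rbd_id.foo".  If
 * that object does not exist the image is treated as format 1 and
 * its image_id is recorded as the empty string.
 */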
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
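
/*
 * Note that rbd_dev_image_probe(parent, false) above ends by calling
 * back into rbd_dev_probe_parent(), so a chain of layered images is
 * probed parent by parent until an image with no parent_spec is
 * reached.
 */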
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
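
/*
 * Example header object names (assuming RBD_SUFFIX is ".rbd" and
 * RBD_HEADER_PREFIX is "rbd_header.", per rbd_types.h): a format 1
 * image "foo" uses "foo.rbd", while a format 2 image with id
 * "abc123" uses "rbd_header.abc123".
 */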
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (mapping)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (mapping && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shut down
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
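
/*
 * Usage sketch (illustrative): writing a device id to the sysfs
 * control file tears down the corresponding mapping, e.g.
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 *
 * (or to /sys/bus/rbd/remove_single_major when single_major is set).
 */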
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");