/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0, it will not be incremented.
 * If the counter is already at its maximum value, returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
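
/*
 * Illustrative sketch (not compiled, not part of the driver): the
 * saturating get/put pattern these helpers support.  parent_ref is the
 * atomic_t this file actually guards with them (see
 * rbd_dev_parent_get() below); error handling is elided here.
 */
#if 0
	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0) {
		/* ... safe to issue I/O to rbd_dev->parent here ... */
		atomic_dec_return_safe(&rbd_dev->parent_ref);
	}
#endif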
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define	RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
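
/*
 * Illustrative sketch (not compiled; an assumption mirroring how the
 * v2 image probe elsewhere in this file rejects images): a mapping
 * must be refused when the image uses feature bits this client does
 * not implement.
 */
#if 0
	u64 unsup = features & ~RBD_FEATURES_SUPPORTED;

	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}
#endif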
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
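
/*
 * For example (illustrative): mapping "rbd map mypool/myimage@mysnap"
 * ends up with a spec whose names are the user-supplied strings and
 * whose ids are looked up from the cluster:
 *
 *	pool_name  = "mypool",   pool_id  = <looked up>
 *	image_name = "myimage",  image_id = <looked up>
 *	snap_name  = "mysnap",   snap_id  = <looked up>
 */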
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *      done
 *        ^
 *        |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			obj_request_count;
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
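
/*
 * Illustrative sketch (not compiled): typical traversal with the
 * iterators above.  The _safe variant must be used when entries are
 * deleted while walking, as rbd_img_request_destroy() below does.
 */
#if 0
	struct rbd_obj_request *obj_req;

	for_each_obj_request(img_req, obj_req)
		rbd_obj_request_submit(obj_req);
#endif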
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
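
/*
 * For example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, dev_id 3 maps to
 * minor 48, leaving minors 49-63 free for partitions of that device
 * (up to 2^4 - 1 = 15 of them); minor 50 maps back to dev_id 3.
 */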
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
static BUS_ATTR(add, 0200, NULL, rbd_add);
static BUS_ATTR(remove, 0200, NULL, rbd_remove);
static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};
static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
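
/*
 * For example: a plain (non-fancy) image without a separate data pool
 * ends up with stripe_unit == object_size, stripe_count == 1 and
 * pool_id equal to the pool the header lives in, so object N of the
 * image covers image bytes [N * object_size, (N + 1) * object_size).
 */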
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}
*kref
);
1317 static void rbd_obj_request_put(struct rbd_obj_request
*obj_request
)
1319 rbd_assert(obj_request
!= NULL
);
1320 dout("%s: obj %p (was %d)\n", __func__
, obj_request
,
1321 kref_read(&obj_request
->kref
));
1322 kref_put(&obj_request
->kref
, rbd_obj_request_destroy
);
1325 static void rbd_img_request_get(struct rbd_img_request
*img_request
)
1327 dout("%s: img %p (was %d)\n", __func__
, img_request
,
1328 kref_read(&img_request
->kref
));
1329 kref_get(&img_request
->kref
);
1332 static void rbd_img_request_destroy(struct kref
*kref
);
1333 static void rbd_img_request_put(struct rbd_img_request
*img_request
)
1335 rbd_assert(img_request
!= NULL
);
1336 dout("%s: img %p (was %d)\n", __func__
, img_request
,
1337 kref_read(&img_request
->kref
));
1338 kref_put(&img_request
->kref
, rbd_img_request_destroy
);
1341 static inline void rbd_img_obj_request_add(struct rbd_img_request
*img_request
,
1342 struct rbd_obj_request
*obj_request
)
1344 rbd_assert(obj_request
->img_request
== NULL
);
1346 /* Image request now owns object's original reference */
1347 obj_request
->img_request
= img_request
;
1348 img_request
->obj_request_count
++;
1349 img_request
->pending_count
++;
1350 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
1353 static inline void rbd_img_obj_request_del(struct rbd_img_request
*img_request
,
1354 struct rbd_obj_request
*obj_request
)
1356 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
1357 list_del(&obj_request
->ex
.oe_item
);
1358 rbd_assert(img_request
->obj_request_count
> 0);
1359 img_request
->obj_request_count
--;
1360 rbd_assert(obj_request
->img_request
== img_request
);
1361 rbd_obj_request_put(obj_request
);
1364 static void rbd_obj_request_submit(struct rbd_obj_request
*obj_request
)
1366 struct ceph_osd_request
*osd_req
= obj_request
->osd_req
;
1368 dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__
,
1369 obj_request
, obj_request
->ex
.oe_objno
, obj_request
->ex
.oe_off
,
1370 obj_request
->ex
.oe_len
, osd_req
);
1371 ceph_osdc_start_request(osd_req
->r_osdc
, osd_req
, false);
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc,
			(rbd_img_is_write(img_req) ? img_req->snapc : NULL),
			num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}
static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}

static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (obj_req->num_img_extents) {
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		num_osd_ops = 3; /* stat + setallochint + write/writefull */
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 2; /* setallochint + write/writefull */
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}
static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else if (rbd_obj_is_tail(obj_req)) {
		opcode = CEPH_OSD_OP_TRUNCATE;
	} else {
		opcode = CEPH_OSD_OP_ZERO;
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_is_entire(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		if (obj_req->num_img_extents)
			num_osd_ops = 2; /* create + truncate */
		else
			num_osd_ops = 1; /* delete */
	} else {
		if (obj_req->num_img_extents) {
			obj_req->write_state = RBD_OBJ_WRITE_GUARD;
			num_osd_ops = 2; /* stat + truncate/zero */
		} else {
			obj_req->write_state = RBD_OBJ_WRITE_FLAT;
			num_osd_ops = 1; /* truncate/zero */
		}
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_discard(obj_req, which);
	return 0;
}
/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;
	int ret;

	for_each_obj_request(img_req, obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret)
			return ret;
	}

	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}

static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}
static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}
static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}
static void rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request(img_request, obj_request)
		rbd_obj_request_submit(obj_request);

	rbd_img_request_put(img_request);
}

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			rbd_assert(0);
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}

static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    rbd_dev->parent_overlap && !obj_req->tried_parent) {
		/* reverse map this object extent onto the parent */
		ret = rbd_obj_calc_img_extents(obj_req, false);
		if (ret) {
			obj_req->result = ret;
			return true;
		}

		if (obj_req->num_img_extents) {
			obj_req->tried_parent = true;
			ret = rbd_obj_read_from_parent(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->ex.oe_len - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->ex.oe_len;
	}

	return true;
}
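
/*
 * Example of the zero-fill rule above (illustrative numbers): for an
 * 8192-byte read of which the OSD returned only the first 4096 bytes
 * (result == 0, xferred == 4096), the trailing 4096 bytes are zeroed
 * and xferred is bumped to oe_len.  A -ENOENT result zero-fills the
 * whole extent the same way.  Either way the caller sees a fully
 * satisfied request.
 */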

/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}

static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
	rbd_osd_req_destroy(obj_req->osd_req);

	/*
	 * Create a copyup request with the same number of OSD ops as
	 * the original request.  The original request was stat + op(s),
	 * the new copyup request will be copyup + the same op(s).
	 */
	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	ret = osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
				  "copyup");
	if (ret)
		return ret;

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		bytes = 0;
	}
	osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
					  obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count,
					  bytes);

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_obj_setup_write(obj_req, 1);
		break;
	case OBJ_OP_DISCARD:
		rbd_assert(!rbd_obj_is_entire(obj_req));
		__rbd_obj_setup_discard(obj_req, 1);
		break;
	default:
		rbd_assert(0);
	}

	rbd_obj_request_submit(obj_req);
	return 0;
}

static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
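
/*
 * Worked example (illustrative): obj_overlap == 9000 with PAGE_SIZE ==
 * 4096 gives calc_pages_for(0, 9000) == 3, so three bvecs are set up
 * covering 4096 + 4096 + 808 bytes.  Decrementing obj_overlap by each
 * bv_len is what makes the final rbd_assert(!obj_overlap) hold.
 */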

static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Use rbd_obj_issue_copyup()
		 * to re-submit the original write request -- the copyup
		 * operation itself will be a no-op, since someone must
		 * have populated the child object while we weren't
		 * looking.  Move to WRITE_FLAT state as we'll be done
		 * with the operation once the null copyup completes.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		return rbd_obj_issue_copyup(obj_req, 0);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
	return rbd_obj_read_from_parent(obj_req);
}

static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_GUARD:
		rbd_assert(!obj_req->xferred);
		if (obj_req->result == -ENOENT) {
			/*
			 * The target object doesn't exist.  Read the data for
			 * the entire target object up to the overlap point (if
			 * any) from the parent, so we can use it for a copyup.
			 */
			ret = rbd_obj_handle_write_guard(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
		/* fall through */
	case RBD_OBJ_WRITE_FLAT:
		if (!obj_req->result)
			/*
			 * There is no such thing as a successful short
			 * write -- indicate the whole request was satisfied.
			 */
			obj_req->xferred = obj_req->ex.oe_len;
		return true;
	case RBD_OBJ_WRITE_COPYUP:
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		if (obj_req->result)
			goto again;

		rbd_assert(obj_req->xferred);
		ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
		if (ret) {
			obj_req->result = ret;
			return true;
		}
		return false;
	default:
		BUG();
	}
}
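
/*
 * Write state machine implemented above (one object request):
 *
 *	WRITE_GUARD --(-ENOENT)--> read from parent, state = WRITE_COPYUP
 *	WRITE_COPYUP --(parent data in hand)--> issue copyup, state = WRITE_GUARD
 *	WRITE_GUARD/WRITE_FLAT --(op done)--> complete
 *
 * A guarded write that finds the target object already present falls
 * through to the WRITE_FLAT case and completes without a copyup.
 */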

/*
 * Returns true if @obj_req is completed, or false otherwise.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	switch (obj_req->img_request->op_type) {
	case OBJ_OP_READ:
		return rbd_obj_handle_read(obj_req);
	case OBJ_OP_WRITE:
		return rbd_obj_handle_write(obj_req);
	case OBJ_OP_DISCARD:
		if (rbd_obj_handle_write(obj_req)) {
			/*
			 * Hide -ENOENT from delete/truncate/zero -- discarding
			 * a non-existent object is not a problem.
			 */
			if (obj_req->result == -ENOENT) {
				obj_req->result = 0;
				obj_req->xferred = obj_req->ex.oe_len;
			}
			return true;
		}
		return false;
	default:
		BUG();
	}
}

static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	rbd_assert((!obj_req->result &&
		    obj_req->xferred == obj_req->ex.oe_len) ||
		   (obj_req->result < 0 && !obj_req->xferred));
	if (!obj_req->result) {
		img_req->xferred += obj_req->xferred;
		return;
	}

	rbd_warn(img_req->rbd_dev,
		 "%s at objno %llu %llu~%llu result %d xferred %llu",
		 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
		 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
		 obj_req->xferred);
	if (!img_req->result) {
		img_req->result = obj_req->result;
		img_req->xferred = 0;
	}
}

static void rbd_img_end_child_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req = img_req->obj_request;

	rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
		   (img_req->result < 0 && !img_req->xferred));

	obj_req->result = img_req->result;
	obj_req->xferred = img_req->xferred;
	rbd_img_request_put(img_req);
}

static void rbd_img_end_request(struct rbd_img_request *img_req)
{
	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == blk_rq_bytes(img_req->rq)) ||
		   (img_req->result < 0 && !img_req->xferred));

	blk_mq_end_request(img_req->rq,
			   errno_to_blk_status(img_req->result));
	rbd_img_request_put(img_req);
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req;

again:
	if (!__rbd_obj_handle_request(obj_req))
		return;

	img_req = obj_req->img_request;
	spin_lock(&img_req->completion_lock);
	rbd_obj_end_request(obj_req);
	rbd_assert(img_req->pending_count);
	if (--img_req->pending_count) {
		spin_unlock(&img_req->completion_lock);
		return;
	}

	spin_unlock(&img_req->completion_lock);
	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		obj_req = img_req->obj_request;
		rbd_img_end_child_request(img_req);
		goto again;
	}
	rbd_img_end_request(img_req);
}

static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid; /* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}

static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] != '\0');

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
		return ret;

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	__rbd_lock(rbd_dev, cookie);
	return 0;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] == '\0');

	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock: %d", ret);

	/* treat errors as the image is unlocked */
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	rbd_dev->lock_cookie[0] = '\0';
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}

static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	void *p = buf;

	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);

	/* encode *LockPayload NotifyMessage (op + ClientId) */
	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_32(&p, notify_op);
	ceph_encode_64(&p, cid.gid);
	ceph_encode_64(&p, cid.handle);

	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}
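
/*
 * The notify payload built above is fixed-size: a ceph start-encoding
 * header (CEPH_ENCODING_START_BLK_LEN bytes -- struct version, compat
 * version and payload length), then a 32-bit notify_op and the 64-bit
 * gid and 64-bit handle of our client id.  Hence the
 * buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN] declaration.
 */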

static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}

static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}

static int rbd_request_lock(struct rbd_device *rbd_dev)
{
	struct page **reply_pages;
	size_t reply_len;
	bool lock_owner_responded = false;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);
	if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		goto out;
	}

	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
		void *p = page_address(reply_pages[0]);
		void *const end = p + reply_len;
		u32 n;

		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;

			ceph_decode_need(&p, end, 8 + 8, e_inval);
			p += 8 + 8; /* skip gid and cookie */

			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;

			if (lock_owner_responded) {
				rbd_warn(rbd_dev,
					 "duplicate lock owners detected");
				ret = -EIO;
				goto out;
			}

			lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}

	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ETIMEDOUT;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
{
	dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);

	cancel_delayed_work(&rbd_dev->lock_dwork);
	if (wake_all)
		wake_up_all(&rbd_dev->lock_waitq);
	else
		wake_up(&rbd_dev->lock_waitq);
}

static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	u8 lock_type;
	char *lock_tag;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;

	if (*num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
	}

out:
	kfree(lock_tag);
	return ret;
}

static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u64 cookie;
	int i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
				.handle = cookie,
			};

			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
			     rbd_dev, cid.gid, cid.handle);
			rbd_set_owner_cid(rbd_dev, &cid);
			ret = 1;
			goto out;
		}
	}

	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
	ret = 0;

out:
	kfree(watchers);
	return ret;
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	int ret;

	for (;;) {
		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		if (ret) {
			if (ret > 0)
				ret = 0; /* have to request lock */
			goto out;
		}

		rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
			 ENTITY_NAME(lockers[0].id.name));

		ret = ceph_monc_blacklist_add(&client->monc,
					      &lockers[0].info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
			goto out;

again:
		ceph_free_lockers(lockers, num_lockers);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}
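
/*
 * Note the order above: the stale owner is blacklisted on the monitors
 * before its lock is broken, so a client that is merely unresponsive
 * (rather than dead) cannot slip in a write after losing the lock.
 */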

/*
 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
 */
static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
						int *pret)
{
	enum rbd_lock_state lock_state;

	down_read(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		lock_state = rbd_dev->lock_state;
		up_read(&rbd_dev->lock_rwsem);
		return lock_state;
	}

	up_read(&rbd_dev->lock_rwsem);
	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (!__rbd_is_lock_owner(rbd_dev)) {
		*pret = rbd_try_lock(rbd_dev);
		if (*pret)
			rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
	}

	lock_state = rbd_dev->lock_state;
	up_write(&rbd_dev->lock_rwsem);
	return lock_state;
}

static void rbd_acquire_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, lock_dwork);
	enum rbd_lock_state lock_state;
	int ret = 0;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
	if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
		if (lock_state == RBD_LOCK_STATE_LOCKED)
			wake_requests(rbd_dev, true);
		dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
		     rbd_dev, lock_state, ret);
		return;
	}

	ret = rbd_request_lock(rbd_dev);
	if (ret == -ETIMEDOUT) {
		goto again; /* treat this as a dead client */
	} else if (ret == -EROFS) {
		rbd_warn(rbd_dev, "peer will not release lock");
		/*
		 * If this is rbd_add_acquire_lock(), we want to fail
		 * immediately -- reuse BLACKLISTED flag.  Otherwise we
		 * want to block.
		 */
		if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			/* wake "rbd map --exclusive" process */
			wake_requests(rbd_dev, false);
		}
	} else if (ret < 0) {
		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}
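
/*
 * Retry intervals used above: a transient request-lock error is retried
 * after RBD_RETRY_DELAY (one second), while an acked-but-unreleased lock
 * is re-requested after 2 * RBD_NOTIFY_TIMEOUT (ten seconds), giving the
 * current owner a full notify window to release and announce it.
 */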

/*
 * lock_rwsem must be held for write
 */
static bool rbd_release_lock(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;

	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
	downgrade_write(&rbd_dev->lock_rwsem);
	/*
	 * Ensure that all in-flight IO is flushed.
	 *
	 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
	 * may be shared with other devices.
	 */
	ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
	up_read(&rbd_dev->lock_rwsem);

	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;

	rbd_unlock(rbd_dev);
	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO during ceph_osdc_sync()
	 * otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_wait_state_locked()
	 * after wake_requests() in rbd_handle_released_lock().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
	return true;
}

static void rbd_release_lock_work(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  unlock_work);

	down_write(&rbd_dev->lock_rwsem);
	rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}

static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
			     __func__, rbd_dev, cid.gid, cid.handle,
			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}

/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}

static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
				     u64 notify_id, u64 cookie, s32 *result)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char buf[4 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	int ret;

	if (result) {
		void *p = buf;

		/* encode ResponseMessage */
		ceph_start_encoding(&p, 1, 1,
				    buf_size - CEPH_ENCODING_START_BLK_LEN);
		ceph_encode_32(&p, *result);
	} else {
		buf_size = 0;
	}

	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   buf, buf_size);
	if (ret)
		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}

static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
{
	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}

static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
{
	struct rbd_device *rbd_dev = arg;
	void *p = data;
	void *const end = p + data_len;
	u8 struct_v = 0;
	u32 len;
	u32 notify_op;
	int ret;

	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
	     __func__, rbd_dev, cookie, notify_id, data_len);
	if (data_len) {
		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
					  &struct_v, &len);
		if (ret) {
			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
				 ret);
			return;
		}

		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}

	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
	switch (notify_op) {
	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_RELEASED_LOCK:
		rbd_handle_released_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_REQUEST_LOCK:
		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
		if (ret <= 0)
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, ret);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_HEADER_UPDATE:
		ret = rbd_dev_refresh(rbd_dev);
		if (ret)
			rbd_warn(rbd_dev, "refresh failed: %d", ret);

		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	default:
		if (rbd_is_lock_owner(rbd_dev))
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, -EOPNOTSUPP);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	}
}

static void __rbd_unregister_watch(struct rbd_device *rbd_dev);

static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	down_write(&rbd_dev->lock_rwsem);
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	up_write(&rbd_dev->lock_rwsem);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;

		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
	}
	mutex_unlock(&rbd_dev->watch_mutex);
}

/*
 * watch_mutex must be locked
 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}

/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}

static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}

static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
				  RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		if (rbd_release_lock(rbd_dev))
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
	}
}

static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret == -EBLACKLISTED || ret == -ENOENT) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			wake_requests(rbd_dev, true);
		} else {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
		}
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		rbd_reacquire_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct page *req_page = NULL;
	struct page *reply_page;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	if (outbound) {
		if (outbound_size > PAGE_SIZE)
			return -E2BIG;

		req_page = alloc_page(GFP_KERNEL);
		if (!req_page)
			return -ENOMEM;

		memcpy(page_address(req_page), outbound, outbound_size);
	}

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		if (req_page)
			__free_page(req_page);
		return -ENOMEM;
	}

	ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
			     CEPH_OSD_FLAG_READ, req_page, outbound_size,
			     reply_page, &inbound_size);
	if (!ret) {
		memcpy(inbound, page_address(reply_page), inbound_size);
		ret = inbound_size;
	}

	if (req_page)
		__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
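
/*
 * Typical call, as used by the v2 header helpers below -- fetching the
 * size of a snapshot from the "get_size" method of the header object:
 *
 *	__le64 snapid = cpu_to_le64(snap_id);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 *
 * A non-negative return is the number of reply bytes copied into the
 * inbound buffer (here &size_buf), which callers must validate.
 */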

/*
 * lock_rwsem must be held for read
 */
static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
	DEFINE_WAIT(wait);
	unsigned long timeout;
	int ret = 0;

	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
		return -EBLACKLISTED;

	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		return 0;

	if (!may_acquire) {
		rbd_warn(rbd_dev, "exclusive lock required");
		return -EROFS;
	}

	do {
		/*
		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
		 * and cancel_delayed_work() in wake_requests().
		 */
		dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
					  TASK_UNINTERRUPTIBLE);
		up_read(&rbd_dev->lock_rwsem);
		timeout = schedule_timeout(ceph_timeout_jiffies(
						rbd_dev->opts->lock_timeout));
		down_read(&rbd_dev->lock_rwsem);
		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			ret = -EBLACKLISTED;
			break;
		}
		if (!timeout) {
			rbd_warn(rbd_dev, "timed out waiting for lock");
			ret = -ETIMEDOUT;
			break;
		}
	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	finish_wait(&rbd_dev->lock_waitq, &wait);
	return ret;
}

static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	bool must_be_locked;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */
	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	rbd_assert(op_type == OBJ_OP_READ ||
		   rbd_dev->spec->snap_id == CEPH_NOSNAP);

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	must_be_locked =
	    (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	    (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
	if (must_be_locked) {
		down_read(&rbd_dev->lock_rwsem);
		result = rbd_wait_state_locked(rbd_dev,
					       !rbd_dev->opts->exclusive);
		if (result)
			goto err_unlock;
	}

	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_unlock;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_fill_nodata(img_request, offset, length);
	else
		result = rbd_img_fill_from_bio(img_request, offset, length,
					       rq->bio);
	if (result)
		goto err_img_request;

	rbd_img_request_submit(img_request);
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_unlock:
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}

static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}
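
/*
 * Note the blk-mq PDU trick at work here: tag_set.cmd_size (set in
 * rbd_init_disk()) is sizeof(struct work_struct), so every request
 * carries its own work item, initialized once in rbd_init_request().
 * Dispatch is then just a queue_work(); blk_mq_rq_from_pdu() recovers
 * the request in rbd_queue_workfn().
 */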

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	blk_cleanup_queue(rbd_dev->disk->queue);
	blk_mq_free_tag_set(&rbd_dev->tag_set);
	put_disk(rbd_dev->disk);
	rbd_dev->disk = NULL;
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	unsigned int objset_bytes =
	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, UINT_MAX);
	blk_queue_io_min(q, objset_bytes);
	blk_queue_io_opt(q, objset_bytes);

	if (rbd_dev->opts->trim) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.discard_granularity = objset_bytes;
		blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
		blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
	}

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
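
/*
 * Worked example with default striping (illustrative): object_size of
 * 4 MiB and stripe_count of 1 give objset_bytes == 4 MiB, so the queue
 * advertises io_min/io_opt of 4 MiB and max_hw_sectors of 8192 512-byte
 * sectors (4 MiB >> SECTOR_SHIFT).
 */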

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_pool_ns_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "pool_ns %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->pool_ns ?: "",
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_pool_ns.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->pool_ns);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;
	if (spec->pool_ns) {
		WARN_ON(!*spec->pool_ns);
		rbd_dev->header_oloc.pool_ns =
		    ceph_find_or_create_string(spec->pool_ns,
					       strlen(spec->pool_ns));
	}

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}

/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_object_prefix",
				  NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_features",
				  &snapid, sizeof(snapid),
				  &features_buf, sizeof(features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
struct parent_image_info {
	u64		pool_id;
	const char	*pool_ns;
	const char	*image_id;
	u64		snap_id;

	bool		has_overlap;
	u64		overlap;
};
/*
 * The caller is responsible for @pii.
 */
static int decode_parent_image_spec(void **p, void *end,
				    struct parent_image_info *pii)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
	pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->pool_ns)) {
		ret = PTR_ERR(pii->pool_ns);
		pii->pool_ns = NULL;
		return ret;
	}
	pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->image_id)) {
		ret = PTR_ERR(pii->image_id);
		pii->image_id = NULL;
		return ret;
	}
	ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
	return 0;

e_inval:
	return -EINVAL;
}
static int __get_parent_info(struct rbd_device *rbd_dev,
			     struct page *req_page,
			     struct page *reply_page,
			     struct parent_image_info *pii)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	size_t reply_len = PAGE_SIZE;
	void *p, *end;
	int ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "parent_get", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret == -EOPNOTSUPP ? 1 : ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ret = decode_parent_image_spec(&p, end, pii);
	if (ret)
		return ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
	if (pii->has_overlap)
		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

	return 0;

e_inval:
	return -EINVAL;
}
/*
 * The caller is responsible for @pii.
 */
static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
				    struct page *req_page,
				    struct page *reply_page,
				    struct parent_image_info *pii)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	size_t reply_len = PAGE_SIZE;
	void *p, *end;
	int ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "get_parent", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
	pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->image_id)) {
		ret = PTR_ERR(pii->image_id);
		pii->image_id = NULL;
		return ret;
	}
	ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
	pii->has_overlap = true;
	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

	return 0;

e_inval:
	return -EINVAL;
}
static int get_parent_info(struct rbd_device *rbd_dev,
			   struct parent_image_info *pii)
{
	struct page *req_page, *reply_page;
	void *p;
	int ret;

	req_page = alloc_page(GFP_KERNEL);
	if (!req_page)
		return -ENOMEM;

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		__free_page(req_page);
		return -ENOMEM;
	}

	p = page_address(req_page);
	ceph_encode_64(&p, rbd_dev->spec->snap_id);
	ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
	if (ret > 0)
		ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
					       pii);

	__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	struct parent_image_info pii = { 0 };
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	ret = get_parent_info(rbd_dev, &pii);
	if (ret)
		goto out_err;

	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
	     pii.has_overlap, pii.overlap);

	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 *
		 * If !pii.has_overlap, the parent image spec is not
		 * applicable.  It's there to avoid duplication in each
		 * snapshot record.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pii.pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pii.pool_id, U32_MAX);
		goto out_err;
	}

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pii.pool_id;
		if (pii.pool_ns && *pii.pool_ns) {
			parent_spec->pool_ns = pii.pool_ns;
			pii.pool_ns = NULL;
		}
		parent_spec->image_id = pii.image_id;
		pii.image_id = NULL;
		parent_spec->snap_id = pii.snap_id;

		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!pii.overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = pii.overlap;

out:
	ret = 0;
out_err:
	kfree(pii.pool_ns);
	kfree(pii.image_id);
	rbd_spec_put(parent_spec);
	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, "get_stripe_unit_count",
				NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	p = &striping_info_buf;
	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
	rbd_dev->header.stripe_count = ceph_decode_64(&p);
	return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}
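/*
 * Illustrative note (example names are ours): for format 1 images,
 * header.snap_names is one buffer of consecutive NUL-terminated
 * names, e.g. "one\0two\0three\0", in the same order as
 * snapc->snaps[].  That is why the loop above advances by
 * strlen(snap_name) + 1 for each candidate it rejects.
 */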
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
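/*
 * Illustrative note (ours): the "get_snapcontext" reply decoded
 * above is assumed to be laid out as
 *
 *	__le64 seq;                   maximum snapshot id so far
 *	__le32 snap_count;
 *	__le64 snaps[snap_count];     snapshot ids
 *
 * which is exactly why reply_buf is sized as sizeof(__le64) +
 * sizeof(__le32) + RBD_MAX_SNAP_COUNT * sizeof(__le64).
 */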
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
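/*
 * Illustrative example (values are ours): given
 *
 *	buf = "  1.2.3.4:6789 name=admin rbd foo"
 *
 * next_token(&buf) skips the two leading spaces and returns 12, the
 * length of "1.2.3.4:6789".  dup_token(&buf, NULL) additionally
 * returns a kmemdup'ed, NUL-terminated copy of that token and leaves
 * buf pointing at " name=admin rbd foo" for the next call.
 */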
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct parse_rbd_opts_ctx pctx = { 0 };
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	pctx.spec = rbd_spec_alloc();
	if (!pctx.spec)
		goto out_mem;

	pctx.spec->pool_name = dup_token(&buf, NULL);
	if (!pctx.spec->pool_name)
		goto out_mem;
	if (!*pctx.spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	pctx.spec->image_name = dup_token(&buf, NULL);
	if (!pctx.spec->image_name)
		goto out_mem;
	if (!*pctx.spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	pctx.spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
	if (!pctx.opts)
		goto out_mem;

	pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
	pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
	pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
	pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
	pctx.opts->trim = RBD_TRIM_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
				   mon_addrs + mon_addrs_size - 1,
				   parse_rbd_opts_token, &pctx);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = pctx.opts;
	*rbd_spec = pctx.spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(pctx.opts);
	rbd_spec_put(pctx.spec);
	kfree(options);

	return ret;
}
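/*
 * Illustrative example (addresses and key are made up): mapping
 * image "foo" from pool "rbd" at its head revision looks like
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=AQB... rbd foo -" \
 *		> /sys/bus/rbd/add
 *
 * which this parser splits into mon_addrs "1.2.3.4:6789", options
 * "name=admin,secret=AQB...", pool "rbd", image "foo", and the
 * RBD_SNAP_HEAD_NAME placeholder "-".
 */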
static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		rbd_unlock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
	int ret;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
		return -EINVAL;
	}

	/* FIXME: "rbd map --exclusive" should be interruptible */
	down_read(&rbd_dev->lock_rwsem);
	ret = rbd_wait_state_locked(rbd_dev, true);
	up_read(&rbd_dev->lock_rwsem);
	if (ret) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock");
		ret = -EROFS;
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}
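/*
 * Illustrative note (names are examples, exact prefix strings live
 * in rbd_types.h): for a format 2 image named "foo", the id object
 * is "rbd_id.foo" (RBD_ID_PREFIX plus the user-visible name), and
 * its contents hold the persistent image id that the rest of the
 * image's object names are derived from.
 */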
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_mapping;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}
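/*
 * Illustrative example (image name and id are made up, actual
 * prefix/suffix strings are defined in rbd_types.h): a format 1
 * image "foo" keeps its header in an object named with RBD_SUFFIX
 * appended ("foo.rbd" style), while a format 2 image with id
 * "abc123" keeps it under RBD_HEADER_PREFIX plus the image id
 * ("rbd_header.abc123" style).
 */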
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	if (rbd_dev->opts)
		rbd_unregister_watch(rbd_dev);

	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s%s%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->pool_ns ?: "",
					rbd_dev->spec->pool_ns ? "/" : "",
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s%s%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->pool_ns ?: "",
				rbd_dev->spec->pool_ns ? "/" : "",
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_unregister_watch(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		rbd_dev->opts->read_only = true;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	if (rbd_dev->opts->exclusive) {
		rc = rbd_add_acquire_lock(rbd_dev);
		if (rc)
			goto err_out_device_setup;
	}

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	add_disk(rbd_dev->disk);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
err_out_device_setup:
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool already = false;
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}
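/*
 * Illustrative example: unmapping the device with id 0, optionally
 * forcing teardown past open handles (matching the "%d %5s" parse
 * above):
 *
 *	$ echo "0" > /sys/bus/rbd/remove
 *	$ echo "0 force" > /sys/bus/rbd/remove
 */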
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}
static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");