/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
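
/*
 * Illustrative sketch (not part of the original source): the pair behaves
 * like a saturating get/put, e.g. for the parent reference count used
 * further below:
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) < 0)
 *		return false;	// counter was 0, or pinned at INT_MAX
 *	...
 *	atomic_dec_return_safe(&rbd_dev->parent_ref);
 */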
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
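
/*
 * Worked example (illustrative): with the four bits above,
 * RBD_FEATURES_ALL is (1 << 0) | (1 << 1) | (1 << 2) | (1 << 7) == 0x87,
 * which is the value reported via the supported_features bus attribute
 * defined later in this file.
 */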
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
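
/*
 * For example (hypothetical values), mapping "rbd/foo@snap1" might fill
 * a spec roughly as:
 *
 *	pool_id  = 2,              pool_name  = "rbd"
 *	image_id = "101e6b8b4567", image_name = "foo"
 *	snap_id  = 4,              snap_name  = "snap1"
 */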
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	u64			object_no;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */

	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define	for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define	for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define	for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
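
/*
 * Typical iteration with these macros (see rbd_img_request_complete()
 * below for the real use):
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 */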
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
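
/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, dev_id 2 maps
 * to minor 32, leaving minors 33..47 for partitions of that device, and
 * any minor in 32..47 maps back to dev_id 2.
 */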
static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
{
	return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	       rbd_dev->spec->snap_id == CEPH_NOSNAP &&
	       !rbd_dev->mapping.read_only;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots are mapped read-only, so refuse to clear the flag */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
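
/*
 * For example, a (hypothetical) map option string such as
 *
 *	"queue_depth=128,ro,lock_on_read"
 *
 * is split into tokens by libceph and each token fed to this callback,
 * leaving rbd_opts->queue_depth == 128, ->read_only == true and
 * ->lock_on_read == true.
 */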
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
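
/*
 * Illustrative numbers: rbd images are typically created with order 22,
 * i.e. 1U << 22 == 4 MiB objects.  rbd_dev_ondisk_valid() above bounds
 * the order to at least SECTOR_SHIFT (9) and at most 31.
 */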
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
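
/*
 * E.g. for a plain (non-fancy-striped) image with 4 MiB objects and no
 * separate data pool: stripe_unit == object_size == 4 MiB,
 * stripe_count == 1, and layout.pool_id is simply the image's own pool.
 */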
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
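
/*
 * Example: for a snapc->snaps array of { 8, 5, 2 } (descending, as the
 * OSD keeps it), rbd_dev_snap_index(rbd_dev, 5) returns 1 while
 * rbd_dev_snap_index(rbd_dev, 3) returns BAD_SNAP_INDEX.
 */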
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
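
/*
 * Worked example with 4 MiB (order 22) objects: an image extent at
 * offset 0x3ff000 of length 0x3000 has segment offset 0x3ff000 and is
 * clipped to 0x1000 bytes, since only that much remains before the
 * object boundary at 0x400000.
 */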
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
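
/*
 * Illustrative use (a sketch, not from the original source): a caller
 * splitting a block request across object boundaries clones one segment
 * at a time,
 *
 *	obj_request->bio_list = bio_chain_clone_range(&bio_list, &bio_offset,
 *						      segment_length, GFP_NOIO);
 *
 * after which the in-out bio_list/bio_offset pair points at the first
 * un-cloned byte, ready for the next segment.
 */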
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->object_no, obj_request->offset,
	     obj_request->length, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
{
	obj_request->result = err;
	obj_request->xferred = 0;
	/*
	 * kludge - mirror rbd_obj_request_submit() to match a put in
	 * rbd_img_obj_callback()
	 */
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	obj_request_done_set(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d",
			 obj_request->object_no, opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}

static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     int num_ops, unsigned int flags,
		     struct rbd_obj_request *obj_request)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_flags = flags;
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_request;

	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_request->object_no))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	return __rbd_osd_req_create(rbd_dev, snapc, num_ops,
	    (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ?
	    CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request);
}

/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	return __rbd_osd_req_create(img_request->rbd_dev,
				    img_request->snapc, num_osd_ops,
				    CEPH_OSD_FLAG_WRITE, obj_request);
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
static struct rbd_obj_request *
rbd_obj_request_create(enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(obj_request_type_valid(type));

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		/* img_data requests don't own their page array */
		if (obj_request->pages &&
		    !obj_request_img_data_test(obj_request))
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
2216 static struct rbd_img_request
*rbd_parent_request_create(
2217 struct rbd_obj_request
*obj_request
,
2218 u64 img_offset
, u64 length
)
2220 struct rbd_img_request
*parent_request
;
2221 struct rbd_device
*rbd_dev
;
2223 rbd_assert(obj_request
->img_request
);
2224 rbd_dev
= obj_request
->img_request
->rbd_dev
;
2226 parent_request
= rbd_img_request_create(rbd_dev
->parent
, img_offset
,
2227 length
, OBJ_OP_READ
, NULL
);
2228 if (!parent_request
)
2231 img_request_child_set(parent_request
);
2232 rbd_obj_request_get(obj_request
);
2233 parent_request
->obj_request
= obj_request
;
2235 return parent_request
;
2238 static void rbd_parent_request_destroy(struct kref
*kref
)
2240 struct rbd_img_request
*parent_request
;
2241 struct rbd_obj_request
*orig_request
;
2243 parent_request
= container_of(kref
, struct rbd_img_request
, kref
);
2244 orig_request
= parent_request
->obj_request
;
2246 parent_request
->obj_request
= NULL
;
2247 rbd_obj_request_put(orig_request
);
2248 img_request_child_clear(parent_request
);
2250 rbd_img_request_destroy(kref
);
2253 static bool rbd_img_obj_end_request(struct rbd_obj_request
*obj_request
)
2255 struct rbd_img_request
*img_request
;
2256 unsigned int xferred
;
2260 rbd_assert(obj_request_img_data_test(obj_request
));
2261 img_request
= obj_request
->img_request
;
2263 rbd_assert(obj_request
->xferred
<= (u64
)UINT_MAX
);
2264 xferred
= (unsigned int)obj_request
->xferred
;
2265 result
= obj_request
->result
;
2267 struct rbd_device
*rbd_dev
= img_request
->rbd_dev
;
2268 enum obj_operation_type op_type
;
2270 if (img_request_discard_test(img_request
))
2271 op_type
= OBJ_OP_DISCARD
;
2272 else if (img_request_write_test(img_request
))
2273 op_type
= OBJ_OP_WRITE
;
2275 op_type
= OBJ_OP_READ
;
2277 rbd_warn(rbd_dev
, "%s %llx at %llx (%llx)",
2278 obj_op_name(op_type
), obj_request
->length
,
2279 obj_request
->img_offset
, obj_request
->offset
);
2280 rbd_warn(rbd_dev
, " result %d xferred %x",
2282 if (!img_request
->result
)
2283 img_request
->result
= result
;
2285 * Need to end I/O on the entire obj_request worth of
2286 * bytes in case of error.
2288 xferred
= obj_request
->length
;
2291 if (img_request_child_test(img_request
)) {
2292 rbd_assert(img_request
->obj_request
!= NULL
);
2293 more
= obj_request
->which
< img_request
->obj_request_count
- 1;
2295 rbd_assert(img_request
->rq
!= NULL
);
2297 more
= blk_update_request(img_request
->rq
, result
, xferred
);
2299 __blk_mq_end_request(img_request
->rq
, result
);
2305 static void rbd_img_obj_callback(struct rbd_obj_request
*obj_request
)
2307 struct rbd_img_request
*img_request
;
2308 u32 which
= obj_request
->which
;
2311 rbd_assert(obj_request_img_data_test(obj_request
));
2312 img_request
= obj_request
->img_request
;
2314 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
2315 rbd_assert(img_request
!= NULL
);
2316 rbd_assert(img_request
->obj_request_count
> 0);
2317 rbd_assert(which
!= BAD_WHICH
);
2318 rbd_assert(which
< img_request
->obj_request_count
);
2320 spin_lock_irq(&img_request
->completion_lock
);
2321 if (which
!= img_request
->next_completion
)
2324 for_each_obj_request_from(img_request
, obj_request
) {
2326 rbd_assert(which
< img_request
->obj_request_count
);
2328 if (!obj_request_done_test(obj_request
))
2330 more
= rbd_img_obj_end_request(obj_request
);
2334 rbd_assert(more
^ (which
== img_request
->obj_request_count
));
2335 img_request
->next_completion
= which
;
2337 spin_unlock_irq(&img_request
->completion_lock
);
2338 rbd_img_request_put(img_request
);
2341 rbd_img_request_complete(img_request
);
2345 * Add individual osd ops to the given ceph_osd_request and prepare
2346 * them for submission. num_ops is the current number of
2347 * osd operations already to the object request.
2349 static void rbd_img_obj_request_fill(struct rbd_obj_request
*obj_request
,
2350 struct ceph_osd_request
*osd_request
,
2351 enum obj_operation_type op_type
,
2352 unsigned int num_ops
)
2354 struct rbd_img_request
*img_request
= obj_request
->img_request
;
2355 struct rbd_device
*rbd_dev
= img_request
->rbd_dev
;
2356 u64 object_size
= rbd_obj_bytes(&rbd_dev
->header
);
2357 u64 offset
= obj_request
->offset
;
2358 u64 length
= obj_request
->length
;
2362 if (op_type
== OBJ_OP_DISCARD
) {
2363 if (!offset
&& length
== object_size
&&
2364 (!img_request_layered_test(img_request
) ||
2365 !obj_request_overlaps_parent(obj_request
))) {
2366 opcode
= CEPH_OSD_OP_DELETE
;
2367 } else if ((offset
+ length
== object_size
)) {
2368 opcode
= CEPH_OSD_OP_TRUNCATE
;
2370 down_read(&rbd_dev
->header_rwsem
);
2371 img_end
= rbd_dev
->header
.image_size
;
2372 up_read(&rbd_dev
->header_rwsem
);
2374 if (obj_request
->img_offset
+ length
== img_end
)
2375 opcode
= CEPH_OSD_OP_TRUNCATE
;
2377 opcode
= CEPH_OSD_OP_ZERO
;
2379 } else if (op_type
== OBJ_OP_WRITE
) {
2380 if (!offset
&& length
== object_size
)
2381 opcode
= CEPH_OSD_OP_WRITEFULL
;
2383 opcode
= CEPH_OSD_OP_WRITE
;
2384 osd_req_op_alloc_hint_init(osd_request
, num_ops
,
2385 object_size
, object_size
);
2388 opcode
= CEPH_OSD_OP_READ
;
2391 if (opcode
== CEPH_OSD_OP_DELETE
)
2392 osd_req_op_init(osd_request
, num_ops
, opcode
, 0);
2394 osd_req_op_extent_init(osd_request
, num_ops
, opcode
,
2395 offset
, length
, 0, 0);
2397 if (obj_request
->type
== OBJ_REQUEST_BIO
)
2398 osd_req_op_extent_osd_data_bio(osd_request
, num_ops
,
2399 obj_request
->bio_list
, length
);
2400 else if (obj_request
->type
== OBJ_REQUEST_PAGES
)
2401 osd_req_op_extent_osd_data_pages(osd_request
, num_ops
,
2402 obj_request
->pages
, length
,
2403 offset
& ~PAGE_MASK
, false, false);
2405 /* Discards are also writes */
2406 if (op_type
== OBJ_OP_WRITE
|| op_type
== OBJ_OP_DISCARD
)
2407 rbd_osd_req_format_write(obj_request
);
2409 rbd_osd_req_format_read(obj_request
);
2413 * Split up an image request into one or more object requests, each
2414 * to a different object. The "type" parameter indicates whether
2415 * "data_desc" is the pointer to the head of a list of bio
2416 * structures, or the base of a page array. In either case this
2417 * function assumes data_desc describes memory sufficient to hold
2418 * all data described by the image request.
2420 static int rbd_img_request_fill(struct rbd_img_request
*img_request
,
2421 enum obj_request_type type
,
2424 struct rbd_device
*rbd_dev
= img_request
->rbd_dev
;
2425 struct rbd_obj_request
*obj_request
= NULL
;
2426 struct rbd_obj_request
*next_obj_request
;
2427 struct bio
*bio_list
= NULL
;
2428 unsigned int bio_offset
= 0;
2429 struct page
**pages
= NULL
;
2430 enum obj_operation_type op_type
;
2434 dout("%s: img %p type %d data_desc %p\n", __func__
, img_request
,
2435 (int)type
, data_desc
);
2437 img_offset
= img_request
->offset
;
2438 resid
= img_request
->length
;
2439 rbd_assert(resid
> 0);
2440 op_type
= rbd_img_request_op_type(img_request
);
2442 if (type
== OBJ_REQUEST_BIO
) {
2443 bio_list
= data_desc
;
2444 rbd_assert(img_offset
==
2445 bio_list
->bi_iter
.bi_sector
<< SECTOR_SHIFT
);
2446 } else if (type
== OBJ_REQUEST_PAGES
) {
2451 struct ceph_osd_request
*osd_req
;
2452 u64 object_no
= img_offset
>> rbd_dev
->header
.obj_order
;
2453 u64 offset
= rbd_segment_offset(rbd_dev
, img_offset
);
2454 u64 length
= rbd_segment_length(rbd_dev
, img_offset
, resid
);
2456 obj_request
= rbd_obj_request_create(type
);
2460 obj_request
->object_no
= object_no
;
2461 obj_request
->offset
= offset
;
2462 obj_request
->length
= length
;
2465 * set obj_request->img_request before creating the
2466 * osd_request so that it gets the right snapc
2468 rbd_img_obj_request_add(img_request
, obj_request
);
2470 if (type
== OBJ_REQUEST_BIO
) {
2471 unsigned int clone_size
;
2473 rbd_assert(length
<= (u64
)UINT_MAX
);
2474 clone_size
= (unsigned int)length
;
2475 obj_request
->bio_list
=
2476 bio_chain_clone_range(&bio_list
,
2480 if (!obj_request
->bio_list
)
2482 } else if (type
== OBJ_REQUEST_PAGES
) {
2483 unsigned int page_count
;
2485 obj_request
->pages
= pages
;
2486 page_count
= (u32
)calc_pages_for(offset
, length
);
2487 obj_request
->page_count
= page_count
;
2488 if ((offset
+ length
) & ~PAGE_MASK
)
2489 page_count
--; /* more on last page */
2490 pages
+= page_count
;
2493 osd_req
= rbd_osd_req_create(rbd_dev
, op_type
,
2494 (op_type
== OBJ_OP_WRITE
) ? 2 : 1,
2499 obj_request
->osd_req
= osd_req
;
2500 obj_request
->callback
= rbd_img_obj_callback
;
2501 obj_request
->img_offset
= img_offset
;
2503 rbd_img_obj_request_fill(obj_request
, osd_req
, op_type
, 0);
2505 img_offset
+= length
;
2512 for_each_obj_request_safe(img_request
, obj_request
, next_obj_request
)
2513 rbd_img_obj_request_del(img_request
, obj_request
);
2519 rbd_osd_copyup_callback(struct rbd_obj_request
*obj_request
)
2521 struct rbd_img_request
*img_request
;
2522 struct rbd_device
*rbd_dev
;
2523 struct page
**pages
;
2526 dout("%s: obj %p\n", __func__
, obj_request
);
2528 rbd_assert(obj_request
->type
== OBJ_REQUEST_BIO
||
2529 obj_request
->type
== OBJ_REQUEST_NODATA
);
2530 rbd_assert(obj_request_img_data_test(obj_request
));
2531 img_request
= obj_request
->img_request
;
2532 rbd_assert(img_request
);
2534 rbd_dev
= img_request
->rbd_dev
;
2535 rbd_assert(rbd_dev
);
2537 pages
= obj_request
->copyup_pages
;
2538 rbd_assert(pages
!= NULL
);
2539 obj_request
->copyup_pages
= NULL
;
2540 page_count
= obj_request
->copyup_page_count
;
2541 rbd_assert(page_count
);
2542 obj_request
->copyup_page_count
= 0;
2543 ceph_release_page_vector(pages
, page_count
);
2546 * We want the transfer count to reflect the size of the
2547 * original write request. There is no such thing as a
2548 * successful short write, so if the request was successful
2549 * we can just set it to the originally-requested length.
2551 if (!obj_request
->result
)
2552 obj_request
->xferred
= obj_request
->length
;
2554 obj_request_done_set(obj_request
);
2558 rbd_img_obj_parent_read_full_callback(struct rbd_img_request
*img_request
)
2560 struct rbd_obj_request
*orig_request
;
2561 struct ceph_osd_request
*osd_req
;
2562 struct rbd_device
*rbd_dev
;
2563 struct page
**pages
;
2564 enum obj_operation_type op_type
;
2569 rbd_assert(img_request_child_test(img_request
));
2571 /* First get what we need from the image request */
2573 pages
= img_request
->copyup_pages
;
2574 rbd_assert(pages
!= NULL
);
2575 img_request
->copyup_pages
= NULL
;
2576 page_count
= img_request
->copyup_page_count
;
2577 rbd_assert(page_count
);
2578 img_request
->copyup_page_count
= 0;
2580 orig_request
= img_request
->obj_request
;
2581 rbd_assert(orig_request
!= NULL
);
2582 rbd_assert(obj_request_type_valid(orig_request
->type
));
2583 img_result
= img_request
->result
;
2584 parent_length
= img_request
->length
;
2585 rbd_assert(img_result
|| parent_length
== img_request
->xferred
);
2586 rbd_img_request_put(img_request
);
2588 rbd_assert(orig_request
->img_request
);
2589 rbd_dev
= orig_request
->img_request
->rbd_dev
;
2590 rbd_assert(rbd_dev
);
2593 * If the overlap has become 0 (most likely because the
2594 * image has been flattened) we need to free the pages
2595 * and re-submit the original write request.
2597 if (!rbd_dev
->parent_overlap
) {
2598 ceph_release_page_vector(pages
, page_count
);
2599 rbd_obj_request_submit(orig_request
);
2607 * The original osd request is of no use to use any more.
2608 * We need a new one that can hold the three ops in a copyup
2609 * request. Allocate the new copyup osd request for the
2610 * original request, and release the old one.
2612 img_result
= -ENOMEM
;
2613 osd_req
= rbd_osd_req_create_copyup(orig_request
);
2616 rbd_osd_req_destroy(orig_request
->osd_req
);
2617 orig_request
->osd_req
= osd_req
;
2618 orig_request
->copyup_pages
= pages
;
2619 orig_request
->copyup_page_count
= page_count
;
2621 /* Initialize the copyup op */
2623 osd_req_op_cls_init(osd_req
, 0, CEPH_OSD_OP_CALL
, "rbd", "copyup");
2624 osd_req_op_cls_request_data_pages(osd_req
, 0, pages
, parent_length
, 0,
2627 /* Add the other op(s) */
2629 op_type
= rbd_img_request_op_type(orig_request
->img_request
);
2630 rbd_img_obj_request_fill(orig_request
, osd_req
, op_type
, 1);
2632 /* All set, send it off. */
2634 rbd_obj_request_submit(orig_request
);
2638 ceph_release_page_vector(pages
, page_count
);
2639 rbd_obj_request_error(orig_request
, img_result
);
2643 * Read from the parent image the range of data that covers the
2644 * entire target of the given object request. This is used for
2645 * satisfying a layered image write request when the target of an
2646 * object request from the image request does not exist.
2648 * A page array big enough to hold the returned data is allocated
2649 * and supplied to rbd_img_request_fill() as the "data descriptor."
2650 * When the read completes, this page array will be transferred to
2651 * the original object request for the copyup operation.
2653 * If an error occurs, it is recorded as the result of the original
2654 * object request in rbd_img_obj_exists_callback().
2656 static int rbd_img_obj_parent_read_full(struct rbd_obj_request
*obj_request
)
2658 struct rbd_device
*rbd_dev
= obj_request
->img_request
->rbd_dev
;
2659 struct rbd_img_request
*parent_request
= NULL
;
2662 struct page
**pages
= NULL
;
2666 rbd_assert(rbd_dev
->parent
!= NULL
);
2669 * Determine the byte range covered by the object in the
2670 * child image to which the original request was to be sent.
2672 img_offset
= obj_request
->img_offset
- obj_request
->offset
;
2673 length
= rbd_obj_bytes(&rbd_dev
->header
);
2676 * There is no defined parent data beyond the parent
2677 * overlap, so limit what we read at that boundary if
2680 if (img_offset
+ length
> rbd_dev
->parent_overlap
) {
2681 rbd_assert(img_offset
< rbd_dev
->parent_overlap
);
2682 length
= rbd_dev
->parent_overlap
- img_offset
;
2686 * Allocate a page array big enough to receive the data read
2689 page_count
= (u32
)calc_pages_for(0, length
);
2690 pages
= ceph_alloc_page_vector(page_count
, GFP_KERNEL
);
2691 if (IS_ERR(pages
)) {
2692 result
= PTR_ERR(pages
);
2698 parent_request
= rbd_parent_request_create(obj_request
,
2699 img_offset
, length
);
2700 if (!parent_request
)
2703 result
= rbd_img_request_fill(parent_request
, OBJ_REQUEST_PAGES
, pages
);
2707 parent_request
->copyup_pages
= pages
;
2708 parent_request
->copyup_page_count
= page_count
;
2709 parent_request
->callback
= rbd_img_obj_parent_read_full_callback
;
2711 result
= rbd_img_request_submit(parent_request
);
2715 parent_request
->copyup_pages
= NULL
;
2716 parent_request
->copyup_page_count
= 0;
2717 parent_request
->obj_request
= NULL
;
2718 rbd_obj_request_put(obj_request
);
2721 ceph_release_page_vector(pages
, page_count
);
2723 rbd_img_request_put(parent_request
);
2727 static void rbd_img_obj_exists_callback(struct rbd_obj_request
*obj_request
)
2729 struct rbd_obj_request
*orig_request
;
2730 struct rbd_device
*rbd_dev
;
2733 rbd_assert(!obj_request_img_data_test(obj_request
));
2736 * All we need from the object request is the original
2737 * request and the result of the STAT op. Grab those, then
2738 * we're done with the request.
2740 orig_request
= obj_request
->obj_request
;
2741 obj_request
->obj_request
= NULL
;
2742 rbd_obj_request_put(orig_request
);
2743 rbd_assert(orig_request
);
2744 rbd_assert(orig_request
->img_request
);
2746 result
= obj_request
->result
;
2747 obj_request
->result
= 0;
2749 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__
,
2750 obj_request
, orig_request
, result
,
2751 obj_request
->xferred
, obj_request
->length
);
2752 rbd_obj_request_put(obj_request
);
2755 * If the overlap has become 0 (most likely because the
2756 * image has been flattened) we need to re-submit the
2759 rbd_dev
= orig_request
->img_request
->rbd_dev
;
2760 if (!rbd_dev
->parent_overlap
) {
2761 rbd_obj_request_submit(orig_request
);
2766 * Our only purpose here is to determine whether the object
2767 * exists, and we don't want to treat the non-existence as
2768 * an error. If something else comes back, transfer the
2769 * error to the original request and complete it now.
2772 obj_request_existence_set(orig_request
, true);
2773 } else if (result
== -ENOENT
) {
2774 obj_request_existence_set(orig_request
, false);
2776 goto fail_orig_request
;
2780 * Resubmit the original request now that we have recorded
2781 * whether the target object exists.
2783 result
= rbd_img_obj_request_submit(orig_request
);
2785 goto fail_orig_request
;
2790 rbd_obj_request_error(orig_request
, result
);
2793 static int rbd_img_obj_exists_submit(struct rbd_obj_request
*obj_request
)
2795 struct rbd_device
*rbd_dev
= obj_request
->img_request
->rbd_dev
;
2796 struct rbd_obj_request
*stat_request
;
2797 struct page
**pages
;
2802 stat_request
= rbd_obj_request_create(OBJ_REQUEST_PAGES
);
2806 stat_request
->object_no
= obj_request
->object_no
;
2808 stat_request
->osd_req
= rbd_osd_req_create(rbd_dev
, OBJ_OP_READ
, 1,
2810 if (!stat_request
->osd_req
) {
2812 goto fail_stat_request
;
2816 * The response data for a STAT call consists of:
2823 size
= sizeof (__le64
) + sizeof (__le32
) + sizeof (__le32
);
2824 page_count
= (u32
)calc_pages_for(0, size
);
2825 pages
= ceph_alloc_page_vector(page_count
, GFP_KERNEL
);
2826 if (IS_ERR(pages
)) {
2827 ret
= PTR_ERR(pages
);
2828 goto fail_stat_request
;
2831 osd_req_op_init(stat_request
->osd_req
, 0, CEPH_OSD_OP_STAT
, 0);
2832 osd_req_op_raw_data_in_pages(stat_request
->osd_req
, 0, pages
, size
, 0,
2835 rbd_obj_request_get(obj_request
);
2836 stat_request
->obj_request
= obj_request
;
2837 stat_request
->pages
= pages
;
2838 stat_request
->page_count
= page_count
;
2839 stat_request
->callback
= rbd_img_obj_exists_callback
;
2841 rbd_obj_request_submit(stat_request
);
2845 rbd_obj_request_put(stat_request
);
2849 static bool img_obj_request_simple(struct rbd_obj_request
*obj_request
)
2851 struct rbd_img_request
*img_request
= obj_request
->img_request
;
2852 struct rbd_device
*rbd_dev
= img_request
->rbd_dev
;
2855 if (!img_request_write_test(img_request
) &&
2856 !img_request_discard_test(img_request
))
2859 /* Non-layered writes */
2860 if (!img_request_layered_test(img_request
))
2864 * Layered writes outside of the parent overlap range don't
2865 * share any data with the parent.
2867 if (!obj_request_overlaps_parent(obj_request
))
2871 * Entire-object layered writes - we will overwrite whatever
2872 * parent data there is anyway.
2874 if (!obj_request
->offset
&&
2875 obj_request
->length
== rbd_obj_bytes(&rbd_dev
->header
))
2879 * If the object is known to already exist, its parent data has
2880 * already been copied.
2882 if (obj_request_known_test(obj_request
) &&
2883 obj_request_exists_test(obj_request
))
2889 static int rbd_img_obj_request_submit(struct rbd_obj_request
*obj_request
)
2891 rbd_assert(obj_request_img_data_test(obj_request
));
2892 rbd_assert(obj_request_type_valid(obj_request
->type
));
2893 rbd_assert(obj_request
->img_request
);
2895 if (img_obj_request_simple(obj_request
)) {
2896 rbd_obj_request_submit(obj_request
);
2901 * It's a layered write. The target object might exist but
2902 * we may not know that yet. If we know it doesn't exist,
2903 * start by reading the data for the full target object from
2904 * the parent so we can use it for a copyup to the target.
2906 if (obj_request_known_test(obj_request
))
2907 return rbd_img_obj_parent_read_full(obj_request
);
2909 /* We don't know whether the target exists. Go find out. */
2911 return rbd_img_obj_exists_submit(obj_request
);
2914 static int rbd_img_request_submit(struct rbd_img_request
*img_request
)
2916 struct rbd_obj_request
*obj_request
;
2917 struct rbd_obj_request
*next_obj_request
;
2920 dout("%s: img %p\n", __func__
, img_request
);
2922 rbd_img_request_get(img_request
);
2923 for_each_obj_request_safe(img_request
, obj_request
, next_obj_request
) {
2924 ret
= rbd_img_obj_request_submit(obj_request
);
2930 rbd_img_request_put(img_request
);
2934 static void rbd_img_parent_read_callback(struct rbd_img_request
*img_request
)
2936 struct rbd_obj_request
*obj_request
;
2937 struct rbd_device
*rbd_dev
;
2942 rbd_assert(img_request_child_test(img_request
));
2944 /* First get what we need from the image request and release it */
2946 obj_request
= img_request
->obj_request
;
2947 img_xferred
= img_request
->xferred
;
2948 img_result
= img_request
->result
;
2949 rbd_img_request_put(img_request
);
2952 * If the overlap has become 0 (most likely because the
2953 * image has been flattened) we need to re-submit the
2956 rbd_assert(obj_request
);
2957 rbd_assert(obj_request
->img_request
);
2958 rbd_dev
= obj_request
->img_request
->rbd_dev
;
2959 if (!rbd_dev
->parent_overlap
) {
2960 rbd_obj_request_submit(obj_request
);
2964 obj_request
->result
= img_result
;
2965 if (obj_request
->result
)
2969 * We need to zero anything beyond the parent overlap
2970 * boundary. Since rbd_img_obj_request_read_callback()
2971 * will zero anything beyond the end of a short read, an
2972 * easy way to do this is to pretend the data from the
2973 * parent came up short--ending at the overlap boundary.
2975 rbd_assert(obj_request
->img_offset
< U64_MAX
- obj_request
->length
);
2976 obj_end
= obj_request
->img_offset
+ obj_request
->length
;
2977 if (obj_end
> rbd_dev
->parent_overlap
) {
2980 if (obj_request
->img_offset
< rbd_dev
->parent_overlap
)
2981 xferred
= rbd_dev
->parent_overlap
-
2982 obj_request
->img_offset
;
2984 obj_request
->xferred
= min(img_xferred
, xferred
);
2986 obj_request
->xferred
= img_xferred
;
2989 rbd_img_obj_request_read_callback(obj_request
);
2990 rbd_obj_request_complete(obj_request
);
2993 static void rbd_img_parent_read(struct rbd_obj_request
*obj_request
)
2995 struct rbd_img_request
*img_request
;
2998 rbd_assert(obj_request_img_data_test(obj_request
));
2999 rbd_assert(obj_request
->img_request
!= NULL
);
3000 rbd_assert(obj_request
->result
== (s32
) -ENOENT
);
3001 rbd_assert(obj_request_type_valid(obj_request
->type
));
3003 /* rbd_read_finish(obj_request, obj_request->length); */
3004 img_request
= rbd_parent_request_create(obj_request
,
3005 obj_request
->img_offset
,
3006 obj_request
->length
);
3011 if (obj_request
->type
== OBJ_REQUEST_BIO
)
3012 result
= rbd_img_request_fill(img_request
, OBJ_REQUEST_BIO
,
3013 obj_request
->bio_list
);
3015 result
= rbd_img_request_fill(img_request
, OBJ_REQUEST_PAGES
,
3016 obj_request
->pages
);
3020 img_request
->callback
= rbd_img_parent_read_callback
;
3021 result
= rbd_img_request_submit(img_request
);
3028 rbd_img_request_put(img_request
);
3029 obj_request
->result
= result
;
3030 obj_request
->xferred
= 0;
3031 obj_request_done_set(obj_request
);
3034 static const struct rbd_client_id rbd_empty_cid
;
3036 static bool rbd_cid_equal(const struct rbd_client_id
*lhs
,
3037 const struct rbd_client_id
*rhs
)
3039 return lhs
->gid
== rhs
->gid
&& lhs
->handle
== rhs
->handle
;
3042 static struct rbd_client_id
rbd_get_cid(struct rbd_device
*rbd_dev
)
3044 struct rbd_client_id cid
;
3046 mutex_lock(&rbd_dev
->watch_mutex
);
3047 cid
.gid
= ceph_client_gid(rbd_dev
->rbd_client
->client
);
3048 cid
.handle
= rbd_dev
->watch_cookie
;
3049 mutex_unlock(&rbd_dev
->watch_mutex
);
3054 * lock_rwsem must be held for write
3056 static void rbd_set_owner_cid(struct rbd_device
*rbd_dev
,
3057 const struct rbd_client_id
*cid
)
3059 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__
, rbd_dev
,
3060 rbd_dev
->owner_cid
.gid
, rbd_dev
->owner_cid
.handle
,
3061 cid
->gid
, cid
->handle
);
3062 rbd_dev
->owner_cid
= *cid
; /* struct */
3065 static void format_lock_cookie(struct rbd_device
*rbd_dev
, char *buf
)
3067 mutex_lock(&rbd_dev
->watch_mutex
);
3068 sprintf(buf
, "%s %llu", RBD_LOCK_COOKIE_PREFIX
, rbd_dev
->watch_cookie
);
3069 mutex_unlock(&rbd_dev
->watch_mutex
);
3073 * lock_rwsem must be held for write
3075 static int rbd_lock(struct rbd_device
*rbd_dev
)
3077 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3078 struct rbd_client_id cid
= rbd_get_cid(rbd_dev
);
3082 WARN_ON(__rbd_is_lock_owner(rbd_dev
));
3084 format_lock_cookie(rbd_dev
, cookie
);
3085 ret
= ceph_cls_lock(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
3086 RBD_LOCK_NAME
, CEPH_CLS_LOCK_EXCLUSIVE
, cookie
,
3087 RBD_LOCK_TAG
, "", 0);
3091 rbd_dev
->lock_state
= RBD_LOCK_STATE_LOCKED
;
3092 rbd_set_owner_cid(rbd_dev
, &cid
);
3093 queue_work(rbd_dev
->task_wq
, &rbd_dev
->acquired_lock_work
);
3098 * lock_rwsem must be held for write
3100 static int rbd_unlock(struct rbd_device
*rbd_dev
)
3102 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3106 WARN_ON(!__rbd_is_lock_owner(rbd_dev
));
3108 rbd_dev
->lock_state
= RBD_LOCK_STATE_UNLOCKED
;
3110 format_lock_cookie(rbd_dev
, cookie
);
3111 ret
= ceph_cls_unlock(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
3112 RBD_LOCK_NAME
, cookie
);
3113 if (ret
&& ret
!= -ENOENT
) {
3114 rbd_warn(rbd_dev
, "cls_unlock failed: %d", ret
);
3118 rbd_set_owner_cid(rbd_dev
, &rbd_empty_cid
);
3119 queue_work(rbd_dev
->task_wq
, &rbd_dev
->released_lock_work
);
3123 static int __rbd_notify_op_lock(struct rbd_device
*rbd_dev
,
3124 enum rbd_notify_op notify_op
,
3125 struct page
***preply_pages
,
3128 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3129 struct rbd_client_id cid
= rbd_get_cid(rbd_dev
);
3130 int buf_size
= 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN
;
3134 dout("%s rbd_dev %p notify_op %d\n", __func__
, rbd_dev
, notify_op
);
3136 /* encode *LockPayload NotifyMessage (op + ClientId) */
3137 ceph_start_encoding(&p
, 2, 1, buf_size
- CEPH_ENCODING_START_BLK_LEN
);
3138 ceph_encode_32(&p
, notify_op
);
3139 ceph_encode_64(&p
, cid
.gid
);
3140 ceph_encode_64(&p
, cid
.handle
);
3142 return ceph_osdc_notify(osdc
, &rbd_dev
->header_oid
,
3143 &rbd_dev
->header_oloc
, buf
, buf_size
,
3144 RBD_NOTIFY_TIMEOUT
, preply_pages
, preply_len
);
3147 static void rbd_notify_op_lock(struct rbd_device
*rbd_dev
,
3148 enum rbd_notify_op notify_op
)
3150 struct page
**reply_pages
;
3153 __rbd_notify_op_lock(rbd_dev
, notify_op
, &reply_pages
, &reply_len
);
3154 ceph_release_page_vector(reply_pages
, calc_pages_for(0, reply_len
));
3157 static void rbd_notify_acquired_lock(struct work_struct
*work
)
3159 struct rbd_device
*rbd_dev
= container_of(work
, struct rbd_device
,
3160 acquired_lock_work
);
3162 rbd_notify_op_lock(rbd_dev
, RBD_NOTIFY_OP_ACQUIRED_LOCK
);
3165 static void rbd_notify_released_lock(struct work_struct
*work
)
3167 struct rbd_device
*rbd_dev
= container_of(work
, struct rbd_device
,
3168 released_lock_work
);
3170 rbd_notify_op_lock(rbd_dev
, RBD_NOTIFY_OP_RELEASED_LOCK
);
3173 static int rbd_request_lock(struct rbd_device
*rbd_dev
)
3175 struct page
**reply_pages
;
3177 bool lock_owner_responded
= false;
3180 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3182 ret
= __rbd_notify_op_lock(rbd_dev
, RBD_NOTIFY_OP_REQUEST_LOCK
,
3183 &reply_pages
, &reply_len
);
3184 if (ret
&& ret
!= -ETIMEDOUT
) {
3185 rbd_warn(rbd_dev
, "failed to request lock: %d", ret
);
3189 if (reply_len
> 0 && reply_len
<= PAGE_SIZE
) {
3190 void *p
= page_address(reply_pages
[0]);
3191 void *const end
= p
+ reply_len
;
3194 ceph_decode_32_safe(&p
, end
, n
, e_inval
); /* num_acks */
3199 ceph_decode_need(&p
, end
, 8 + 8, e_inval
);
3200 p
+= 8 + 8; /* skip gid and cookie */
3202 ceph_decode_32_safe(&p
, end
, len
, e_inval
);
3206 if (lock_owner_responded
) {
3208 "duplicate lock owners detected");
3213 lock_owner_responded
= true;
3214 ret
= ceph_start_decoding(&p
, end
, 1, "ResponseMessage",
3218 "failed to decode ResponseMessage: %d",
3223 ret
= ceph_decode_32(&p
);
3227 if (!lock_owner_responded
) {
3228 rbd_warn(rbd_dev
, "no lock owners detected");
3233 ceph_release_page_vector(reply_pages
, calc_pages_for(0, reply_len
));
3241 static void wake_requests(struct rbd_device
*rbd_dev
, bool wake_all
)
3243 dout("%s rbd_dev %p wake_all %d\n", __func__
, rbd_dev
, wake_all
);
3245 cancel_delayed_work(&rbd_dev
->lock_dwork
);
3247 wake_up_all(&rbd_dev
->lock_waitq
);
3249 wake_up(&rbd_dev
->lock_waitq
);
3252 static int get_lock_owner_info(struct rbd_device
*rbd_dev
,
3253 struct ceph_locker
**lockers
, u32
*num_lockers
)
3255 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3260 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3262 ret
= ceph_cls_lock_info(osdc
, &rbd_dev
->header_oid
,
3263 &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
3264 &lock_type
, &lock_tag
, lockers
, num_lockers
);
3268 if (*num_lockers
== 0) {
3269 dout("%s rbd_dev %p no lockers detected\n", __func__
, rbd_dev
);
3273 if (strcmp(lock_tag
, RBD_LOCK_TAG
)) {
3274 rbd_warn(rbd_dev
, "locked by external mechanism, tag %s",
3280 if (lock_type
== CEPH_CLS_LOCK_SHARED
) {
3281 rbd_warn(rbd_dev
, "shared lock type detected");
3286 if (strncmp((*lockers
)[0].id
.cookie
, RBD_LOCK_COOKIE_PREFIX
,
3287 strlen(RBD_LOCK_COOKIE_PREFIX
))) {
3288 rbd_warn(rbd_dev
, "locked by external mechanism, cookie %s",
3289 (*lockers
)[0].id
.cookie
);
3299 static int find_watcher(struct rbd_device
*rbd_dev
,
3300 const struct ceph_locker
*locker
)
3302 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3303 struct ceph_watch_item
*watchers
;
3309 ret
= ceph_osdc_list_watchers(osdc
, &rbd_dev
->header_oid
,
3310 &rbd_dev
->header_oloc
, &watchers
,
3315 sscanf(locker
->id
.cookie
, RBD_LOCK_COOKIE_PREFIX
" %llu", &cookie
);
3316 for (i
= 0; i
< num_watchers
; i
++) {
3317 if (!memcmp(&watchers
[i
].addr
, &locker
->info
.addr
,
3318 sizeof(locker
->info
.addr
)) &&
3319 watchers
[i
].cookie
== cookie
) {
3320 struct rbd_client_id cid
= {
3321 .gid
= le64_to_cpu(watchers
[i
].name
.num
),
3325 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__
,
3326 rbd_dev
, cid
.gid
, cid
.handle
);
3327 rbd_set_owner_cid(rbd_dev
, &cid
);
3333 dout("%s rbd_dev %p no watchers\n", __func__
, rbd_dev
);
3341 * lock_rwsem must be held for write
3343 static int rbd_try_lock(struct rbd_device
*rbd_dev
)
3345 struct ceph_client
*client
= rbd_dev
->rbd_client
->client
;
3346 struct ceph_locker
*lockers
;
3351 ret
= rbd_lock(rbd_dev
);
3355 /* determine if the current lock holder is still alive */
3356 ret
= get_lock_owner_info(rbd_dev
, &lockers
, &num_lockers
);
3360 if (num_lockers
== 0)
3363 ret
= find_watcher(rbd_dev
, lockers
);
3366 ret
= 0; /* have to request lock */
3370 rbd_warn(rbd_dev
, "%s%llu seems dead, breaking lock",
3371 ENTITY_NAME(lockers
[0].id
.name
));
3373 ret
= ceph_monc_blacklist_add(&client
->monc
,
3374 &lockers
[0].info
.addr
);
3376 rbd_warn(rbd_dev
, "blacklist of %s%llu failed: %d",
3377 ENTITY_NAME(lockers
[0].id
.name
), ret
);
3381 ret
= ceph_cls_break_lock(&client
->osdc
, &rbd_dev
->header_oid
,
3382 &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
3383 lockers
[0].id
.cookie
,
3384 &lockers
[0].id
.name
);
3385 if (ret
&& ret
!= -ENOENT
)
3389 ceph_free_lockers(lockers
, num_lockers
);
3393 ceph_free_lockers(lockers
, num_lockers
);
3398 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3400 static enum rbd_lock_state
rbd_try_acquire_lock(struct rbd_device
*rbd_dev
,
3403 enum rbd_lock_state lock_state
;
3405 down_read(&rbd_dev
->lock_rwsem
);
3406 dout("%s rbd_dev %p read lock_state %d\n", __func__
, rbd_dev
,
3407 rbd_dev
->lock_state
);
3408 if (__rbd_is_lock_owner(rbd_dev
)) {
3409 lock_state
= rbd_dev
->lock_state
;
3410 up_read(&rbd_dev
->lock_rwsem
);
3414 up_read(&rbd_dev
->lock_rwsem
);
3415 down_write(&rbd_dev
->lock_rwsem
);
3416 dout("%s rbd_dev %p write lock_state %d\n", __func__
, rbd_dev
,
3417 rbd_dev
->lock_state
);
3418 if (!__rbd_is_lock_owner(rbd_dev
)) {
3419 *pret
= rbd_try_lock(rbd_dev
);
3421 rbd_warn(rbd_dev
, "failed to acquire lock: %d", *pret
);
3424 lock_state
= rbd_dev
->lock_state
;
3425 up_write(&rbd_dev
->lock_rwsem
);
3429 static void rbd_acquire_lock(struct work_struct
*work
)
3431 struct rbd_device
*rbd_dev
= container_of(to_delayed_work(work
),
3432 struct rbd_device
, lock_dwork
);
3433 enum rbd_lock_state lock_state
;
3436 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3438 lock_state
= rbd_try_acquire_lock(rbd_dev
, &ret
);
3439 if (lock_state
!= RBD_LOCK_STATE_UNLOCKED
|| ret
== -EBLACKLISTED
) {
3440 if (lock_state
== RBD_LOCK_STATE_LOCKED
)
3441 wake_requests(rbd_dev
, true);
3442 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__
,
3443 rbd_dev
, lock_state
, ret
);
3447 ret
= rbd_request_lock(rbd_dev
);
3448 if (ret
== -ETIMEDOUT
) {
3449 goto again
; /* treat this as a dead client */
3450 } else if (ret
< 0) {
3451 rbd_warn(rbd_dev
, "error requesting lock: %d", ret
);
3452 mod_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
,
3456 * lock owner acked, but resend if we don't see them
3459 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__
,
3461 mod_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
,
3462 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT
* MSEC_PER_SEC
));
3467 * lock_rwsem must be held for write
3469 static bool rbd_release_lock(struct rbd_device
*rbd_dev
)
3471 dout("%s rbd_dev %p read lock_state %d\n", __func__
, rbd_dev
,
3472 rbd_dev
->lock_state
);
3473 if (rbd_dev
->lock_state
!= RBD_LOCK_STATE_LOCKED
)
3476 rbd_dev
->lock_state
= RBD_LOCK_STATE_RELEASING
;
3477 downgrade_write(&rbd_dev
->lock_rwsem
);
3479 * Ensure that all in-flight IO is flushed.
3481 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3482 * may be shared with other devices.
3484 ceph_osdc_sync(&rbd_dev
->rbd_client
->client
->osdc
);
3485 up_read(&rbd_dev
->lock_rwsem
);
3487 down_write(&rbd_dev
->lock_rwsem
);
3488 dout("%s rbd_dev %p write lock_state %d\n", __func__
, rbd_dev
,
3489 rbd_dev
->lock_state
);
3490 if (rbd_dev
->lock_state
!= RBD_LOCK_STATE_RELEASING
)
3493 if (!rbd_unlock(rbd_dev
))
3495 * Give others a chance to grab the lock - we would re-acquire
3496 * almost immediately if we got new IO during ceph_osdc_sync()
3497 * otherwise. We need to ack our own notifications, so this
3498 * lock_dwork will be requeued from rbd_wait_state_locked()
3499 * after wake_requests() in rbd_handle_released_lock().
3501 cancel_delayed_work(&rbd_dev
->lock_dwork
);
3506 static void rbd_release_lock_work(struct work_struct
*work
)
3508 struct rbd_device
*rbd_dev
= container_of(work
, struct rbd_device
,
3511 down_write(&rbd_dev
->lock_rwsem
);
3512 rbd_release_lock(rbd_dev
);
3513 up_write(&rbd_dev
->lock_rwsem
);
3516 static void rbd_handle_acquired_lock(struct rbd_device
*rbd_dev
, u8 struct_v
,
3519 struct rbd_client_id cid
= { 0 };
3521 if (struct_v
>= 2) {
3522 cid
.gid
= ceph_decode_64(p
);
3523 cid
.handle
= ceph_decode_64(p
);
3526 dout("%s rbd_dev %p cid %llu-%llu\n", __func__
, rbd_dev
, cid
.gid
,
3528 if (!rbd_cid_equal(&cid
, &rbd_empty_cid
)) {
3529 down_write(&rbd_dev
->lock_rwsem
);
3530 if (rbd_cid_equal(&cid
, &rbd_dev
->owner_cid
)) {
3532 * we already know that the remote client is
3535 up_write(&rbd_dev
->lock_rwsem
);
3539 rbd_set_owner_cid(rbd_dev
, &cid
);
3540 downgrade_write(&rbd_dev
->lock_rwsem
);
3542 down_read(&rbd_dev
->lock_rwsem
);
3545 if (!__rbd_is_lock_owner(rbd_dev
))
3546 wake_requests(rbd_dev
, false);
3547 up_read(&rbd_dev
->lock_rwsem
);
3550 static void rbd_handle_released_lock(struct rbd_device
*rbd_dev
, u8 struct_v
,
3553 struct rbd_client_id cid
= { 0 };
3555 if (struct_v
>= 2) {
3556 cid
.gid
= ceph_decode_64(p
);
3557 cid
.handle
= ceph_decode_64(p
);
3560 dout("%s rbd_dev %p cid %llu-%llu\n", __func__
, rbd_dev
, cid
.gid
,
3562 if (!rbd_cid_equal(&cid
, &rbd_empty_cid
)) {
3563 down_write(&rbd_dev
->lock_rwsem
);
3564 if (!rbd_cid_equal(&cid
, &rbd_dev
->owner_cid
)) {
3565 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3566 __func__
, rbd_dev
, cid
.gid
, cid
.handle
,
3567 rbd_dev
->owner_cid
.gid
, rbd_dev
->owner_cid
.handle
);
3568 up_write(&rbd_dev
->lock_rwsem
);
3572 rbd_set_owner_cid(rbd_dev
, &rbd_empty_cid
);
3573 downgrade_write(&rbd_dev
->lock_rwsem
);
3575 down_read(&rbd_dev
->lock_rwsem
);
3578 if (!__rbd_is_lock_owner(rbd_dev
))
3579 wake_requests(rbd_dev
, false);
3580 up_read(&rbd_dev
->lock_rwsem
);
3583 static bool rbd_handle_request_lock(struct rbd_device
*rbd_dev
, u8 struct_v
,
3586 struct rbd_client_id my_cid
= rbd_get_cid(rbd_dev
);
3587 struct rbd_client_id cid
= { 0 };
3590 if (struct_v
>= 2) {
3591 cid
.gid
= ceph_decode_64(p
);
3592 cid
.handle
= ceph_decode_64(p
);
3595 dout("%s rbd_dev %p cid %llu-%llu\n", __func__
, rbd_dev
, cid
.gid
,
3597 if (rbd_cid_equal(&cid
, &my_cid
))
3600 down_read(&rbd_dev
->lock_rwsem
);
3601 need_to_send
= __rbd_is_lock_owner(rbd_dev
);
3602 if (rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
) {
3603 if (!rbd_cid_equal(&rbd_dev
->owner_cid
, &rbd_empty_cid
)) {
3604 dout("%s rbd_dev %p queueing unlock_work\n", __func__
,
3606 queue_work(rbd_dev
->task_wq
, &rbd_dev
->unlock_work
);
3609 up_read(&rbd_dev
->lock_rwsem
);
3610 return need_to_send
;
3613 static void __rbd_acknowledge_notify(struct rbd_device
*rbd_dev
,
3614 u64 notify_id
, u64 cookie
, s32
*result
)
3616 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3617 int buf_size
= 4 + CEPH_ENCODING_START_BLK_LEN
;
3624 /* encode ResponseMessage */
3625 ceph_start_encoding(&p
, 1, 1,
3626 buf_size
- CEPH_ENCODING_START_BLK_LEN
);
3627 ceph_encode_32(&p
, *result
);
3632 ret
= ceph_osdc_notify_ack(osdc
, &rbd_dev
->header_oid
,
3633 &rbd_dev
->header_oloc
, notify_id
, cookie
,
3636 rbd_warn(rbd_dev
, "acknowledge_notify failed: %d", ret
);
3639 static void rbd_acknowledge_notify(struct rbd_device
*rbd_dev
, u64 notify_id
,
3642 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3643 __rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
, NULL
);
3646 static void rbd_acknowledge_notify_result(struct rbd_device
*rbd_dev
,
3647 u64 notify_id
, u64 cookie
, s32 result
)
3649 dout("%s rbd_dev %p result %d\n", __func__
, rbd_dev
, result
);
3650 __rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
, &result
);
3653 static void rbd_watch_cb(void *arg
, u64 notify_id
, u64 cookie
,
3654 u64 notifier_id
, void *data
, size_t data_len
)
3656 struct rbd_device
*rbd_dev
= arg
;
3658 void *const end
= p
+ data_len
;
3664 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3665 __func__
, rbd_dev
, cookie
, notify_id
, data_len
);
3667 ret
= ceph_start_decoding(&p
, end
, 1, "NotifyMessage",
3670 rbd_warn(rbd_dev
, "failed to decode NotifyMessage: %d",
3675 notify_op
= ceph_decode_32(&p
);
3677 /* legacy notification for header updates */
3678 notify_op
= RBD_NOTIFY_OP_HEADER_UPDATE
;
3682 dout("%s rbd_dev %p notify_op %u\n", __func__
, rbd_dev
, notify_op
);
3683 switch (notify_op
) {
3684 case RBD_NOTIFY_OP_ACQUIRED_LOCK
:
3685 rbd_handle_acquired_lock(rbd_dev
, struct_v
, &p
);
3686 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
3688 case RBD_NOTIFY_OP_RELEASED_LOCK
:
3689 rbd_handle_released_lock(rbd_dev
, struct_v
, &p
);
3690 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
3692 case RBD_NOTIFY_OP_REQUEST_LOCK
:
3693 if (rbd_handle_request_lock(rbd_dev
, struct_v
, &p
))
3695 * send ResponseMessage(0) back so the client
3696 * can detect a missing owner
3698 rbd_acknowledge_notify_result(rbd_dev
, notify_id
,
3701 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
3703 case RBD_NOTIFY_OP_HEADER_UPDATE
:
3704 ret
= rbd_dev_refresh(rbd_dev
);
3706 rbd_warn(rbd_dev
, "refresh failed: %d", ret
);
3708 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
3711 if (rbd_is_lock_owner(rbd_dev
))
3712 rbd_acknowledge_notify_result(rbd_dev
, notify_id
,
3713 cookie
, -EOPNOTSUPP
);
3715 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
3720 static void __rbd_unregister_watch(struct rbd_device
*rbd_dev
);
3722 static void rbd_watch_errcb(void *arg
, u64 cookie
, int err
)
3724 struct rbd_device
*rbd_dev
= arg
;
3726 rbd_warn(rbd_dev
, "encountered watch error: %d", err
);
3728 down_write(&rbd_dev
->lock_rwsem
);
3729 rbd_set_owner_cid(rbd_dev
, &rbd_empty_cid
);
3730 up_write(&rbd_dev
->lock_rwsem
);
3732 mutex_lock(&rbd_dev
->watch_mutex
);
3733 if (rbd_dev
->watch_state
== RBD_WATCH_STATE_REGISTERED
) {
3734 __rbd_unregister_watch(rbd_dev
);
3735 rbd_dev
->watch_state
= RBD_WATCH_STATE_ERROR
;
3737 queue_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->watch_dwork
, 0);
3739 mutex_unlock(&rbd_dev
->watch_mutex
);
3743 * watch_mutex must be locked
3745 static int __rbd_register_watch(struct rbd_device
*rbd_dev
)
3747 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3748 struct ceph_osd_linger_request
*handle
;
3750 rbd_assert(!rbd_dev
->watch_handle
);
3751 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3753 handle
= ceph_osdc_watch(osdc
, &rbd_dev
->header_oid
,
3754 &rbd_dev
->header_oloc
, rbd_watch_cb
,
3755 rbd_watch_errcb
, rbd_dev
);
3757 return PTR_ERR(handle
);
3759 rbd_dev
->watch_handle
= handle
;
3764 * watch_mutex must be locked
3766 static void __rbd_unregister_watch(struct rbd_device
*rbd_dev
)
3768 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3771 rbd_assert(rbd_dev
->watch_handle
);
3772 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3774 ret
= ceph_osdc_unwatch(osdc
, rbd_dev
->watch_handle
);
3776 rbd_warn(rbd_dev
, "failed to unwatch: %d", ret
);
3778 rbd_dev
->watch_handle
= NULL
;
3781 static int rbd_register_watch(struct rbd_device
*rbd_dev
)
3785 mutex_lock(&rbd_dev
->watch_mutex
);
3786 rbd_assert(rbd_dev
->watch_state
== RBD_WATCH_STATE_UNREGISTERED
);
3787 ret
= __rbd_register_watch(rbd_dev
);
3791 rbd_dev
->watch_state
= RBD_WATCH_STATE_REGISTERED
;
3792 rbd_dev
->watch_cookie
= rbd_dev
->watch_handle
->linger_id
;
3795 mutex_unlock(&rbd_dev
->watch_mutex
);
3799 static void cancel_tasks_sync(struct rbd_device
*rbd_dev
)
3801 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3803 cancel_delayed_work_sync(&rbd_dev
->watch_dwork
);
3804 cancel_work_sync(&rbd_dev
->acquired_lock_work
);
3805 cancel_work_sync(&rbd_dev
->released_lock_work
);
3806 cancel_delayed_work_sync(&rbd_dev
->lock_dwork
);
3807 cancel_work_sync(&rbd_dev
->unlock_work
);
3810 static void rbd_unregister_watch(struct rbd_device
*rbd_dev
)
3812 WARN_ON(waitqueue_active(&rbd_dev
->lock_waitq
));
3813 cancel_tasks_sync(rbd_dev
);
3815 mutex_lock(&rbd_dev
->watch_mutex
);
3816 if (rbd_dev
->watch_state
== RBD_WATCH_STATE_REGISTERED
)
3817 __rbd_unregister_watch(rbd_dev
);
3818 rbd_dev
->watch_state
= RBD_WATCH_STATE_UNREGISTERED
;
3819 mutex_unlock(&rbd_dev
->watch_mutex
);
3821 ceph_osdc_flush_notifies(&rbd_dev
->rbd_client
->client
->osdc
);
3824 static void rbd_reregister_watch(struct work_struct
*work
)
3826 struct rbd_device
*rbd_dev
= container_of(to_delayed_work(work
),
3827 struct rbd_device
, watch_dwork
);
3828 bool was_lock_owner
= false;
3829 bool need_to_wake
= false;
3832 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3834 down_write(&rbd_dev
->lock_rwsem
);
3835 if (rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
)
3836 was_lock_owner
= rbd_release_lock(rbd_dev
);
3838 mutex_lock(&rbd_dev
->watch_mutex
);
3839 if (rbd_dev
->watch_state
!= RBD_WATCH_STATE_ERROR
) {
3840 mutex_unlock(&rbd_dev
->watch_mutex
);
3844 ret
= __rbd_register_watch(rbd_dev
);
3846 rbd_warn(rbd_dev
, "failed to reregister watch: %d", ret
);
3847 if (ret
== -EBLACKLISTED
|| ret
== -ENOENT
) {
3848 set_bit(RBD_DEV_FLAG_BLACKLISTED
, &rbd_dev
->flags
);
3849 need_to_wake
= true;
3851 queue_delayed_work(rbd_dev
->task_wq
,
3852 &rbd_dev
->watch_dwork
,
3855 mutex_unlock(&rbd_dev
->watch_mutex
);
3859 need_to_wake
= true;
3860 rbd_dev
->watch_state
= RBD_WATCH_STATE_REGISTERED
;
3861 rbd_dev
->watch_cookie
= rbd_dev
->watch_handle
->linger_id
;
3862 mutex_unlock(&rbd_dev
->watch_mutex
);
3864 ret
= rbd_dev_refresh(rbd_dev
);
3866 rbd_warn(rbd_dev
, "reregisteration refresh failed: %d", ret
);
3868 if (was_lock_owner
) {
3869 ret
= rbd_try_lock(rbd_dev
);
3871 rbd_warn(rbd_dev
, "reregisteration lock failed: %d",
3876 up_write(&rbd_dev
->lock_rwsem
);
3878 wake_requests(rbd_dev
, true);
3882 * Synchronous osd object method call. Returns the number of bytes
3883 * returned in the outbound buffer, or a negative error code.
3885 static int rbd_obj_method_sync(struct rbd_device
*rbd_dev
,
3886 struct ceph_object_id
*oid
,
3887 struct ceph_object_locator
*oloc
,
3888 const char *method_name
,
3889 const void *outbound
,
3890 size_t outbound_size
,
3892 size_t inbound_size
)
3894 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3895 struct page
*req_page
= NULL
;
3896 struct page
*reply_page
;
3900 * Method calls are ultimately read operations. The result
3901 * should placed into the inbound buffer provided. They
3902 * also supply outbound data--parameters for the object
3903 * method. Currently if this is present it will be a
3907 if (outbound_size
> PAGE_SIZE
)
3910 req_page
= alloc_page(GFP_KERNEL
);
3914 memcpy(page_address(req_page
), outbound
, outbound_size
);
3917 reply_page
= alloc_page(GFP_KERNEL
);
3920 __free_page(req_page
);
3924 ret
= ceph_osdc_call(osdc
, oid
, oloc
, RBD_DRV_NAME
, method_name
,
3925 CEPH_OSD_FLAG_READ
, req_page
, outbound_size
,
3926 reply_page
, &inbound_size
);
3928 memcpy(inbound
, page_address(reply_page
), inbound_size
);
3933 __free_page(req_page
);
3934 __free_page(reply_page
);
3939 * lock_rwsem must be held for read
3941 static void rbd_wait_state_locked(struct rbd_device
*rbd_dev
)
3947 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3948 * and cancel_delayed_work() in wake_requests().
3950 dout("%s rbd_dev %p queueing lock_dwork\n", __func__
, rbd_dev
);
3951 queue_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
, 0);
3952 prepare_to_wait_exclusive(&rbd_dev
->lock_waitq
, &wait
,
3953 TASK_UNINTERRUPTIBLE
);
3954 up_read(&rbd_dev
->lock_rwsem
);
3956 down_read(&rbd_dev
->lock_rwsem
);
3957 } while (rbd_dev
->lock_state
!= RBD_LOCK_STATE_LOCKED
&&
3958 !test_bit(RBD_DEV_FLAG_BLACKLISTED
, &rbd_dev
->flags
));
3960 finish_wait(&rbd_dev
->lock_waitq
, &wait
);
3963 static void rbd_queue_workfn(struct work_struct
*work
)
3965 struct request
*rq
= blk_mq_rq_from_pdu(work
);
3966 struct rbd_device
*rbd_dev
= rq
->q
->queuedata
;
3967 struct rbd_img_request
*img_request
;
3968 struct ceph_snap_context
*snapc
= NULL
;
3969 u64 offset
= (u64
)blk_rq_pos(rq
) << SECTOR_SHIFT
;
3970 u64 length
= blk_rq_bytes(rq
);
3971 enum obj_operation_type op_type
;
3973 bool must_be_locked
;
3976 switch (req_op(rq
)) {
3977 case REQ_OP_DISCARD
:
3978 op_type
= OBJ_OP_DISCARD
;
3981 op_type
= OBJ_OP_WRITE
;
3984 op_type
= OBJ_OP_READ
;
3987 dout("%s: non-fs request type %d\n", __func__
, req_op(rq
));
3992 /* Ignore/skip any zero-length requests */
3995 dout("%s: zero-length request\n", __func__
);
4000 /* Only reads are allowed to a read-only device */
4002 if (op_type
!= OBJ_OP_READ
) {
4003 if (rbd_dev
->mapping
.read_only
) {
4007 rbd_assert(rbd_dev
->spec
->snap_id
== CEPH_NOSNAP
);
4011 * Quit early if the mapped snapshot no longer exists. It's
4012 * still possible the snapshot will have disappeared by the
4013 * time our request arrives at the osd, but there's no sense in
4014 * sending it if we already know.
4016 if (!test_bit(RBD_DEV_FLAG_EXISTS
, &rbd_dev
->flags
)) {
4017 dout("request for non-existent snapshot");
4018 rbd_assert(rbd_dev
->spec
->snap_id
!= CEPH_NOSNAP
);
4023 if (offset
&& length
> U64_MAX
- offset
+ 1) {
4024 rbd_warn(rbd_dev
, "bad request range (%llu~%llu)", offset
,
4027 goto err_rq
; /* Shouldn't happen */
4030 blk_mq_start_request(rq
);
4032 down_read(&rbd_dev
->header_rwsem
);
4033 mapping_size
= rbd_dev
->mapping
.size
;
4034 if (op_type
!= OBJ_OP_READ
) {
4035 snapc
= rbd_dev
->header
.snapc
;
4036 ceph_get_snap_context(snapc
);
4037 must_be_locked
= rbd_is_lock_supported(rbd_dev
);
4039 must_be_locked
= rbd_dev
->opts
->lock_on_read
&&
4040 rbd_is_lock_supported(rbd_dev
);
4042 up_read(&rbd_dev
->header_rwsem
);
4044 if (offset
+ length
> mapping_size
) {
4045 rbd_warn(rbd_dev
, "beyond EOD (%llu~%llu > %llu)", offset
,
4046 length
, mapping_size
);
4051 if (must_be_locked
) {
4052 down_read(&rbd_dev
->lock_rwsem
);
4053 if (rbd_dev
->lock_state
!= RBD_LOCK_STATE_LOCKED
&&
4054 !test_bit(RBD_DEV_FLAG_BLACKLISTED
, &rbd_dev
->flags
))
4055 rbd_wait_state_locked(rbd_dev
);
4057 WARN_ON((rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
) ^
4058 !test_bit(RBD_DEV_FLAG_BLACKLISTED
, &rbd_dev
->flags
));
4059 if (test_bit(RBD_DEV_FLAG_BLACKLISTED
, &rbd_dev
->flags
)) {
4060 result
= -EBLACKLISTED
;
4065 img_request
= rbd_img_request_create(rbd_dev
, offset
, length
, op_type
,
4071 img_request
->rq
= rq
;
4072 snapc
= NULL
; /* img_request consumes a ref */
4074 if (op_type
== OBJ_OP_DISCARD
)
4075 result
= rbd_img_request_fill(img_request
, OBJ_REQUEST_NODATA
,
4078 result
= rbd_img_request_fill(img_request
, OBJ_REQUEST_BIO
,
4081 goto err_img_request
;
4083 result
= rbd_img_request_submit(img_request
);
4085 goto err_img_request
;
4088 up_read(&rbd_dev
->lock_rwsem
);
4092 rbd_img_request_put(img_request
);
4095 up_read(&rbd_dev
->lock_rwsem
);
4098 rbd_warn(rbd_dev
, "%s %llx at %llx result %d",
4099 obj_op_name(op_type
), length
, offset
, result
);
4100 ceph_put_snap_context(snapc
);
4102 blk_mq_end_request(rq
, result
);
4105 static int rbd_queue_rq(struct blk_mq_hw_ctx
*hctx
,
4106 const struct blk_mq_queue_data
*bd
)
4108 struct request
*rq
= bd
->rq
;
4109 struct work_struct
*work
= blk_mq_rq_to_pdu(rq
);
4111 queue_work(rbd_wq
, work
);
4112 return BLK_MQ_RQ_QUEUE_OK
;
4115 static void rbd_free_disk(struct rbd_device
*rbd_dev
)
4117 struct gendisk
*disk
= rbd_dev
->disk
;
4122 rbd_dev
->disk
= NULL
;
4123 if (disk
->flags
& GENHD_FL_UP
) {
4126 blk_cleanup_queue(disk
->queue
);
4127 blk_mq_free_tag_set(&rbd_dev
->tag_set
);
4132 static int rbd_obj_read_sync(struct rbd_device
*rbd_dev
,
4133 struct ceph_object_id
*oid
,
4134 struct ceph_object_locator
*oloc
,
4135 void *buf
, int buf_len
)
4138 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
4139 struct ceph_osd_request
*req
;
4140 struct page
**pages
;
4141 int num_pages
= calc_pages_for(0, buf_len
);
4144 req
= ceph_osdc_alloc_request(osdc
, NULL
, 1, false, GFP_KERNEL
);
4148 ceph_oid_copy(&req
->r_base_oid
, oid
);
4149 ceph_oloc_copy(&req
->r_base_oloc
, oloc
);
4150 req
->r_flags
= CEPH_OSD_FLAG_READ
;
4152 ret
= ceph_osdc_alloc_messages(req
, GFP_KERNEL
);
4156 pages
= ceph_alloc_page_vector(num_pages
, GFP_KERNEL
);
4157 if (IS_ERR(pages
)) {
4158 ret
= PTR_ERR(pages
);
4162 osd_req_op_extent_init(req
, 0, CEPH_OSD_OP_READ
, 0, buf_len
, 0, 0);
4163 osd_req_op_extent_osd_data_pages(req
, 0, pages
, buf_len
, 0, false,
4166 ceph_osdc_start_request(osdc
, req
, false);
4167 ret
= ceph_osdc_wait_request(osdc
, req
);
4169 ceph_copy_from_page_vector(pages
, buf
, 0, ret
);
4172 ceph_osdc_put_request(req
);
4177 * Read the complete header for the given rbd device. On successful
4178 * return, the rbd_dev->header field will contain up-to-date
4179 * information about the image.
4181 static int rbd_dev_v1_header_info(struct rbd_device
*rbd_dev
)
4183 struct rbd_image_header_ondisk
*ondisk
= NULL
;
4190 * The complete header will include an array of its 64-bit
4191 * snapshot ids, followed by the names of those snapshots as
4192 * a contiguous block of NUL-terminated strings. Note that
4193 * the number of snapshots could change by the time we read
4194 * it in, in which case we re-read it.
4201 size
= sizeof (*ondisk
);
4202 size
+= snap_count
* sizeof (struct rbd_image_snap_ondisk
);
4204 ondisk
= kmalloc(size
, GFP_KERNEL
);
4208 ret
= rbd_obj_read_sync(rbd_dev
, &rbd_dev
->header_oid
,
4209 &rbd_dev
->header_oloc
, ondisk
, size
);
4212 if ((size_t)ret
< size
) {
4214 rbd_warn(rbd_dev
, "short header read (want %zd got %d)",
4218 if (!rbd_dev_ondisk_valid(ondisk
)) {
4220 rbd_warn(rbd_dev
, "invalid header");
4224 names_size
= le64_to_cpu(ondisk
->snap_names_len
);
4225 want_count
= snap_count
;
4226 snap_count
= le32_to_cpu(ondisk
->snap_count
);
4227 } while (snap_count
!= want_count
);
4229 ret
= rbd_header_from_disk(rbd_dev
, ondisk
);
4237 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
4238 * has disappeared from the (just updated) snapshot context.
4240 static void rbd_exists_validate(struct rbd_device
*rbd_dev
)
4244 if (!test_bit(RBD_DEV_FLAG_EXISTS
, &rbd_dev
->flags
))
4247 snap_id
= rbd_dev
->spec
->snap_id
;
4248 if (snap_id
== CEPH_NOSNAP
)
4251 if (rbd_dev_snap_index(rbd_dev
, snap_id
) == BAD_SNAP_INDEX
)
4252 clear_bit(RBD_DEV_FLAG_EXISTS
, &rbd_dev
->flags
);
4255 static void rbd_dev_update_size(struct rbd_device
*rbd_dev
)
4260 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4261 * try to update its size. If REMOVING is set, updating size
4262 * is just useless work since the device can't be opened.
4264 if (test_bit(RBD_DEV_FLAG_EXISTS
, &rbd_dev
->flags
) &&
4265 !test_bit(RBD_DEV_FLAG_REMOVING
, &rbd_dev
->flags
)) {
4266 size
= (sector_t
)rbd_dev
->mapping
.size
/ SECTOR_SIZE
;
4267 dout("setting size to %llu sectors", (unsigned long long)size
);
4268 set_capacity(rbd_dev
->disk
, size
);
4269 revalidate_disk(rbd_dev
->disk
);
4273 static int rbd_dev_refresh(struct rbd_device
*rbd_dev
)
4278 down_write(&rbd_dev
->header_rwsem
);
4279 mapping_size
= rbd_dev
->mapping
.size
;
4281 ret
= rbd_dev_header_info(rbd_dev
);
4286 * If there is a parent, see if it has disappeared due to the
4287 * mapped image getting flattened.
4289 if (rbd_dev
->parent
) {
4290 ret
= rbd_dev_v2_parent_info(rbd_dev
);
4295 if (rbd_dev
->spec
->snap_id
== CEPH_NOSNAP
) {
4296 rbd_dev
->mapping
.size
= rbd_dev
->header
.image_size
;
4298 /* validate mapped snapshot's EXISTS flag */
4299 rbd_exists_validate(rbd_dev
);
4303 up_write(&rbd_dev
->header_rwsem
);
4304 if (!ret
&& mapping_size
!= rbd_dev
->mapping
.size
)
4305 rbd_dev_update_size(rbd_dev
);
4310 static int rbd_init_request(void *data
, struct request
*rq
,
4311 unsigned int hctx_idx
, unsigned int request_idx
,
4312 unsigned int numa_node
)
4314 struct work_struct
*work
= blk_mq_rq_to_pdu(rq
);
4316 INIT_WORK(work
, rbd_queue_workfn
);
4320 static const struct blk_mq_ops rbd_mq_ops
= {
4321 .queue_rq
= rbd_queue_rq
,
4322 .init_request
= rbd_init_request
,
4325 static int rbd_init_disk(struct rbd_device
*rbd_dev
)
4327 struct gendisk
*disk
;
4328 struct request_queue
*q
;
4332 /* create gendisk info */
4333 disk
= alloc_disk(single_major
?
4334 (1 << RBD_SINGLE_MAJOR_PART_SHIFT
) :
4335 RBD_MINORS_PER_MAJOR
);
4339 snprintf(disk
->disk_name
, sizeof(disk
->disk_name
), RBD_DRV_NAME
"%d",
4341 disk
->major
= rbd_dev
->major
;
4342 disk
->first_minor
= rbd_dev
->minor
;
4344 disk
->flags
|= GENHD_FL_EXT_DEVT
;
4345 disk
->fops
= &rbd_bd_ops
;
4346 disk
->private_data
= rbd_dev
;
4348 memset(&rbd_dev
->tag_set
, 0, sizeof(rbd_dev
->tag_set
));
4349 rbd_dev
->tag_set
.ops
= &rbd_mq_ops
;
4350 rbd_dev
->tag_set
.queue_depth
= rbd_dev
->opts
->queue_depth
;
4351 rbd_dev
->tag_set
.numa_node
= NUMA_NO_NODE
;
4352 rbd_dev
->tag_set
.flags
= BLK_MQ_F_SHOULD_MERGE
| BLK_MQ_F_SG_MERGE
;
4353 rbd_dev
->tag_set
.nr_hw_queues
= 1;
4354 rbd_dev
->tag_set
.cmd_size
= sizeof(struct work_struct
);
4356 err
= blk_mq_alloc_tag_set(&rbd_dev
->tag_set
);
4360 q
= blk_mq_init_queue(&rbd_dev
->tag_set
);
4366 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, q
);
4367 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4369 /* set io sizes to object size */
4370 segment_size
= rbd_obj_bytes(&rbd_dev
->header
);
4371 blk_queue_max_hw_sectors(q
, segment_size
/ SECTOR_SIZE
);
4372 q
->limits
.max_sectors
= queue_max_hw_sectors(q
);
4373 blk_queue_max_segments(q
, segment_size
/ SECTOR_SIZE
);
4374 blk_queue_max_segment_size(q
, segment_size
);
4375 blk_queue_io_min(q
, segment_size
);
4376 blk_queue_io_opt(q
, segment_size
);
4378 /* enable the discard support */
4379 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, q
);
4380 q
->limits
.discard_granularity
= segment_size
;
4381 q
->limits
.discard_alignment
= segment_size
;
4382 blk_queue_max_discard_sectors(q
, segment_size
/ SECTOR_SIZE
);
4384 if (!ceph_test_opt(rbd_dev
->rbd_client
->client
, NOCRC
))
4385 q
->backing_dev_info
->capabilities
|= BDI_CAP_STABLE_WRITES
;
4389 q
->queuedata
= rbd_dev
;
4391 rbd_dev
->disk
= disk
;
4395 blk_mq_free_tag_set(&rbd_dev
->tag_set
);
4405 static struct rbd_device
*dev_to_rbd_dev(struct device
*dev
)
4407 return container_of(dev
, struct rbd_device
, dev
);
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};
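/*
 * Example (illustrative only): with the device_type above, each mapped
 * image exposes the attributes in rbd_attrs[] under sysfs.  A session
 * for device id 0 might look like (values are placeholders):
 *
 *	# cat /sys/bus/rbd/devices/0/name
 *	myimage
 *	# cat /sys/bus/rbd/devices/0/size
 *	10737418240
 *	# cat /sys/bus/rbd/devices/0/current_snap
 *	-
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the full list.
 */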
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
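/*
 * Illustrative sketch of the rbd_spec lifetime rules implemented above
 * (not compiled): a spec starts with one reference from
 * rbd_spec_alloc(), every additional holder takes rbd_spec_get(), and
 * the kref release path frees all of the dynamically-allocated names:
 *
 *	struct rbd_spec *spec = rbd_spec_alloc();	// refcount 1
 *	if (!spec)
 *		return -ENOMEM;
 *	rbd_spec_get(spec);				// refcount 2
 *	rbd_spec_put(spec);				// refcount 1
 *	rbd_spec_put(spec);				// 0: rbd_spec_free()
 */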
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}
/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_object_prefix",
				  NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_features",
				  &snapid, sizeof(snapid),
				  &features_buf, sizeof(features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_parent",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, which we've already handled).  So we only need
	 * to record the parent spec if we haven't already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
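/*
 * Example of how the parent overlap recorded above behaves
 * (illustrative only): for a clone whose "get_parent" reply carries a
 * 4 GiB overlap, reads of data the clone has never written that fall
 * below byte offset 4294967296 are satisfied from the parent image;
 * once parent_overlap drops to 0 (the image was flattened, or the OSD
 * reported a zero overlap), every request is handled as if the image
 * had no parent at all.
 */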
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_stripe_unit_count",
				  NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = rbd_obj_bytes(&rbd_dev->header);
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}

	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	u32 which;
	bool found = false;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);   /* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
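/*
 * Illustrative walk-through of the two helpers above (not compiled):
 * parsing "  pool  image" proceeds as
 *
 *	const char *buf = "  pool  image";
 *	char *tok = dup_token(&buf, NULL);   // tok = "pool", buf = "  image"
 *	char *tok2 = dup_token(&buf, NULL);  // tok2 = "image", buf = ""
 *	kfree(tok);
 *	kfree(tok2);
 *
 * next_token() alone only skips leading whitespace and reports the
 * token length; it does not advance *buf past the token itself.
 */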
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
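/*
 * Example "rbd add" strings accepted by the parser above (illustrative
 * only; the monitor address, credentials and names are placeholders):
 *
 *	# map the head of rbd/myimage
 *	echo "1.2.3.4:6789 name=admin,secret=AQB... rbd myimage" \
 *		> /sys/bus/rbd/add
 *
 *	# map a snapshot (always read-only)
 *	echo "1.2.3.4:6789 name=admin,secret=AQB... rbd myimage mysnap" \
 *		> /sys/bus/rbd/add
 */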
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
					    &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}
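/*
 * Example of the id lookup above (illustrative only): for a format 2
 * image named "myimage", the id object is "rbd_id.myimage"
 * (RBD_ID_PREFIX + image name), and its "get_id" method returns an
 * encoded string such as "10632ae8944a" (a placeholder here), which
 * becomes rbd_dev->spec->image_id.  For a format 1 image the object
 * does not exist, the call fails with -ENOENT, and the image id is
 * recorded as the empty string.
 */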
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		return ret;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	add_disk(rbd_dev->disk);
	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);

	return 0;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_unregister_watch(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_unregister_watch() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_unregister_watch(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto out;
	}

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	rbd_free_disk(rbd_dev);

	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	device_del(&rbd_dev->dev);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool already = false;
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		rbd_unlock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
	rbd_unregister_watch(rbd_dev);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);

	return count;
}
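/*
 * Example (illustrative only): tearing down the mapping for device id
 * 0 through the bus attributes that call do_rbd_remove():
 *
 *	# echo 0 > /sys/bus/rbd/remove
 *	# echo "0 force" > /sys/bus/rbd/remove
 *
 * The plain form fails with -EBUSY while the device is still open;
 * "force" marks the queue dying so outstanding and new I/O fails
 * instead of blocking the unmap.
 */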
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");