/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

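/*
 * Quick usage sketch (illustrative; the ABI document above is
 * authoritative, and the monitor address, credentials and names below
 * are made up).  Mapping goes through the rbd bus in sysfs, normally
 * driven by the userspace "rbd map" tool:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" \
 *         > /sys/bus/rbd/add
 *   (or add_single_major when the single_major parameter is enabled)
 *   $ echo <dev-id> > /sys/bus/rbd/remove
 */
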
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

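/*
 * Illustrative note: these saturating helpers back the parent image
 * reference count (rbd_dev->parent_ref, see rbd_dev_parent_get() and
 * rbd_dev_parent_put() below).  Once the count has been driven to zero
 * it stays there, so a parent whose teardown has begun cannot be
 * re-referenced.
 */
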
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX	/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *      done
 *        ^
 *        |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			obj_request_count;
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

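/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * dev_id 3 maps to minor 48 (3 << 4) and minors 48-63 are reserved for
 * /dev/rbd3 and its partitions; minor_to_rbd_dev_id(50) gives back 3.
 */
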
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, 0200, NULL, rbd_add);
static BUS_ATTR(remove, 0200, NULL, rbd_remove);
static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

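/*
 * Example of a per-map option string handled above (illustrative; it is
 * normally assembled by the userspace "rbd map" tool and passed in via
 * the sysfs add interface together with the monitor/pool/image spec):
 *
 *   queue_depth=128,lock_on_read,read_only,_pool_ns=myns
 *
 * libceph parses the options it owns itself and hands anything it does
 * not recognize to parse_rbd_opts_token(); tokens unknown here as well
 * are rejected with -EINVAL.
 */
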
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client
 *
 * Removes the client from rbd_client_list under rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}

/*
 * Get a ceph client with specific addr and configuration; if one does
 * not exist, create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

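/*
 * Illustrative example: for a snapshot context whose snaps[] array is
 * { 12, 7, 3 } (newest first, as the OSD keeps it), looking up id 7
 * returns index 1, while id 5 returns BAD_SNAP_INDEX.
 */
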
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->obj_request_count++;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc,
			(rbd_img_is_write(img_req) ? img_req->snapc : NULL),
			num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

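/*
 * Illustrative note on name_format above: data object names are built
 * from the object prefix plus the object number in hex, e.g. (made-up
 * ids) "rb.0.1234.6b8b4567.000000000005" for format 1 and
 * "rbd_data.10086b8b4567.0000000000000005" for format 2, per
 * RBD_V1_DATA_FORMAT/RBD_V2_DATA_FORMAT in rbd_types.h.
 */
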
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}

ID
1708/*
1709 * Determine the byte range(s) covered by either just the object extent
1710 * or the entire object in the parent image.
1711 */
1712static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
1713 bool entire)
e93f3152 1714{
86bd7998
ID
1715 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1716 int ret;
e93f3152 1717
86bd7998
ID
1718 if (!rbd_dev->parent_overlap)
1719 return 0;
e93f3152 1720
86bd7998
ID
1721 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
1722 entire ? 0 : obj_req->ex.oe_off,
1723 entire ? rbd_dev->layout.object_size :
1724 obj_req->ex.oe_len,
1725 &obj_req->img_extents,
1726 &obj_req->num_img_extents);
1727 if (ret)
1728 return ret;
e93f3152 1729
86bd7998
ID
1730 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
1731 rbd_dev->parent_overlap);
1732 return 0;
e93f3152
AE
1733}
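/*
 * Note: writes and discards pass entire=true above so that, should a
 * copyup be needed later, one full object's worth of data is reverse
 * mapped onto the parent.  Reads pass entire=false and map only the
 * byte range that was actually requested.
 */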
1734
3da691bf 1735static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
1217857f 1736{
ecc633ca 1737 switch (obj_req->img_request->data_type) {
3da691bf
ID
1738 case OBJ_REQUEST_BIO:
1739 osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
1740 &obj_req->bio_pos,
43df3d35 1741 obj_req->ex.oe_len);
3da691bf
ID
1742 break;
1743 case OBJ_REQUEST_BVECS:
afb97888 1744 case OBJ_REQUEST_OWN_BVECS:
3da691bf 1745 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
43df3d35 1746 obj_req->ex.oe_len);
afb97888 1747 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
3da691bf
ID
1748 osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
1749 &obj_req->bvec_pos);
1750 break;
1751 default:
1752 rbd_assert(0);
1217857f 1753 }
3da691bf 1754}
1217857f 1755
3da691bf
ID
1756static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
1757{
a162b308 1758 obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
3da691bf
ID
1759 if (!obj_req->osd_req)
1760 return -ENOMEM;
2a842aca 1761
3da691bf 1762 osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
43df3d35 1763 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
3da691bf 1764 rbd_osd_req_setup_data(obj_req, 0);
7ad18afa 1765
3da691bf
ID
1766 rbd_osd_req_format_read(obj_req);
1767 return 0;
1768}
1769
1770static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
1771 unsigned int which)
1772{
1773 struct page **pages;
8b3e1a56 1774
3da691bf
ID
1775 /*
1776 * The response data for a STAT call consists of:
1777 * le64 length;
1778 * struct {
1779 * le32 tv_sec;
1780 * le32 tv_nsec;
1781 * } mtime;
1782 */
1783 pages = ceph_alloc_page_vector(1, GFP_NOIO);
1784 if (IS_ERR(pages))
1785 return PTR_ERR(pages);
1786
1787 osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
1788 osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
1789 8 + sizeof(struct ceph_timespec),
1790 0, false, true);
1791 return 0;
1217857f
AE
1792}
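/*
 * The stat op set up above acts purely as an existence guard: if it
 * fails with -ENOENT the target object does not exist yet and the
 * write path falls back to a copyup from the parent (see
 * rbd_obj_handle_write_guard()).  The length/mtime reply itself is
 * not examined.
 */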
1793
3da691bf
ID
1794static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
1795 unsigned int which)
2169238d 1796{
3da691bf
ID
1797 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1798 u16 opcode;
2169238d 1799
3da691bf
ID
1800 osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
1801 rbd_dev->layout.object_size,
1802 rbd_dev->layout.object_size);
2169238d 1803
3da691bf
ID
1804 if (rbd_obj_is_entire(obj_req))
1805 opcode = CEPH_OSD_OP_WRITEFULL;
1806 else
1807 opcode = CEPH_OSD_OP_WRITE;
2169238d 1808
3da691bf 1809 osd_req_op_extent_init(obj_req->osd_req, which, opcode,
43df3d35 1810 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
3da691bf 1811 rbd_osd_req_setup_data(obj_req, which++);
2169238d 1812
3da691bf
ID
1813 rbd_assert(which == obj_req->osd_req->r_num_ops);
1814 rbd_osd_req_format_write(obj_req);
1815}
2169238d 1816
3da691bf
ID
1817static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
1818{
3da691bf
ID
1819 unsigned int num_osd_ops, which = 0;
1820 int ret;
1821
86bd7998
ID
1822 /* reverse map the entire object onto the parent */
1823 ret = rbd_obj_calc_img_extents(obj_req, true);
1824 if (ret)
1825 return ret;
1826
1827 if (obj_req->num_img_extents) {
3da691bf
ID
1828 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
1829 num_osd_ops = 3; /* stat + setallochint + write/writefull */
1830 } else {
1831 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1832 num_osd_ops = 2; /* setallochint + write/writefull */
2169238d
AE
1833 }
1834
a162b308 1835 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
3da691bf
ID
1836 if (!obj_req->osd_req)
1837 return -ENOMEM;
2169238d 1838
86bd7998 1839 if (obj_req->num_img_extents) {
3da691bf
ID
1840 ret = __rbd_obj_setup_stat(obj_req, which++);
1841 if (ret)
1842 return ret;
1843 }
1844
1845 __rbd_obj_setup_write(obj_req, which);
1846 return 0;
2169238d
AE
1847}
1848
3da691bf
ID
1849static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
1850 unsigned int which)
1851{
3b434a2a
JD
1852 u16 opcode;
1853
3da691bf 1854 if (rbd_obj_is_entire(obj_req)) {
86bd7998 1855 if (obj_req->num_img_extents) {
2bb1e56e
ID
1856 osd_req_op_init(obj_req->osd_req, which++,
1857 CEPH_OSD_OP_CREATE, 0);
3b434a2a
JD
1858 opcode = CEPH_OSD_OP_TRUNCATE;
1859 } else {
3da691bf
ID
1860 osd_req_op_init(obj_req->osd_req, which++,
1861 CEPH_OSD_OP_DELETE, 0);
1862 opcode = 0;
3b434a2a 1863 }
3da691bf
ID
1864 } else if (rbd_obj_is_tail(obj_req)) {
1865 opcode = CEPH_OSD_OP_TRUNCATE;
3b434a2a 1866 } else {
3da691bf 1867 opcode = CEPH_OSD_OP_ZERO;
3b434a2a
JD
1868 }
1869
3da691bf
ID
1870 if (opcode)
1871 osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
43df3d35 1872 obj_req->ex.oe_off, obj_req->ex.oe_len,
3da691bf
ID
1873 0, 0);
1874
1875 rbd_assert(which == obj_req->osd_req->r_num_ops);
1876 rbd_osd_req_format_write(obj_req);
3b434a2a
JD
1877}
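/*
 * Discard op selection, in summary:
 *   whole object, parent data present  -> create + truncate(0)
 *   whole object, no parent data       -> delete
 *   tail of the object                 -> truncate
 *   anywhere else                      -> zero
 * Keeping the object around (create + truncate) when parent data is
 * present prevents later reads from falling through to the parent
 * and resurrecting the discarded range.
 */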
1878
3da691bf 1879static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
bf0d5f50 1880{
3da691bf
ID
1881 unsigned int num_osd_ops, which = 0;
1882 int ret;
37206ee5 1883
86bd7998
ID
1884 /* reverse map the entire object onto the parent */
1885 ret = rbd_obj_calc_img_extents(obj_req, true);
1886 if (ret)
1887 return ret;
f1a4739f 1888
3da691bf
ID
1889 if (rbd_obj_is_entire(obj_req)) {
1890 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2bb1e56e
ID
1891 if (obj_req->num_img_extents)
1892 num_osd_ops = 2; /* create + truncate */
1893 else
1894 num_osd_ops = 1; /* delete */
3da691bf 1895 } else {
86bd7998 1896 if (obj_req->num_img_extents) {
3da691bf
ID
1897 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
1898 num_osd_ops = 2; /* stat + truncate/zero */
1899 } else {
1900 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1901 num_osd_ops = 1; /* truncate/zero */
1902 }
f1a4739f
AE
1903 }
1904
a162b308 1905 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
3da691bf
ID
1906 if (!obj_req->osd_req)
1907 return -ENOMEM;
bf0d5f50 1908
86bd7998 1909 if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
3da691bf
ID
1910 ret = __rbd_obj_setup_stat(obj_req, which++);
1911 if (ret)
1912 return ret;
1913 }
3b434a2a 1914
3da691bf
ID
1915 __rbd_obj_setup_discard(obj_req, which);
1916 return 0;
1917}
9d4df01f 1918
3da691bf
ID
1919/*
1920 * For each object request in @img_req, allocate an OSD request, add
1921 * individual OSD ops and prepare them for submission. The number of
1922 * OSD ops depends on op_type and the overlap point (if any).
1923 */
1924static int __rbd_img_fill_request(struct rbd_img_request *img_req)
1925{
1926 struct rbd_obj_request *obj_req;
1927 int ret;
430c28c3 1928
3da691bf 1929 for_each_obj_request(img_req, obj_req) {
9bb0248d 1930 switch (img_req->op_type) {
3da691bf
ID
1931 case OBJ_OP_READ:
1932 ret = rbd_obj_setup_read(obj_req);
1933 break;
1934 case OBJ_OP_WRITE:
1935 ret = rbd_obj_setup_write(obj_req);
1936 break;
1937 case OBJ_OP_DISCARD:
1938 ret = rbd_obj_setup_discard(obj_req);
1939 break;
1940 default:
1941 rbd_assert(0);
1942 }
1943 if (ret)
1944 return ret;
26f887e0
ID
1945
1946 ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
1947 if (ret)
1948 return ret;
bf0d5f50
AE
1949 }
1950
1951 return 0;
3da691bf 1952}
bf0d5f50 1953
5a237819
ID
1954union rbd_img_fill_iter {
1955 struct ceph_bio_iter bio_iter;
1956 struct ceph_bvec_iter bvec_iter;
1957};
bf0d5f50 1958
5a237819
ID
1959struct rbd_img_fill_ctx {
1960 enum obj_request_type pos_type;
1961 union rbd_img_fill_iter *pos;
1962 union rbd_img_fill_iter iter;
1963 ceph_object_extent_fn_t set_pos_fn;
afb97888
ID
1964 ceph_object_extent_fn_t count_fn;
1965 ceph_object_extent_fn_t copy_fn;
5a237819 1966};
bf0d5f50 1967
5a237819 1968static struct ceph_object_extent *alloc_object_extent(void *arg)
0eefd470 1969{
5a237819
ID
1970 struct rbd_img_request *img_req = arg;
1971 struct rbd_obj_request *obj_req;
0eefd470 1972
5a237819
ID
1973 obj_req = rbd_obj_request_create();
1974 if (!obj_req)
1975 return NULL;
2761713d 1976
5a237819
ID
1977 rbd_img_obj_request_add(img_req, obj_req);
1978 return &obj_req->ex;
1979}
0eefd470 1980
afb97888
ID
1981/*
1982 * While su != os && sc == 1 is technically not fancy (it's the same
1983 * layout as su == os && sc == 1), we can't use the nocopy path for it
1984 * because ->set_pos_fn() should be called only once per object.
1985 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
1986 * treat su != os && sc == 1 as fancy.
1987 */
1988static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
1989{
1990 return l->stripe_unit != l->object_size;
1991}
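/*
 * Images created with the usual striping parameters (stripe unit
 * equal to the object size, stripe count of 1) are not fancy, so the
 * nocopy path below applies; only a stripe unit smaller than the
 * object size forces the extra count/copy passes over the bio_vecs.
 */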
0eefd470 1992
afb97888
ID
1993static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
1994 struct ceph_file_extent *img_extents,
1995 u32 num_img_extents,
1996 struct rbd_img_fill_ctx *fctx)
1997{
1998 u32 i;
1999 int ret;
2000
2001 img_req->data_type = fctx->pos_type;
0eefd470
AE
2002
2003 /*
afb97888
ID
2004 * Create object requests and set each object request's starting
2005 * position in the provided bio (list) or bio_vec array.
0eefd470 2006 */
afb97888
ID
2007 fctx->iter = *fctx->pos;
2008 for (i = 0; i < num_img_extents; i++) {
2009 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2010 img_extents[i].fe_off,
2011 img_extents[i].fe_len,
2012 &img_req->object_extents,
2013 alloc_object_extent, img_req,
2014 fctx->set_pos_fn, &fctx->iter);
2015 if (ret)
2016 return ret;
2017 }
0eefd470 2018
afb97888 2019 return __rbd_img_fill_request(img_req);
0eefd470
AE
2020}
2021
5a237819
ID
2022/*
2023 * Map a list of image extents to a list of object extents, create the
2024 * corresponding object requests (normally each to a different object,
2025 * but not always) and add them to @img_req. For each object request,
afb97888 2026 * set up its data descriptor to point to the corresponding chunk(s) of
5a237819
ID
2027 * @fctx->pos data buffer.
2028 *
afb97888
ID
2029 * Because ceph_file_to_extents() will merge adjacent object extents
2030 * together, each object request's data descriptor may point to multiple
2031 * different chunks of @fctx->pos data buffer.
2032 *
5a237819
ID
2033 * @fctx->pos data buffer is assumed to be large enough.
2034 */
2035static int rbd_img_fill_request(struct rbd_img_request *img_req,
2036 struct ceph_file_extent *img_extents,
2037 u32 num_img_extents,
2038 struct rbd_img_fill_ctx *fctx)
3d7efd18 2039{
afb97888
ID
2040 struct rbd_device *rbd_dev = img_req->rbd_dev;
2041 struct rbd_obj_request *obj_req;
5a237819
ID
2042 u32 i;
2043 int ret;
2044
afb97888
ID
2045 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2046 !rbd_layout_is_fancy(&rbd_dev->layout))
2047 return rbd_img_fill_request_nocopy(img_req, img_extents,
2048 num_img_extents, fctx);
3d7efd18 2049
afb97888 2050 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
0eefd470 2051
bbea1c1a 2052 /*
afb97888
ID
2053 * Create object requests and determine ->bvec_count for each object
2054 * request. Note that ->bvec_count sum over all object requests may
2055 * be greater than the number of bio_vecs in the provided bio (list)
2056 * or bio_vec array because when mapped, those bio_vecs can straddle
2057 * stripe unit boundaries.
bbea1c1a 2058 */
5a237819
ID
2059 fctx->iter = *fctx->pos;
2060 for (i = 0; i < num_img_extents; i++) {
afb97888 2061 ret = ceph_file_to_extents(&rbd_dev->layout,
5a237819
ID
2062 img_extents[i].fe_off,
2063 img_extents[i].fe_len,
2064 &img_req->object_extents,
2065 alloc_object_extent, img_req,
afb97888
ID
2066 fctx->count_fn, &fctx->iter);
2067 if (ret)
2068 return ret;
bbea1c1a 2069 }
0eefd470 2070
afb97888
ID
2071 for_each_obj_request(img_req, obj_req) {
2072 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2073 sizeof(*obj_req->bvec_pos.bvecs),
2074 GFP_NOIO);
2075 if (!obj_req->bvec_pos.bvecs)
2076 return -ENOMEM;
2077 }
0eefd470 2078
8785b1d4 2079 /*
afb97888
ID
2080 * Fill in each object request's private bio_vec array, splitting and
2081 * rearranging the provided bio_vecs in stripe unit chunks as needed.
8785b1d4 2082 */
afb97888
ID
2083 fctx->iter = *fctx->pos;
2084 for (i = 0; i < num_img_extents; i++) {
2085 ret = ceph_iterate_extents(&rbd_dev->layout,
2086 img_extents[i].fe_off,
2087 img_extents[i].fe_len,
2088 &img_req->object_extents,
2089 fctx->copy_fn, &fctx->iter);
5a237819
ID
2090 if (ret)
2091 return ret;
2092 }
3d7efd18 2093
5a237819
ID
2094 return __rbd_img_fill_request(img_req);
2095}
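/*
 * The fancy-layout path above makes two passes over the image extents
 * -- one to create the object requests and count the bio_vecs each
 * will need, one to copy (and split) the caller's bio_vecs into the
 * private arrays -- with an allocation loop over the object requests
 * in between.  The iterator is reset from fctx->pos before each
 * extent pass.
 */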
2096
2097static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2098 u64 off, u64 len)
2099{
2100 struct ceph_file_extent ex = { off, len };
2101 union rbd_img_fill_iter dummy;
2102 struct rbd_img_fill_ctx fctx = {
2103 .pos_type = OBJ_REQUEST_NODATA,
2104 .pos = &dummy,
2105 };
2106
2107 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2108}
2109
2110static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2111{
2112 struct rbd_obj_request *obj_req =
2113 container_of(ex, struct rbd_obj_request, ex);
2114 struct ceph_bio_iter *it = arg;
3d7efd18 2115
5a237819
ID
2116 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2117 obj_req->bio_pos = *it;
2118 ceph_bio_iter_advance(it, bytes);
2119}
3d7efd18 2120
afb97888
ID
2121static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2122{
2123 struct rbd_obj_request *obj_req =
2124 container_of(ex, struct rbd_obj_request, ex);
2125 struct ceph_bio_iter *it = arg;
0eefd470 2126
afb97888
ID
2127 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2128 ceph_bio_iter_advance_step(it, bytes, ({
2129 obj_req->bvec_count++;
2130 }));
0eefd470 2131
afb97888 2132}
0eefd470 2133
afb97888
ID
2134static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2135{
2136 struct rbd_obj_request *obj_req =
2137 container_of(ex, struct rbd_obj_request, ex);
2138 struct ceph_bio_iter *it = arg;
0eefd470 2139
afb97888
ID
2140 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2141 ceph_bio_iter_advance_step(it, bytes, ({
2142 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2143 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2144 }));
3d7efd18
AE
2145}
2146
5a237819
ID
2147static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2148 struct ceph_file_extent *img_extents,
2149 u32 num_img_extents,
2150 struct ceph_bio_iter *bio_pos)
2151{
2152 struct rbd_img_fill_ctx fctx = {
2153 .pos_type = OBJ_REQUEST_BIO,
2154 .pos = (union rbd_img_fill_iter *)bio_pos,
2155 .set_pos_fn = set_bio_pos,
afb97888
ID
2156 .count_fn = count_bio_bvecs,
2157 .copy_fn = copy_bio_bvecs,
5a237819 2158 };
3d7efd18 2159
5a237819
ID
2160 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2161 &fctx);
2162}
3d7efd18 2163
5a237819
ID
2164static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2165 u64 off, u64 len, struct bio *bio)
2166{
2167 struct ceph_file_extent ex = { off, len };
2168 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
3d7efd18 2169
5a237819
ID
2170 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2171}
a9e8ba2c 2172
5a237819
ID
2173static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2174{
2175 struct rbd_obj_request *obj_req =
2176 container_of(ex, struct rbd_obj_request, ex);
2177 struct ceph_bvec_iter *it = arg;
3d7efd18 2178
5a237819
ID
2179 obj_req->bvec_pos = *it;
2180 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2181 ceph_bvec_iter_advance(it, bytes);
2182}
3d7efd18 2183
afb97888
ID
2184static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2185{
2186 struct rbd_obj_request *obj_req =
2187 container_of(ex, struct rbd_obj_request, ex);
2188 struct ceph_bvec_iter *it = arg;
058aa991 2189
afb97888
ID
2190 ceph_bvec_iter_advance_step(it, bytes, ({
2191 obj_req->bvec_count++;
2192 }));
2193}
058aa991 2194
afb97888
ID
2195static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2196{
2197 struct rbd_obj_request *obj_req =
2198 container_of(ex, struct rbd_obj_request, ex);
2199 struct ceph_bvec_iter *it = arg;
3d7efd18 2200
afb97888
ID
2201 ceph_bvec_iter_advance_step(it, bytes, ({
2202 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2203 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2204 }));
3d7efd18
AE
2205}
2206
5a237819
ID
2207static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2208 struct ceph_file_extent *img_extents,
2209 u32 num_img_extents,
2210 struct ceph_bvec_iter *bvec_pos)
c5b5ef6c 2211{
5a237819
ID
2212 struct rbd_img_fill_ctx fctx = {
2213 .pos_type = OBJ_REQUEST_BVECS,
2214 .pos = (union rbd_img_fill_iter *)bvec_pos,
2215 .set_pos_fn = set_bvec_pos,
afb97888
ID
2216 .count_fn = count_bvecs,
2217 .copy_fn = copy_bvecs,
5a237819 2218 };
c5b5ef6c 2219
5a237819
ID
2220 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2221 &fctx);
2222}
c5b5ef6c 2223
5a237819
ID
2224static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2225 struct ceph_file_extent *img_extents,
2226 u32 num_img_extents,
2227 struct bio_vec *bvecs)
2228{
2229 struct ceph_bvec_iter it = {
2230 .bvecs = bvecs,
2231 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2232 num_img_extents) },
2233 };
c5b5ef6c 2234
5a237819
ID
2235 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2236 &it);
2237}
c5b5ef6c 2238
efbd1a11 2239static void rbd_img_request_submit(struct rbd_img_request *img_request)
bf0d5f50 2240{
bf0d5f50 2241 struct rbd_obj_request *obj_request;
c5b5ef6c 2242
37206ee5 2243 dout("%s: img %p\n", __func__, img_request);
c2e82414 2244
663ae2cc 2245 rbd_img_request_get(img_request);
efbd1a11 2246 for_each_obj_request(img_request, obj_request)
3da691bf 2247 rbd_obj_request_submit(obj_request);
c2e82414 2248
663ae2cc 2249 rbd_img_request_put(img_request);
c5b5ef6c
AE
2250}
2251
86bd7998 2252static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
c5b5ef6c 2253{
3da691bf
ID
2254 struct rbd_img_request *img_req = obj_req->img_request;
2255 struct rbd_img_request *child_img_req;
c5b5ef6c
AE
2256 int ret;
2257
e93aca0a
ID
2258 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
2259 OBJ_OP_READ, NULL);
3da691bf 2260 if (!child_img_req)
710214e3
ID
2261 return -ENOMEM;
2262
e93aca0a
ID
2263 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2264 child_img_req->obj_request = obj_req;
a90bb0c1 2265
3da691bf 2266 if (!rbd_img_is_write(img_req)) {
ecc633ca 2267 switch (img_req->data_type) {
3da691bf 2268 case OBJ_REQUEST_BIO:
5a237819
ID
2269 ret = __rbd_img_fill_from_bio(child_img_req,
2270 obj_req->img_extents,
2271 obj_req->num_img_extents,
2272 &obj_req->bio_pos);
3da691bf
ID
2273 break;
2274 case OBJ_REQUEST_BVECS:
afb97888 2275 case OBJ_REQUEST_OWN_BVECS:
5a237819
ID
2276 ret = __rbd_img_fill_from_bvecs(child_img_req,
2277 obj_req->img_extents,
2278 obj_req->num_img_extents,
2279 &obj_req->bvec_pos);
3da691bf
ID
2280 break;
2281 default:
2282 rbd_assert(0);
2283 }
2284 } else {
5a237819
ID
2285 ret = rbd_img_fill_from_bvecs(child_img_req,
2286 obj_req->img_extents,
2287 obj_req->num_img_extents,
2288 obj_req->copyup_bvecs);
3da691bf
ID
2289 }
2290 if (ret) {
2291 rbd_img_request_put(child_img_req);
2292 return ret;
2293 }
2294
2295 rbd_img_request_submit(child_img_req);
2296 return 0;
2297}
2298
2299static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
2300{
2301 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2302 int ret;
2303
2304 if (obj_req->result == -ENOENT &&
86bd7998
ID
2305 rbd_dev->parent_overlap && !obj_req->tried_parent) {
2306 /* reverse map this object extent onto the parent */
2307 ret = rbd_obj_calc_img_extents(obj_req, false);
3da691bf
ID
2308 if (ret) {
2309 obj_req->result = ret;
2310 return true;
2311 }
86bd7998
ID
2312
2313 if (obj_req->num_img_extents) {
2314 obj_req->tried_parent = true;
2315 ret = rbd_obj_read_from_parent(obj_req);
2316 if (ret) {
2317 obj_req->result = ret;
2318 return true;
2319 }
2320 return false;
2321 }
710214e3
ID
2322 }
2323
c5b5ef6c 2324 /*
3da691bf
ID
2325 * -ENOENT means a hole in the image -- zero-fill the entire
2326 * length of the request. A short read also implies zero-fill
2327 * to the end of the request. In both cases we update xferred
2328 * count to indicate the whole request was satisfied.
c5b5ef6c 2329 */
3da691bf 2330 if (obj_req->result == -ENOENT ||
43df3d35 2331 (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
3da691bf
ID
2332 rbd_assert(!obj_req->xferred || !obj_req->result);
2333 rbd_obj_zero_range(obj_req, obj_req->xferred,
43df3d35 2334 obj_req->ex.oe_len - obj_req->xferred);
3da691bf 2335 obj_req->result = 0;
43df3d35 2336 obj_req->xferred = obj_req->ex.oe_len;
710214e3 2337 }
c5b5ef6c 2338
3da691bf
ID
2339 return true;
2340}
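/*
 * Read completion flow for layered images, in short: a read that
 * comes back -ENOENT is reverse mapped onto the parent and, if any of
 * it falls within the overlap, retried once against the parent
 * (tried_parent prevents looping).  Whatever still cannot be
 * satisfied -- a hole or a short read -- is zero-filled and the
 * request is reported as fully transferred.
 */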
c5b5ef6c 2341
3da691bf
ID
2342/*
2343 * copyup_bvecs pages are never highmem pages
2344 */
2345static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2346{
2347 struct ceph_bvec_iter it = {
2348 .bvecs = bvecs,
2349 .iter = { .bi_size = bytes },
2350 };
c5b5ef6c 2351
3da691bf
ID
2352 ceph_bvec_iter_advance_step(&it, bytes, ({
2353 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
2354 bv.bv_len))
2355 return false;
2356 }));
2357 return true;
c5b5ef6c
AE
2358}
2359
3da691bf 2360static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
b454e36d 2361{
3da691bf 2362 unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
fe943d50 2363 int ret;
70d045f6 2364
3da691bf
ID
2365 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
2366 rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
2367 rbd_osd_req_destroy(obj_req->osd_req);
70d045f6 2368
b454e36d 2369 /*
3da691bf
ID
2370 * Create a copyup request with the same number of OSD ops as
2371 * the original request. The original request was stat + op(s),
2372 * the new copyup request will be copyup + the same op(s).
b454e36d 2373 */
a162b308 2374 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
3da691bf
ID
2375 if (!obj_req->osd_req)
2376 return -ENOMEM;
b454e36d 2377
24639ce5 2378 ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
fe943d50
CX
2379 if (ret)
2380 return ret;
2381
c622d226 2382 /*
3da691bf
ID
2383 * Only send non-zero copyup data to save some I/O and network
2384 * bandwidth -- zero copyup data is equivalent to the object not
2385 * existing.
c622d226 2386 */
3da691bf
ID
2387 if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
2388 dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
2389 bytes = 0;
2390 }
3da691bf 2391 osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
0010f705
ID
2392 obj_req->copyup_bvecs,
2393 obj_req->copyup_bvec_count,
2394 bytes);
3da691bf 2395
9bb0248d 2396 switch (obj_req->img_request->op_type) {
3da691bf
ID
2397 case OBJ_OP_WRITE:
2398 __rbd_obj_setup_write(obj_req, 1);
2399 break;
2400 case OBJ_OP_DISCARD:
2401 rbd_assert(!rbd_obj_is_entire(obj_req));
2402 __rbd_obj_setup_discard(obj_req, 1);
2403 break;
2404 default:
2405 rbd_assert(0);
2406 }
70d045f6 2407
26f887e0
ID
2408 ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
2409 if (ret)
2410 return ret;
2411
3da691bf 2412 rbd_obj_request_submit(obj_req);
3da691bf 2413 return 0;
70d045f6
ID
2414}
2415
7e07efb1 2416static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
70d045f6 2417{
7e07efb1 2418 u32 i;
b454e36d 2419
7e07efb1
ID
2420 rbd_assert(!obj_req->copyup_bvecs);
2421 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
2422 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
2423 sizeof(*obj_req->copyup_bvecs),
2424 GFP_NOIO);
2425 if (!obj_req->copyup_bvecs)
2426 return -ENOMEM;
b454e36d 2427
7e07efb1
ID
2428 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
2429 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
2430
2431 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
2432 if (!obj_req->copyup_bvecs[i].bv_page)
2433 return -ENOMEM;
3d7efd18 2434
7e07efb1
ID
2435 obj_req->copyup_bvecs[i].bv_offset = 0;
2436 obj_req->copyup_bvecs[i].bv_len = len;
2437 obj_overlap -= len;
2438 }
b454e36d 2439
7e07efb1
ID
2440 rbd_assert(!obj_overlap);
2441 return 0;
b454e36d
AE
2442}
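/*
 * For example, an object overlap of 9000 bytes with 4K pages yields
 * copyup_bvec_count == 3 with bv_len values of 4096, 4096 and 808;
 * the final assert checks that the bvecs cover the overlap exactly.
 */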
2443
3da691bf 2444static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
bf0d5f50 2445{
3da691bf 2446 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3da691bf 2447 int ret;
bf0d5f50 2448
86bd7998
ID
2449 rbd_assert(obj_req->num_img_extents);
2450 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2451 rbd_dev->parent_overlap);
2452 if (!obj_req->num_img_extents) {
3da691bf
ID
2453 /*
2454 * The overlap has become 0 (most likely because the
2455 * image has been flattened). Use rbd_obj_issue_copyup()
2456 * to re-submit the original write request -- the copyup
2457 * operation itself will be a no-op, since someone must
2458 * have populated the child object while we weren't
2459 * looking. Move to WRITE_FLAT state as we'll be done
2460 * with the operation once the null copyup completes.
2461 */
2462 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2463 return rbd_obj_issue_copyup(obj_req, 0);
bf0d5f50
AE
2464 }
2465
86bd7998 2466 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3da691bf
ID
2467 if (ret)
2468 return ret;
2469
2470 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
86bd7998 2471 return rbd_obj_read_from_parent(obj_req);
bf0d5f50 2472}
8b3e1a56 2473
3da691bf 2474static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
8b3e1a56 2475{
3da691bf 2476 int ret;
8b3e1a56 2477
3da691bf
ID
2478again:
2479 switch (obj_req->write_state) {
2480 case RBD_OBJ_WRITE_GUARD:
2481 rbd_assert(!obj_req->xferred);
2482 if (obj_req->result == -ENOENT) {
2483 /*
2484 * The target object doesn't exist. Read the data for
2485 * the entire target object up to the overlap point (if
2486 * any) from the parent, so we can use it for a copyup.
2487 */
2488 ret = rbd_obj_handle_write_guard(obj_req);
2489 if (ret) {
2490 obj_req->result = ret;
2491 return true;
2492 }
2493 return false;
2494 }
2495 /* fall through */
2496 case RBD_OBJ_WRITE_FLAT:
2497 if (!obj_req->result)
2498 /*
2499 * There is no such thing as a successful short
2500 * write -- indicate the whole request was satisfied.
2501 */
43df3d35 2502 obj_req->xferred = obj_req->ex.oe_len;
3da691bf
ID
2503 return true;
2504 case RBD_OBJ_WRITE_COPYUP:
2505 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
2506 if (obj_req->result)
2507 goto again;
8b3e1a56 2508
3da691bf
ID
2509 rbd_assert(obj_req->xferred);
2510 ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
2511 if (ret) {
2512 obj_req->result = ret;
2513 return true;
2514 }
2515 return false;
2516 default:
c6244b3b 2517 BUG();
3da691bf
ID
2518 }
2519}
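/*
 * Write state machine, in short: a guarded write that hits -ENOENT
 * reads the parent data (WRITE_GUARD -> child read), the completed
 * read re-issues the request as copyup + the original op(s)
 * (WRITE_COPYUP), and the completed copyup then falls through to the
 * flat-write handling.  Any error short-circuits to completion.
 */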
02c74fba 2520
3da691bf
ID
2521/*
2522 * Returns true if @obj_req is completed, or false otherwise.
2523 */
2524static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2525{
9bb0248d 2526 switch (obj_req->img_request->op_type) {
3da691bf
ID
2527 case OBJ_OP_READ:
2528 return rbd_obj_handle_read(obj_req);
2529 case OBJ_OP_WRITE:
2530 return rbd_obj_handle_write(obj_req);
2531 case OBJ_OP_DISCARD:
2532 if (rbd_obj_handle_write(obj_req)) {
2533 /*
2534 * Hide -ENOENT from delete/truncate/zero -- discarding
2535 * a non-existent object is not a problem.
2536 */
2537 if (obj_req->result == -ENOENT) {
2538 obj_req->result = 0;
43df3d35 2539 obj_req->xferred = obj_req->ex.oe_len;
3da691bf
ID
2540 }
2541 return true;
2542 }
2543 return false;
2544 default:
c6244b3b 2545 BUG();
3da691bf
ID
2546 }
2547}
02c74fba 2548
7114edac
ID
2549static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
2550{
2551 struct rbd_img_request *img_req = obj_req->img_request;
2552
2553 rbd_assert((!obj_req->result &&
43df3d35 2554 obj_req->xferred == obj_req->ex.oe_len) ||
7114edac
ID
2555 (obj_req->result < 0 && !obj_req->xferred));
2556 if (!obj_req->result) {
2557 img_req->xferred += obj_req->xferred;
980917fc 2558 return;
02c74fba 2559 }
a9e8ba2c 2560
7114edac
ID
2561 rbd_warn(img_req->rbd_dev,
2562 "%s at objno %llu %llu~%llu result %d xferred %llu",
43df3d35
ID
2563 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
2564 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
7114edac
ID
2565 obj_req->xferred);
2566 if (!img_req->result) {
2567 img_req->result = obj_req->result;
2568 img_req->xferred = 0;
2569 }
2570}
a9e8ba2c 2571
3da691bf
ID
2572static void rbd_img_end_child_request(struct rbd_img_request *img_req)
2573{
2574 struct rbd_obj_request *obj_req = img_req->obj_request;
a9e8ba2c 2575
3da691bf 2576 rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
86bd7998
ID
2577 rbd_assert((!img_req->result &&
2578 img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
2579 (img_req->result < 0 && !img_req->xferred));
8b3e1a56 2580
3da691bf
ID
2581 obj_req->result = img_req->result;
2582 obj_req->xferred = img_req->xferred;
2583 rbd_img_request_put(img_req);
8b3e1a56
AE
2584}
2585
7114edac 2586static void rbd_img_end_request(struct rbd_img_request *img_req)
8b3e1a56 2587{
7114edac
ID
2588 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
2589 rbd_assert((!img_req->result &&
2590 img_req->xferred == blk_rq_bytes(img_req->rq)) ||
2591 (img_req->result < 0 && !img_req->xferred));
8b3e1a56 2592
7114edac
ID
2593 blk_mq_end_request(img_req->rq,
2594 errno_to_blk_status(img_req->result));
2595 rbd_img_request_put(img_req);
3da691bf 2596}
8b3e1a56 2597
3da691bf
ID
2598static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2599{
7114edac 2600 struct rbd_img_request *img_req;
8b3e1a56 2601
7114edac 2602again:
3da691bf
ID
2603 if (!__rbd_obj_handle_request(obj_req))
2604 return;
8b3e1a56 2605
7114edac
ID
2606 img_req = obj_req->img_request;
2607 spin_lock(&img_req->completion_lock);
2608 rbd_obj_end_request(obj_req);
2609 rbd_assert(img_req->pending_count);
2610 if (--img_req->pending_count) {
2611 spin_unlock(&img_req->completion_lock);
2612 return;
2613 }
8b3e1a56 2614
7114edac
ID
2615 spin_unlock(&img_req->completion_lock);
2616 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
2617 obj_req = img_req->obj_request;
2618 rbd_img_end_child_request(img_req);
2619 goto again;
2620 }
2621 rbd_img_end_request(img_req);
8b3e1a56 2622}
bf0d5f50 2623
ed95b21a 2624static const struct rbd_client_id rbd_empty_cid;
b8d70035 2625
ed95b21a
ID
2626static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2627 const struct rbd_client_id *rhs)
2628{
2629 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2630}
2631
2632static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2633{
2634 struct rbd_client_id cid;
2635
2636 mutex_lock(&rbd_dev->watch_mutex);
2637 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2638 cid.handle = rbd_dev->watch_cookie;
2639 mutex_unlock(&rbd_dev->watch_mutex);
2640 return cid;
2641}
2642
2643/*
2644 * lock_rwsem must be held for write
2645 */
2646static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2647 const struct rbd_client_id *cid)
2648{
2649 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2650 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2651 cid->gid, cid->handle);
2652 rbd_dev->owner_cid = *cid; /* struct */
2653}
2654
2655static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2656{
2657 mutex_lock(&rbd_dev->watch_mutex);
2658 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
2659 mutex_unlock(&rbd_dev->watch_mutex);
2660}
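/*
 * The lock cookie is "<RBD_LOCK_COOKIE_PREFIX> <watch cookie>".
 * Tying the cookie to the current watch lets find_watcher() parse it
 * back out to check whether a lock owner is still alive, and lets
 * rbd_reacquire_lock() refresh it when the watch is re-established.
 */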
2661
edd8ca80
FM
2662static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2663{
2664 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2665
2666 strcpy(rbd_dev->lock_cookie, cookie);
2667 rbd_set_owner_cid(rbd_dev, &cid);
2668 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
2669}
2670
ed95b21a
ID
2671/*
2672 * lock_rwsem must be held for write
2673 */
2674static int rbd_lock(struct rbd_device *rbd_dev)
b8d70035 2675{
922dab61 2676 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
ed95b21a 2677 char cookie[32];
e627db08 2678 int ret;
b8d70035 2679
cbbfb0ff
ID
2680 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2681 rbd_dev->lock_cookie[0] != '\0');
52bb1f9b 2682
ed95b21a
ID
2683 format_lock_cookie(rbd_dev, cookie);
2684 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2685 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2686 RBD_LOCK_TAG, "", 0);
e627db08 2687 if (ret)
ed95b21a 2688 return ret;
b8d70035 2689
ed95b21a 2690 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
edd8ca80 2691 __rbd_lock(rbd_dev, cookie);
ed95b21a 2692 return 0;
b8d70035
AE
2693}
2694
ed95b21a
ID
2695/*
2696 * lock_rwsem must be held for write
2697 */
bbead745 2698static void rbd_unlock(struct rbd_device *rbd_dev)
bb040aa0 2699{
922dab61 2700 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
bb040aa0
ID
2701 int ret;
2702
cbbfb0ff
ID
2703 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2704 rbd_dev->lock_cookie[0] == '\0');
bb040aa0 2705
ed95b21a 2706 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
cbbfb0ff 2707 RBD_LOCK_NAME, rbd_dev->lock_cookie);
bbead745
ID
2708 if (ret && ret != -ENOENT)
2709 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
bb040aa0 2710
bbead745
ID
2711 /* treat errors as the image is unlocked */
2712 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
cbbfb0ff 2713 rbd_dev->lock_cookie[0] = '\0';
ed95b21a
ID
2714 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2715 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
bb040aa0
ID
2716}
2717
ed95b21a
ID
2718static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2719 enum rbd_notify_op notify_op,
2720 struct page ***preply_pages,
2721 size_t *preply_len)
9969ebc5
AE
2722{
2723 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
ed95b21a 2724 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
08a79102
KS
2725 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2726 int buf_size = sizeof(buf);
ed95b21a 2727 void *p = buf;
9969ebc5 2728
ed95b21a 2729 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
9969ebc5 2730
ed95b21a
ID
2731 /* encode *LockPayload NotifyMessage (op + ClientId) */
2732 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2733 ceph_encode_32(&p, notify_op);
2734 ceph_encode_64(&p, cid.gid);
2735 ceph_encode_64(&p, cid.handle);
8eb87565 2736
ed95b21a
ID
2737 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2738 &rbd_dev->header_oloc, buf, buf_size,
2739 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
b30a01f2
ID
2740}
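/*
 * The notify payload encoded above is a NotifyMessage: a standard
 * encoding block header (version 2) followed by the 32-bit notify op
 * and the 64-bit gid/handle pair identifying this client.  buf is
 * sized for exactly that -- 4 + 8 + 8 bytes of payload plus the
 * encoding block header.
 */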
2741
ed95b21a
ID
2742static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2743 enum rbd_notify_op notify_op)
b30a01f2 2744{
ed95b21a
ID
2745 struct page **reply_pages;
2746 size_t reply_len;
b30a01f2 2747
ed95b21a
ID
2748 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2749 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2750}
b30a01f2 2751
ed95b21a
ID
2752static void rbd_notify_acquired_lock(struct work_struct *work)
2753{
2754 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2755 acquired_lock_work);
76756a51 2756
ed95b21a 2757 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
c525f036
ID
2758}
2759
ed95b21a 2760static void rbd_notify_released_lock(struct work_struct *work)
c525f036 2761{
ed95b21a
ID
2762 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2763 released_lock_work);
811c6688 2764
ed95b21a 2765 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
fca27065
ID
2766}
2767
ed95b21a 2768static int rbd_request_lock(struct rbd_device *rbd_dev)
36be9a76 2769{
ed95b21a
ID
2770 struct page **reply_pages;
2771 size_t reply_len;
2772 bool lock_owner_responded = false;
36be9a76
AE
2773 int ret;
2774
ed95b21a 2775 dout("%s rbd_dev %p\n", __func__, rbd_dev);
36be9a76 2776
ed95b21a
ID
2777 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2778 &reply_pages, &reply_len);
2779 if (ret && ret != -ETIMEDOUT) {
2780 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
36be9a76 2781 goto out;
ed95b21a 2782 }
36be9a76 2783
ed95b21a
ID
2784 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2785 void *p = page_address(reply_pages[0]);
2786 void *const end = p + reply_len;
2787 u32 n;
36be9a76 2788
ed95b21a
ID
2789 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
2790 while (n--) {
2791 u8 struct_v;
2792 u32 len;
36be9a76 2793
ed95b21a
ID
2794 ceph_decode_need(&p, end, 8 + 8, e_inval);
2795 p += 8 + 8; /* skip gid and cookie */
04017e29 2796
ed95b21a
ID
2797 ceph_decode_32_safe(&p, end, len, e_inval);
2798 if (!len)
2799 continue;
2800
2801 if (lock_owner_responded) {
2802 rbd_warn(rbd_dev,
2803 "duplicate lock owners detected");
2804 ret = -EIO;
2805 goto out;
2806 }
2807
2808 lock_owner_responded = true;
2809 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
2810 &struct_v, &len);
2811 if (ret) {
2812 rbd_warn(rbd_dev,
2813 "failed to decode ResponseMessage: %d",
2814 ret);
2815 goto e_inval;
2816 }
2817
2818 ret = ceph_decode_32(&p);
2819 }
2820 }
2821
2822 if (!lock_owner_responded) {
2823 rbd_warn(rbd_dev, "no lock owners detected");
2824 ret = -ETIMEDOUT;
2825 }
2826
2827out:
2828 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2829 return ret;
2830
2831e_inval:
2832 ret = -EINVAL;
2833 goto out;
2834}
2835
2836static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2837{
2838 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2839
2840 cancel_delayed_work(&rbd_dev->lock_dwork);
2841 if (wake_all)
2842 wake_up_all(&rbd_dev->lock_waitq);
2843 else
2844 wake_up(&rbd_dev->lock_waitq);
2845}
2846
2847static int get_lock_owner_info(struct rbd_device *rbd_dev,
2848 struct ceph_locker **lockers, u32 *num_lockers)
2849{
2850 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2851 u8 lock_type;
2852 char *lock_tag;
2853 int ret;
2854
2855 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2856
2857 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2858 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2859 &lock_type, &lock_tag, lockers, num_lockers);
2860 if (ret)
2861 return ret;
2862
2863 if (*num_lockers == 0) {
2864 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2865 goto out;
2866 }
2867
2868 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2869 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2870 lock_tag);
2871 ret = -EBUSY;
2872 goto out;
2873 }
2874
2875 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2876 rbd_warn(rbd_dev, "shared lock type detected");
2877 ret = -EBUSY;
2878 goto out;
2879 }
2880
2881 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2882 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2883 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2884 (*lockers)[0].id.cookie);
2885 ret = -EBUSY;
2886 goto out;
2887 }
2888
2889out:
2890 kfree(lock_tag);
2891 return ret;
2892}
2893
2894static int find_watcher(struct rbd_device *rbd_dev,
2895 const struct ceph_locker *locker)
2896{
2897 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2898 struct ceph_watch_item *watchers;
2899 u32 num_watchers;
2900 u64 cookie;
2901 int i;
2902 int ret;
2903
2904 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
2905 &rbd_dev->header_oloc, &watchers,
2906 &num_watchers);
2907 if (ret)
2908 return ret;
2909
2910 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
2911 for (i = 0; i < num_watchers; i++) {
2912 if (!memcmp(&watchers[i].addr, &locker->info.addr,
2913 sizeof(locker->info.addr)) &&
2914 watchers[i].cookie == cookie) {
2915 struct rbd_client_id cid = {
2916 .gid = le64_to_cpu(watchers[i].name.num),
2917 .handle = cookie,
2918 };
2919
2920 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
2921 rbd_dev, cid.gid, cid.handle);
2922 rbd_set_owner_cid(rbd_dev, &cid);
2923 ret = 1;
2924 goto out;
2925 }
2926 }
2927
2928 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
2929 ret = 0;
2930out:
2931 kfree(watchers);
2932 return ret;
2933}
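/*
 * find_watcher() returns 1 if the locker still holds a watch on the
 * header object (matching address and cookie), 0 if it does not, or a
 * negative error.  rbd_try_lock() below uses this to choose between
 * requesting the lock from a live owner and blacklisting an
 * apparently dead one before breaking its lock.
 */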
2934
2935/*
2936 * lock_rwsem must be held for write
2937 */
2938static int rbd_try_lock(struct rbd_device *rbd_dev)
2939{
2940 struct ceph_client *client = rbd_dev->rbd_client->client;
2941 struct ceph_locker *lockers;
2942 u32 num_lockers;
2943 int ret;
2944
2945 for (;;) {
2946 ret = rbd_lock(rbd_dev);
2947 if (ret != -EBUSY)
2948 return ret;
2949
2950 /* determine if the current lock holder is still alive */
2951 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
2952 if (ret)
2953 return ret;
2954
2955 if (num_lockers == 0)
2956 goto again;
2957
2958 ret = find_watcher(rbd_dev, lockers);
2959 if (ret) {
2960 if (ret > 0)
2961 ret = 0; /* have to request lock */
2962 goto out;
2963 }
2964
2965 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
2966 ENTITY_NAME(lockers[0].id.name));
2967
2968 ret = ceph_monc_blacklist_add(&client->monc,
2969 &lockers[0].info.addr);
2970 if (ret) {
2971 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
2972 ENTITY_NAME(lockers[0].id.name), ret);
2973 goto out;
2974 }
2975
2976 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
2977 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2978 lockers[0].id.cookie,
2979 &lockers[0].id.name);
2980 if (ret && ret != -ENOENT)
2981 goto out;
2982
2983again:
2984 ceph_free_lockers(lockers, num_lockers);
2985 }
2986
2987out:
2988 ceph_free_lockers(lockers, num_lockers);
2989 return ret;
2990}
2991
2992/*
2993 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
2994 */
2995static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
2996 int *pret)
2997{
2998 enum rbd_lock_state lock_state;
2999
3000 down_read(&rbd_dev->lock_rwsem);
3001 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3002 rbd_dev->lock_state);
3003 if (__rbd_is_lock_owner(rbd_dev)) {
3004 lock_state = rbd_dev->lock_state;
3005 up_read(&rbd_dev->lock_rwsem);
3006 return lock_state;
3007 }
3008
3009 up_read(&rbd_dev->lock_rwsem);
3010 down_write(&rbd_dev->lock_rwsem);
3011 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3012 rbd_dev->lock_state);
3013 if (!__rbd_is_lock_owner(rbd_dev)) {
3014 *pret = rbd_try_lock(rbd_dev);
3015 if (*pret)
3016 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3017 }
3018
3019 lock_state = rbd_dev->lock_state;
3020 up_write(&rbd_dev->lock_rwsem);
3021 return lock_state;
3022}
3023
3024static void rbd_acquire_lock(struct work_struct *work)
3025{
3026 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3027 struct rbd_device, lock_dwork);
3028 enum rbd_lock_state lock_state;
37f13252 3029 int ret = 0;
ed95b21a
ID
3030
3031 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3032again:
3033 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3034 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3035 if (lock_state == RBD_LOCK_STATE_LOCKED)
3036 wake_requests(rbd_dev, true);
3037 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3038 rbd_dev, lock_state, ret);
3039 return;
3040 }
3041
3042 ret = rbd_request_lock(rbd_dev);
3043 if (ret == -ETIMEDOUT) {
3044 goto again; /* treat this as a dead client */
e010dd0a
ID
3045 } else if (ret == -EROFS) {
3046 rbd_warn(rbd_dev, "peer will not release lock");
3047 /*
3048 * If this is rbd_add_acquire_lock(), we want to fail
3049 * immediately -- reuse BLACKLISTED flag. Otherwise we
3050 * want to block.
3051 */
3052 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3053 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3054 /* wake "rbd map --exclusive" process */
3055 wake_requests(rbd_dev, false);
3056 }
ed95b21a
ID
3057 } else if (ret < 0) {
3058 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3059 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3060 RBD_RETRY_DELAY);
3061 } else {
3062 /*
3063 * lock owner acked, but resend if we don't see them
3064 * release the lock
3065 */
3066 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3067 rbd_dev);
3068 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3069 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3070 }
3071}
3072
3073/*
3074 * lock_rwsem must be held for write
3075 */
3076static bool rbd_release_lock(struct rbd_device *rbd_dev)
3077{
3078 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3079 rbd_dev->lock_state);
3080 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3081 return false;
3082
3083 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3084 downgrade_write(&rbd_dev->lock_rwsem);
52bb1f9b 3085 /*
ed95b21a 3086 * Ensure that all in-flight IO is flushed.
52bb1f9b 3087 *
ed95b21a
ID
3088 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3089 * may be shared with other devices.
52bb1f9b 3090 */
ed95b21a
ID
3091 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3092 up_read(&rbd_dev->lock_rwsem);
3093
3094 down_write(&rbd_dev->lock_rwsem);
3095 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3096 rbd_dev->lock_state);
3097 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3098 return false;
3099
bbead745
ID
3100 rbd_unlock(rbd_dev);
3101 /*
3102 * Give others a chance to grab the lock - we would re-acquire
3103 * almost immediately if we got new IO during ceph_osdc_sync()
3104 * otherwise. We need to ack our own notifications, so this
3105 * lock_dwork will be requeued from rbd_wait_state_locked()
3106 * after wake_requests() in rbd_handle_released_lock().
3107 */
3108 cancel_delayed_work(&rbd_dev->lock_dwork);
ed95b21a
ID
3109 return true;
3110}
3111
3112static void rbd_release_lock_work(struct work_struct *work)
3113{
3114 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3115 unlock_work);
3116
3117 down_write(&rbd_dev->lock_rwsem);
3118 rbd_release_lock(rbd_dev);
3119 up_write(&rbd_dev->lock_rwsem);
3120}
3121
3122static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3123 void **p)
3124{
3125 struct rbd_client_id cid = { 0 };
3126
3127 if (struct_v >= 2) {
3128 cid.gid = ceph_decode_64(p);
3129 cid.handle = ceph_decode_64(p);
3130 }
3131
3132 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3133 cid.handle);
3134 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3135 down_write(&rbd_dev->lock_rwsem);
3136 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3137 /*
3138 * we already know that the remote client is
3139 * the owner
3140 */
3141 up_write(&rbd_dev->lock_rwsem);
3142 return;
3143 }
3144
3145 rbd_set_owner_cid(rbd_dev, &cid);
3146 downgrade_write(&rbd_dev->lock_rwsem);
3147 } else {
3148 down_read(&rbd_dev->lock_rwsem);
3149 }
3150
3151 if (!__rbd_is_lock_owner(rbd_dev))
3152 wake_requests(rbd_dev, false);
3153 up_read(&rbd_dev->lock_rwsem);
3154}
3155
3156static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3157 void **p)
3158{
3159 struct rbd_client_id cid = { 0 };
3160
3161 if (struct_v >= 2) {
3162 cid.gid = ceph_decode_64(p);
3163 cid.handle = ceph_decode_64(p);
3164 }
3165
3166 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3167 cid.handle);
3168 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3169 down_write(&rbd_dev->lock_rwsem);
3170 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3171 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3172 __func__, rbd_dev, cid.gid, cid.handle,
3173 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3174 up_write(&rbd_dev->lock_rwsem);
3175 return;
3176 }
3177
3178 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3179 downgrade_write(&rbd_dev->lock_rwsem);
3180 } else {
3181 down_read(&rbd_dev->lock_rwsem);
3182 }
3183
3184 if (!__rbd_is_lock_owner(rbd_dev))
3185 wake_requests(rbd_dev, false);
3186 up_read(&rbd_dev->lock_rwsem);
3187}
3188
3b77faa0
ID
3189/*
3190 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
3191 * ResponseMessage is needed.
3192 */
3193static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3194 void **p)
ed95b21a
ID
3195{
3196 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3197 struct rbd_client_id cid = { 0 };
3b77faa0 3198 int result = 1;
ed95b21a
ID
3199
3200 if (struct_v >= 2) {
3201 cid.gid = ceph_decode_64(p);
3202 cid.handle = ceph_decode_64(p);
3203 }
3204
3205 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3206 cid.handle);
3207 if (rbd_cid_equal(&cid, &my_cid))
3b77faa0 3208 return result;
ed95b21a
ID
3209
3210 down_read(&rbd_dev->lock_rwsem);
3b77faa0
ID
3211 if (__rbd_is_lock_owner(rbd_dev)) {
3212 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
3213 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
3214 goto out_unlock;
3215
3216 /*
3217 * encode ResponseMessage(0) so the peer can detect
3218 * a missing owner
3219 */
3220 result = 0;
3221
3222 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
e010dd0a
ID
3223 if (!rbd_dev->opts->exclusive) {
3224 dout("%s rbd_dev %p queueing unlock_work\n",
3225 __func__, rbd_dev);
3226 queue_work(rbd_dev->task_wq,
3227 &rbd_dev->unlock_work);
3228 } else {
3229 /* refuse to release the lock */
3230 result = -EROFS;
3231 }
ed95b21a
ID
3232 }
3233 }
3b77faa0
ID
3234
3235out_unlock:
ed95b21a 3236 up_read(&rbd_dev->lock_rwsem);
3b77faa0 3237 return result;
ed95b21a
ID
3238}
3239
3240static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3241 u64 notify_id, u64 cookie, s32 *result)
3242{
3243 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
08a79102
KS
3244 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3245 int buf_size = sizeof(buf);
ed95b21a
ID
3246 int ret;
3247
3248 if (result) {
3249 void *p = buf;
3250
3251 /* encode ResponseMessage */
3252 ceph_start_encoding(&p, 1, 1,
3253 buf_size - CEPH_ENCODING_START_BLK_LEN);
3254 ceph_encode_32(&p, *result);
3255 } else {
3256 buf_size = 0;
3257 }
b8d70035 3258
922dab61
ID
3259 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3260 &rbd_dev->header_oloc, notify_id, cookie,
ed95b21a 3261 buf, buf_size);
52bb1f9b 3262 if (ret)
ed95b21a
ID
3263 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3264}
3265
3266static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3267 u64 cookie)
3268{
3269 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3270 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3271}
3272
3273static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3274 u64 notify_id, u64 cookie, s32 result)
3275{
3276 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3277 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3278}
3279
3280static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3281 u64 notifier_id, void *data, size_t data_len)
3282{
3283 struct rbd_device *rbd_dev = arg;
3284 void *p = data;
3285 void *const end = p + data_len;
d4c2269b 3286 u8 struct_v = 0;
ed95b21a
ID
3287 u32 len;
3288 u32 notify_op;
3289 int ret;
3290
3291 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3292 __func__, rbd_dev, cookie, notify_id, data_len);
3293 if (data_len) {
3294 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3295 &struct_v, &len);
3296 if (ret) {
3297 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3298 ret);
3299 return;
3300 }
3301
3302 notify_op = ceph_decode_32(&p);
3303 } else {
3304 /* legacy notification for header updates */
3305 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3306 len = 0;
3307 }
3308
3309 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3310 switch (notify_op) {
3311 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3312 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3313 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3314 break;
3315 case RBD_NOTIFY_OP_RELEASED_LOCK:
3316 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3317 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3318 break;
3319 case RBD_NOTIFY_OP_REQUEST_LOCK:
3b77faa0
ID
3320 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3321 if (ret <= 0)
ed95b21a 3322 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3b77faa0 3323 cookie, ret);
ed95b21a
ID
3324 else
3325 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3326 break;
3327 case RBD_NOTIFY_OP_HEADER_UPDATE:
3328 ret = rbd_dev_refresh(rbd_dev);
3329 if (ret)
3330 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3331
3332 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3333 break;
3334 default:
3335 if (rbd_is_lock_owner(rbd_dev))
3336 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3337 cookie, -EOPNOTSUPP);
3338 else
3339 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3340 break;
3341 }
b8d70035
AE
3342}
3343
99d16943
ID
3344static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3345
922dab61 3346static void rbd_watch_errcb(void *arg, u64 cookie, int err)
bb040aa0 3347{
922dab61 3348 struct rbd_device *rbd_dev = arg;
bb040aa0 3349
922dab61 3350 rbd_warn(rbd_dev, "encountered watch error: %d", err);
bb040aa0 3351
ed95b21a
ID
3352 down_write(&rbd_dev->lock_rwsem);
3353 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3354 up_write(&rbd_dev->lock_rwsem);
3355
99d16943
ID
3356 mutex_lock(&rbd_dev->watch_mutex);
3357 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3358 __rbd_unregister_watch(rbd_dev);
3359 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
bb040aa0 3360
99d16943 3361 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
bb040aa0 3362 }
99d16943 3363 mutex_unlock(&rbd_dev->watch_mutex);
bb040aa0
ID
3364}
3365
9969ebc5 3366/*
99d16943 3367 * watch_mutex must be locked
9969ebc5 3368 */
99d16943 3369static int __rbd_register_watch(struct rbd_device *rbd_dev)
9969ebc5
AE
3370{
3371 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
922dab61 3372 struct ceph_osd_linger_request *handle;
9969ebc5 3373
922dab61 3374 rbd_assert(!rbd_dev->watch_handle);
99d16943 3375 dout("%s rbd_dev %p\n", __func__, rbd_dev);
9969ebc5 3376
922dab61
ID
3377 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3378 &rbd_dev->header_oloc, rbd_watch_cb,
3379 rbd_watch_errcb, rbd_dev);
3380 if (IS_ERR(handle))
3381 return PTR_ERR(handle);
8eb87565 3382
922dab61 3383 rbd_dev->watch_handle = handle;
b30a01f2 3384 return 0;
b30a01f2
ID
3385}
3386
99d16943
ID
3387/*
3388 * watch_mutex must be locked
3389 */
3390static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
b30a01f2 3391{
922dab61
ID
3392 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3393 int ret;
b30a01f2 3394
99d16943
ID
3395 rbd_assert(rbd_dev->watch_handle);
3396 dout("%s rbd_dev %p\n", __func__, rbd_dev);
b30a01f2 3397
922dab61
ID
3398 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3399 if (ret)
3400 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
76756a51 3401
922dab61 3402 rbd_dev->watch_handle = NULL;
c525f036
ID
3403}
3404
99d16943
ID
3405static int rbd_register_watch(struct rbd_device *rbd_dev)
3406{
3407 int ret;
3408
3409 mutex_lock(&rbd_dev->watch_mutex);
3410 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3411 ret = __rbd_register_watch(rbd_dev);
3412 if (ret)
3413 goto out;
3414
3415 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3416 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3417
3418out:
3419 mutex_unlock(&rbd_dev->watch_mutex);
3420 return ret;
3421}
3422
3423static void cancel_tasks_sync(struct rbd_device *rbd_dev)
c525f036 3424{
99d16943
ID
3425 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3426
ed95b21a
ID
3427 cancel_work_sync(&rbd_dev->acquired_lock_work);
3428 cancel_work_sync(&rbd_dev->released_lock_work);
3429 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3430 cancel_work_sync(&rbd_dev->unlock_work);
99d16943
ID
3431}
3432
3433static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3434{
ed95b21a 3435 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
99d16943
ID
3436 cancel_tasks_sync(rbd_dev);
3437
3438 mutex_lock(&rbd_dev->watch_mutex);
3439 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3440 __rbd_unregister_watch(rbd_dev);
3441 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3442 mutex_unlock(&rbd_dev->watch_mutex);
811c6688 3443
23edca86 3444 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
811c6688 3445 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
fca27065
ID
3446}
3447
14bb211d
ID
3448/*
3449 * lock_rwsem must be held for write
3450 */
3451static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3452{
3453 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3454 char cookie[32];
3455 int ret;
3456
3457 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3458
3459 format_lock_cookie(rbd_dev, cookie);
3460 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3461 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3462 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3463 RBD_LOCK_TAG, cookie);
3464 if (ret) {
3465 if (ret != -EOPNOTSUPP)
3466 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3467 ret);
3468
3469 /*
3470 * Lock cookie cannot be updated on older OSDs, so do
3471 * a manual release and queue an acquire.
3472 */
3473 if (rbd_release_lock(rbd_dev))
3474 queue_delayed_work(rbd_dev->task_wq,
3475 &rbd_dev->lock_dwork, 0);
3476 } else {
edd8ca80 3477 __rbd_lock(rbd_dev, cookie);
14bb211d
ID
3478 }
3479}
3480
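/*
 * Re-establish the watch after an error: re-register it, update the
 * exclusive lock cookie if this client holds the lock, and refresh the
 * header.  Runs from watch_dwork, which is queued from the watch error
 * path above.
 */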
99d16943
ID
3481static void rbd_reregister_watch(struct work_struct *work)
3482{
3483 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3484 struct rbd_device, watch_dwork);
3485 int ret;
3486
3487 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3488
3489 mutex_lock(&rbd_dev->watch_mutex);
87c0fded
ID
3490 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3491 mutex_unlock(&rbd_dev->watch_mutex);
14bb211d 3492 return;
87c0fded 3493 }
99d16943
ID
3494
3495 ret = __rbd_register_watch(rbd_dev);
3496 if (ret) {
3497 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4d73644b 3498 if (ret == -EBLACKLISTED || ret == -ENOENT) {
87c0fded 3499 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
14bb211d 3500 wake_requests(rbd_dev, true);
87c0fded 3501 } else {
99d16943
ID
3502 queue_delayed_work(rbd_dev->task_wq,
3503 &rbd_dev->watch_dwork,
3504 RBD_RETRY_DELAY);
87c0fded
ID
3505 }
3506 mutex_unlock(&rbd_dev->watch_mutex);
14bb211d 3507 return;
99d16943
ID
3508 }
3509
3510 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3511 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3512 mutex_unlock(&rbd_dev->watch_mutex);
3513
14bb211d
ID
3514 down_write(&rbd_dev->lock_rwsem);
3515 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3516 rbd_reacquire_lock(rbd_dev);
3517 up_write(&rbd_dev->lock_rwsem);
3518
99d16943
ID
3519 ret = rbd_dev_refresh(rbd_dev);
3520 if (ret)
f6870cc9 3521 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
99d16943
ID
3522}
3523
36be9a76 3524/*
f40eb349
AE
3525 * Synchronous osd object method call. Returns the number of bytes
3526 * returned in the inbound buffer, or a negative error code.
36be9a76
AE
3527 */
3528static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
ecd4a68a
ID
3529 struct ceph_object_id *oid,
3530 struct ceph_object_locator *oloc,
36be9a76 3531 const char *method_name,
4157976b 3532 const void *outbound,
36be9a76 3533 size_t outbound_size,
4157976b 3534 void *inbound,
e2a58ee5 3535 size_t inbound_size)
36be9a76 3536{
ecd4a68a
ID
3537 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3538 struct page *req_page = NULL;
3539 struct page *reply_page;
36be9a76
AE
3540 int ret;
3541
3542 /*
6010a451
AE
3543 * Method calls are ultimately read operations. The result
3544 * should be placed into the inbound buffer provided. They
3545 * may also supply outbound data--parameters for the object
3546 * method. Currently, if this is present, it will be a
3547 * snapshot id.
36be9a76 3548 */
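	/*
	 * Both buffers are passed to ceph_osdc_call() as single pages, so
	 * the outbound data and the reply are each limited to PAGE_SIZE.
	 */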
ecd4a68a
ID
3549 if (outbound) {
3550 if (outbound_size > PAGE_SIZE)
3551 return -E2BIG;
36be9a76 3552
ecd4a68a
ID
3553 req_page = alloc_page(GFP_KERNEL);
3554 if (!req_page)
3555 return -ENOMEM;
04017e29 3556
ecd4a68a 3557 memcpy(page_address(req_page), outbound, outbound_size);
04017e29 3558 }
36be9a76 3559
ecd4a68a
ID
3560 reply_page = alloc_page(GFP_KERNEL);
3561 if (!reply_page) {
3562 if (req_page)
3563 __free_page(req_page);
3564 return -ENOMEM;
3565 }
57385b51 3566
ecd4a68a
ID
3567 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3568 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3569 reply_page, &inbound_size);
3570 if (!ret) {
3571 memcpy(inbound, page_address(reply_page), inbound_size);
3572 ret = inbound_size;
3573 }
36be9a76 3574
ecd4a68a
ID
3575 if (req_page)
3576 __free_page(req_page);
3577 __free_page(reply_page);
36be9a76
AE
3578 return ret;
3579}
3580
ed95b21a
ID
3581/*
3582 * lock_rwsem must be held for read
3583 */
2f18d466 3584static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
ed95b21a
ID
3585{
3586 DEFINE_WAIT(wait);
34f55d0b 3587 unsigned long timeout;
2f18d466
ID
3588 int ret = 0;
3589
3590 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3591 return -EBLACKLISTED;
3592
3593 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3594 return 0;
3595
3596 if (!may_acquire) {
3597 rbd_warn(rbd_dev, "exclusive lock required");
3598 return -EROFS;
3599 }
ed95b21a
ID
3600
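	/*
	 * Kick the lock acquisition worker, then sleep until the lock is
	 * acquired, the client is blacklisted, or lock_timeout expires.
	 */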
3601 do {
3602 /*
3603 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3604 * and cancel_delayed_work() in wake_requests().
3605 */
3606 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3607 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3608 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3609 TASK_UNINTERRUPTIBLE);
3610 up_read(&rbd_dev->lock_rwsem);
34f55d0b
DY
3611 timeout = schedule_timeout(ceph_timeout_jiffies(
3612 rbd_dev->opts->lock_timeout));
ed95b21a 3613 down_read(&rbd_dev->lock_rwsem);
2f18d466
ID
3614 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3615 ret = -EBLACKLISTED;
3616 break;
3617 }
34f55d0b
DY
3618 if (!timeout) {
3619 rbd_warn(rbd_dev, "timed out waiting for lock");
3620 ret = -ETIMEDOUT;
3621 break;
3622 }
2f18d466 3623 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
87c0fded 3624
ed95b21a 3625 finish_wait(&rbd_dev->lock_waitq, &wait);
2f18d466 3626 return ret;
ed95b21a
ID
3627}
3628
7ad18afa 3629static void rbd_queue_workfn(struct work_struct *work)
bf0d5f50 3630{
7ad18afa
CH
3631 struct request *rq = blk_mq_rq_from_pdu(work);
3632 struct rbd_device *rbd_dev = rq->q->queuedata;
bc1ecc65 3633 struct rbd_img_request *img_request;
4e752f0a 3634 struct ceph_snap_context *snapc = NULL;
bc1ecc65
ID
3635 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3636 u64 length = blk_rq_bytes(rq);
6d2940c8 3637 enum obj_operation_type op_type;
4e752f0a 3638 u64 mapping_size;
80de1912 3639 bool must_be_locked;
bf0d5f50
AE
3640 int result;
3641
aebf526b
CH
3642 switch (req_op(rq)) {
3643 case REQ_OP_DISCARD:
6ac56951 3644 case REQ_OP_WRITE_ZEROES:
90e98c52 3645 op_type = OBJ_OP_DISCARD;
aebf526b
CH
3646 break;
3647 case REQ_OP_WRITE:
6d2940c8 3648 op_type = OBJ_OP_WRITE;
aebf526b
CH
3649 break;
3650 case REQ_OP_READ:
6d2940c8 3651 op_type = OBJ_OP_READ;
aebf526b
CH
3652 break;
3653 default:
3654 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
3655 result = -EIO;
3656 goto err;
3657 }
6d2940c8 3658
bc1ecc65 3659 /* Ignore/skip any zero-length requests */
bf0d5f50 3660
bc1ecc65
ID
3661 if (!length) {
3662 dout("%s: zero-length request\n", __func__);
3663 result = 0;
3664 goto err_rq;
3665 }
bf0d5f50 3666
9568c93e
ID
3667 rbd_assert(op_type == OBJ_OP_READ ||
3668 rbd_dev->spec->snap_id == CEPH_NOSNAP);
4dda41d3 3669
bc1ecc65
ID
3670 /*
3671 * Quit early if the mapped snapshot no longer exists. It's
3672 * still possible the snapshot will have disappeared by the
3673 * time our request arrives at the osd, but there's no sense in
3674 * sending it if we already know.
3675 */
3676 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3677 dout("request for non-existent snapshot");
3678 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3679 result = -ENXIO;
3680 goto err_rq;
3681 }
4dda41d3 3682
bc1ecc65
ID
3683 if (offset && length > U64_MAX - offset + 1) {
3684 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3685 length);
3686 result = -EINVAL;
3687 goto err_rq; /* Shouldn't happen */
3688 }
4dda41d3 3689
7ad18afa
CH
3690 blk_mq_start_request(rq);
3691
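	/*
	 * Sample the mapping size and (for writes and discards) the snap
	 * context under header_rwsem so the request is issued against a
	 * consistent view of the header.
	 */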
4e752f0a
JD
3692 down_read(&rbd_dev->header_rwsem);
3693 mapping_size = rbd_dev->mapping.size;
6d2940c8 3694 if (op_type != OBJ_OP_READ) {
4e752f0a
JD
3695 snapc = rbd_dev->header.snapc;
3696 ceph_get_snap_context(snapc);
3697 }
3698 up_read(&rbd_dev->header_rwsem);
3699
3700 if (offset + length > mapping_size) {
bc1ecc65 3701 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4e752f0a 3702 length, mapping_size);
bc1ecc65
ID
3703 result = -EIO;
3704 goto err_rq;
3705 }
bf0d5f50 3706
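	/*
	 * With the exclusive-lock feature, writes and discards (and reads
	 * too if lock_on_read is set) may only proceed while this client
	 * holds the lock.
	 */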
f9bebd58
ID
3707 must_be_locked =
3708 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
3709 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
ed95b21a
ID
3710 if (must_be_locked) {
3711 down_read(&rbd_dev->lock_rwsem);
2f18d466
ID
3712 result = rbd_wait_state_locked(rbd_dev,
3713 !rbd_dev->opts->exclusive);
3714 if (result)
87c0fded 3715 goto err_unlock;
ed95b21a
ID
3716 }
3717
dfd9875f 3718 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
bc1ecc65
ID
3719 if (!img_request) {
3720 result = -ENOMEM;
ed95b21a 3721 goto err_unlock;
bc1ecc65
ID
3722 }
3723 img_request->rq = rq;
70b16db8 3724 snapc = NULL; /* img_request consumes a ref */
bf0d5f50 3725
90e98c52 3726 if (op_type == OBJ_OP_DISCARD)
5a237819 3727 result = rbd_img_fill_nodata(img_request, offset, length);
90e98c52 3728 else
5a237819
ID
3729 result = rbd_img_fill_from_bio(img_request, offset, length,
3730 rq->bio);
bc1ecc65
ID
3731 if (result)
3732 goto err_img_request;
bf0d5f50 3733
efbd1a11 3734 rbd_img_request_submit(img_request);
ed95b21a
ID
3735 if (must_be_locked)
3736 up_read(&rbd_dev->lock_rwsem);
bc1ecc65 3737 return;
bf0d5f50 3738
bc1ecc65
ID
3739err_img_request:
3740 rbd_img_request_put(img_request);
ed95b21a
ID
3741err_unlock:
3742 if (must_be_locked)
3743 up_read(&rbd_dev->lock_rwsem);
bc1ecc65
ID
3744err_rq:
3745 if (result)
3746 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
6d2940c8 3747 obj_op_name(op_type), length, offset, result);
e96a650a 3748 ceph_put_snap_context(snapc);
7ad18afa 3749err:
2a842aca 3750 blk_mq_end_request(rq, errno_to_blk_status(result));
bc1ecc65 3751}
bf0d5f50 3752
fc17b653 3753static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
7ad18afa 3754 const struct blk_mq_queue_data *bd)
bc1ecc65 3755{
7ad18afa
CH
3756 struct request *rq = bd->rq;
3757 struct work_struct *work = blk_mq_rq_to_pdu(rq);
bf0d5f50 3758
7ad18afa 3759 queue_work(rbd_wq, work);
fc17b653 3760 return BLK_STS_OK;
bf0d5f50
AE
3761}
3762
602adf40
YS
3763static void rbd_free_disk(struct rbd_device *rbd_dev)
3764{
5769ed0c
ID
3765 blk_cleanup_queue(rbd_dev->disk->queue);
3766 blk_mq_free_tag_set(&rbd_dev->tag_set);
3767 put_disk(rbd_dev->disk);
a0cab924 3768 rbd_dev->disk = NULL;
602adf40
YS
3769}
3770
788e2df3 3771static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
fe5478e0
ID
3772 struct ceph_object_id *oid,
3773 struct ceph_object_locator *oloc,
3774 void *buf, int buf_len)
788e2df3
AE
3775
3776{
fe5478e0
ID
3777 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3778 struct ceph_osd_request *req;
3779 struct page **pages;
3780 int num_pages = calc_pages_for(0, buf_len);
788e2df3
AE
3781 int ret;
3782
fe5478e0
ID
3783 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
3784 if (!req)
3785 return -ENOMEM;
788e2df3 3786
fe5478e0
ID
3787 ceph_oid_copy(&req->r_base_oid, oid);
3788 ceph_oloc_copy(&req->r_base_oloc, oloc);
3789 req->r_flags = CEPH_OSD_FLAG_READ;
430c28c3 3790
fe5478e0
ID
3791 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
3792 if (IS_ERR(pages)) {
3793 ret = PTR_ERR(pages);
3794 goto out_req;
3795 }
1ceae7ef 3796
fe5478e0
ID
3797 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
3798 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
3799 true);
3800
26f887e0
ID
3801 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
3802 if (ret)
3803 goto out_req;
3804
fe5478e0
ID
3805 ceph_osdc_start_request(osdc, req, false);
3806 ret = ceph_osdc_wait_request(osdc, req);
3807 if (ret >= 0)
3808 ceph_copy_from_page_vector(pages, buf, 0, ret);
788e2df3 3809
fe5478e0
ID
3810out_req:
3811 ceph_osdc_put_request(req);
788e2df3
AE
3812 return ret;
3813}
3814
602adf40 3815/*
662518b1
AE
3816 * Read the complete header for the given rbd device. On successful
3817 * return, the rbd_dev->header field will contain up-to-date
3818 * information about the image.
602adf40 3819 */
99a41ebc 3820static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
602adf40 3821{
4156d998 3822 struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c9 3823 u32 snap_count = 0;
4156d998
AE
3824 u64 names_size = 0;
3825 u32 want_count;
3826 int ret;
602adf40 3827
00f1f36f 3828 /*
4156d998
AE
3829 * The complete header will include an array of its 64-bit
3830 * snapshot ids, followed by the names of those snapshots as
3831 * a contiguous block of NUL-terminated strings. Note that
3832 * the number of snapshots could change by the time we read
3833 * it in, in which case we re-read it.
00f1f36f 3834 */
4156d998
AE
3835 do {
3836 size_t size;
3837
3838 kfree(ondisk);
3839
3840 size = sizeof (*ondisk);
3841 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3842 size += names_size;
3843 ondisk = kmalloc(size, GFP_KERNEL);
3844 if (!ondisk)
662518b1 3845 return -ENOMEM;
4156d998 3846
fe5478e0
ID
3847 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
3848 &rbd_dev->header_oloc, ondisk, size);
4156d998 3849 if (ret < 0)
662518b1 3850 goto out;
c0cd10db 3851 if ((size_t)ret < size) {
4156d998 3852 ret = -ENXIO;
06ecc6cb
AE
3853 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3854 size, ret);
662518b1 3855 goto out;
4156d998
AE
3856 }
3857 if (!rbd_dev_ondisk_valid(ondisk)) {
3858 ret = -ENXIO;
06ecc6cb 3859 rbd_warn(rbd_dev, "invalid header");
662518b1 3860 goto out;
81e759fb 3861 }
602adf40 3862
4156d998
AE
3863 names_size = le64_to_cpu(ondisk->snap_names_len);
3864 want_count = snap_count;
3865 snap_count = le32_to_cpu(ondisk->snap_count);
3866 } while (snap_count != want_count);
00f1f36f 3867
662518b1
AE
3868 ret = rbd_header_from_disk(rbd_dev, ondisk);
3869out:
4156d998
AE
3870 kfree(ondisk);
3871
3872 return ret;
602adf40
YS
3873}
3874
15228ede
AE
3875/*
3876 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3877 * has disappeared from the (just updated) snapshot context.
3878 */
3879static void rbd_exists_validate(struct rbd_device *rbd_dev)
3880{
3881 u64 snap_id;
3882
3883 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3884 return;
3885
3886 snap_id = rbd_dev->spec->snap_id;
3887 if (snap_id == CEPH_NOSNAP)
3888 return;
3889
3890 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3891 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3892}
3893
9875201e
JD
3894static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3895{
3896 sector_t size;
9875201e
JD
3897
3898 /*
811c6688
ID
3899 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3900 * try to update its size. If REMOVING is set, updating size
3901 * is just useless work since the device can't be opened.
9875201e 3902 */
811c6688
ID
3903 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3904 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
9875201e
JD
3905 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3906 dout("setting size to %llu sectors", (unsigned long long)size);
3907 set_capacity(rbd_dev->disk, size);
3908 revalidate_disk(rbd_dev->disk);
3909 }
3910}
3911
cc4a38bd 3912static int rbd_dev_refresh(struct rbd_device *rbd_dev)
1fe5e993 3913{
e627db08 3914 u64 mapping_size;
1fe5e993
AE
3915 int ret;
3916
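	/*
	 * Re-read the header (and parent info for layered images) under
	 * header_rwsem, then resize the block device if the mapped size
	 * changed.
	 */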
cfbf6377 3917 down_write(&rbd_dev->header_rwsem);
3b5cf2a2 3918 mapping_size = rbd_dev->mapping.size;
a720ae09
ID
3919
3920 ret = rbd_dev_header_info(rbd_dev);
52bb1f9b 3921 if (ret)
73e39e4d 3922 goto out;
15228ede 3923
e8f59b59
ID
3924 /*
3925 * If there is a parent, see if it has disappeared due to the
3926 * mapped image getting flattened.
3927 */
3928 if (rbd_dev->parent) {
3929 ret = rbd_dev_v2_parent_info(rbd_dev);
3930 if (ret)
73e39e4d 3931 goto out;
e8f59b59
ID
3932 }
3933
5ff1108c 3934 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
73e39e4d 3935 rbd_dev->mapping.size = rbd_dev->header.image_size;
5ff1108c
ID
3936 } else {
3937 /* validate mapped snapshot's EXISTS flag */
3938 rbd_exists_validate(rbd_dev);
3939 }
15228ede 3940
73e39e4d 3941out:
cfbf6377 3942 up_write(&rbd_dev->header_rwsem);
73e39e4d 3943 if (!ret && mapping_size != rbd_dev->mapping.size)
9875201e 3944 rbd_dev_update_size(rbd_dev);
1fe5e993 3945
73e39e4d 3946 return ret;
1fe5e993
AE
3947}
3948
d6296d39
CH
3949static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
3950 unsigned int hctx_idx, unsigned int numa_node)
7ad18afa
CH
3951{
3952 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3953
3954 INIT_WORK(work, rbd_queue_workfn);
3955 return 0;
3956}
3957
f363b089 3958static const struct blk_mq_ops rbd_mq_ops = {
7ad18afa 3959 .queue_rq = rbd_queue_rq,
7ad18afa
CH
3960 .init_request = rbd_init_request,
3961};
3962
602adf40
YS
3963static int rbd_init_disk(struct rbd_device *rbd_dev)
3964{
3965 struct gendisk *disk;
3966 struct request_queue *q;
420efbdf
ID
3967 unsigned int objset_bytes =
3968 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
7ad18afa 3969 int err;
602adf40 3970
602adf40 3971 /* create gendisk info */
7e513d43
ID
3972 disk = alloc_disk(single_major ?
3973 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3974 RBD_MINORS_PER_MAJOR);
602adf40 3975 if (!disk)
1fcdb8aa 3976 return -ENOMEM;
602adf40 3977
f0f8cef5 3978 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
de71a297 3979 rbd_dev->dev_id);
602adf40 3980 disk->major = rbd_dev->major;
dd82fff1 3981 disk->first_minor = rbd_dev->minor;
7e513d43
ID
3982 if (single_major)
3983 disk->flags |= GENHD_FL_EXT_DEVT;
602adf40
YS
3984 disk->fops = &rbd_bd_ops;
3985 disk->private_data = rbd_dev;
3986
7ad18afa
CH
3987 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3988 rbd_dev->tag_set.ops = &rbd_mq_ops;
b5584180 3989 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
7ad18afa 3990 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
b5584180 3991 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
7ad18afa
CH
3992 rbd_dev->tag_set.nr_hw_queues = 1;
3993 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3994
3995 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3996 if (err)
602adf40 3997 goto out_disk;
029bcbd8 3998
7ad18afa
CH
3999 q = blk_mq_init_queue(&rbd_dev->tag_set);
4000 if (IS_ERR(q)) {
4001 err = PTR_ERR(q);
4002 goto out_tag_set;
4003 }
4004
8b904b5b 4005 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
d8a2c89c 4006 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
593a9e7b 4007
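	/*
	 * objset_bytes (object size * stripe count) is the image's natural
	 * I/O boundary: cap a single request at one object set and
	 * advertise it as the minimum/optimal I/O size.
	 */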
420efbdf 4008 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
0d9fde4f 4009 q->limits.max_sectors = queue_max_hw_sectors(q);
21acdf45 4010 blk_queue_max_segments(q, USHRT_MAX);
24f1df60 4011 blk_queue_max_segment_size(q, UINT_MAX);
420efbdf
ID
4012 blk_queue_io_min(q, objset_bytes);
4013 blk_queue_io_opt(q, objset_bytes);
029bcbd8 4014
d9360540
ID
4015 if (rbd_dev->opts->trim) {
4016 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4017 q->limits.discard_granularity = objset_bytes;
4018 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4019 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4020 }
90e98c52 4021
bae818ee 4022 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
dc3b17cc 4023 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
bae818ee 4024
5769ed0c
ID
4025 /*
4026 * disk_release() expects a queue ref from add_disk() and will
4027 * put it. Hold an extra ref until add_disk() is called.
4028 */
4029 WARN_ON(!blk_get_queue(q));
602adf40 4030 disk->queue = q;
602adf40
YS
4031 q->queuedata = rbd_dev;
4032
4033 rbd_dev->disk = disk;
602adf40 4034
602adf40 4035 return 0;
7ad18afa
CH
4036out_tag_set:
4037 blk_mq_free_tag_set(&rbd_dev->tag_set);
602adf40
YS
4038out_disk:
4039 put_disk(disk);
7ad18afa 4040 return err;
602adf40
YS
4041}
4042
dfc5606d
YS
4043/*
4044 sysfs
4045*/
4046
593a9e7b
AE
4047static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4048{
4049 return container_of(dev, struct rbd_device, dev);
4050}
4051
dfc5606d
YS
4052static ssize_t rbd_size_show(struct device *dev,
4053 struct device_attribute *attr, char *buf)
4054{
593a9e7b 4055 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0 4056
fc71d833
AE
4057 return sprintf(buf, "%llu\n",
4058 (unsigned long long)rbd_dev->mapping.size);
dfc5606d
YS
4059}
4060
34b13184
AE
4061/*
4062 * Note this shows the features for whatever's mapped, which is not
4063 * necessarily the base image.
4064 */
4065static ssize_t rbd_features_show(struct device *dev,
4066 struct device_attribute *attr, char *buf)
4067{
4068 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4069
4070 return sprintf(buf, "0x%016llx\n",
fc71d833 4071 (unsigned long long)rbd_dev->mapping.features);
34b13184
AE
4072}
4073
dfc5606d
YS
4074static ssize_t rbd_major_show(struct device *dev,
4075 struct device_attribute *attr, char *buf)
4076{
593a9e7b 4077 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 4078
fc71d833
AE
4079 if (rbd_dev->major)
4080 return sprintf(buf, "%d\n", rbd_dev->major);
4081
4082 return sprintf(buf, "(none)\n");
dd82fff1
ID
4083}
4084
4085static ssize_t rbd_minor_show(struct device *dev,
4086 struct device_attribute *attr, char *buf)
4087{
4088 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
fc71d833 4089
dd82fff1 4090 return sprintf(buf, "%d\n", rbd_dev->minor);
dfc5606d
YS
4091}
4092
005a07bf
ID
4093static ssize_t rbd_client_addr_show(struct device *dev,
4094 struct device_attribute *attr, char *buf)
4095{
4096 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4097 struct ceph_entity_addr *client_addr =
4098 ceph_client_addr(rbd_dev->rbd_client->client);
4099
4100 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4101 le32_to_cpu(client_addr->nonce));
4102}
4103
dfc5606d
YS
4104static ssize_t rbd_client_id_show(struct device *dev,
4105 struct device_attribute *attr, char *buf)
602adf40 4106{
593a9e7b 4107 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 4108
1dbb4399 4109 return sprintf(buf, "client%lld\n",
033268a5 4110 ceph_client_gid(rbd_dev->rbd_client->client));
602adf40
YS
4111}
4112
267fb90b
MC
4113static ssize_t rbd_cluster_fsid_show(struct device *dev,
4114 struct device_attribute *attr, char *buf)
4115{
4116 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4117
4118 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4119}
4120
0d6d1e9c
MC
4121static ssize_t rbd_config_info_show(struct device *dev,
4122 struct device_attribute *attr, char *buf)
4123{
4124 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4125
4126 return sprintf(buf, "%s\n", rbd_dev->config_info);
602adf40
YS
4127}
4128
dfc5606d
YS
4129static ssize_t rbd_pool_show(struct device *dev,
4130 struct device_attribute *attr, char *buf)
602adf40 4131{
593a9e7b 4132 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 4133
0d7dbfce 4134 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
dfc5606d
YS
4135}
4136
9bb2f334
AE
4137static ssize_t rbd_pool_id_show(struct device *dev,
4138 struct device_attribute *attr, char *buf)
4139{
4140 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4141
0d7dbfce 4142 return sprintf(buf, "%llu\n",
fc71d833 4143 (unsigned long long) rbd_dev->spec->pool_id);
9bb2f334
AE
4144}
4145
b26c047b
ID
4146static ssize_t rbd_pool_ns_show(struct device *dev,
4147 struct device_attribute *attr, char *buf)
4148{
4149 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4150
4151 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
4152}
4153
dfc5606d
YS
4154static ssize_t rbd_name_show(struct device *dev,
4155 struct device_attribute *attr, char *buf)
4156{
593a9e7b 4157 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 4158
a92ffdf8
AE
4159 if (rbd_dev->spec->image_name)
4160 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4161
4162 return sprintf(buf, "(unknown)\n");
dfc5606d
YS
4163}
4164
589d30e0
AE
4165static ssize_t rbd_image_id_show(struct device *dev,
4166 struct device_attribute *attr, char *buf)
4167{
4168 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4169
0d7dbfce 4170 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
589d30e0
AE
4171}
4172
34b13184
AE
4173/*
4174 * Shows the name of the currently-mapped snapshot (or
4175 * RBD_SNAP_HEAD_NAME for the base image).
4176 */
dfc5606d
YS
4177static ssize_t rbd_snap_show(struct device *dev,
4178 struct device_attribute *attr,
4179 char *buf)
4180{
593a9e7b 4181 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 4182
0d7dbfce 4183 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
dfc5606d
YS
4184}
4185
92a58671
MC
4186static ssize_t rbd_snap_id_show(struct device *dev,
4187 struct device_attribute *attr, char *buf)
4188{
4189 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4190
4191 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4192}
4193
86b00e0d 4194/*
ff96128f
ID
4195 * For a v2 image, shows the chain of parent images, separated by empty
4196 * lines. For v1 images or if there is no parent, shows "(no parent
4197 * image)".
86b00e0d
AE
4198 */
4199static ssize_t rbd_parent_show(struct device *dev,
ff96128f
ID
4200 struct device_attribute *attr,
4201 char *buf)
86b00e0d
AE
4202{
4203 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
ff96128f 4204 ssize_t count = 0;
86b00e0d 4205
ff96128f 4206 if (!rbd_dev->parent)
86b00e0d
AE
4207 return sprintf(buf, "(no parent image)\n");
4208
ff96128f
ID
4209 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4210 struct rbd_spec *spec = rbd_dev->parent_spec;
4211
4212 count += sprintf(&buf[count], "%s"
4213 "pool_id %llu\npool_name %s\n"
e92c0eaf 4214 "pool_ns %s\n"
ff96128f
ID
4215 "image_id %s\nimage_name %s\n"
4216 "snap_id %llu\nsnap_name %s\n"
4217 "overlap %llu\n",
4218 !count ? "" : "\n", /* first? */
4219 spec->pool_id, spec->pool_name,
e92c0eaf 4220 spec->pool_ns ?: "",
ff96128f
ID
4221 spec->image_id, spec->image_name ?: "(unknown)",
4222 spec->snap_id, spec->snap_name,
4223 rbd_dev->parent_overlap);
4224 }
4225
4226 return count;
86b00e0d
AE
4227}
4228
dfc5606d
YS
4229static ssize_t rbd_image_refresh(struct device *dev,
4230 struct device_attribute *attr,
4231 const char *buf,
4232 size_t size)
4233{
593a9e7b 4234 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 4235 int ret;
602adf40 4236
cc4a38bd 4237 ret = rbd_dev_refresh(rbd_dev);
e627db08 4238 if (ret)
52bb1f9b 4239 return ret;
b813623a 4240
52bb1f9b 4241 return size;
dfc5606d 4242}
602adf40 4243
5657a819
JP
4244static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
4245static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
4246static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
4247static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
4248static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
4249static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
4250static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
4251static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
4252static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
4253static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
b26c047b 4254static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5657a819
JP
4255static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
4256static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
4257static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
4258static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
4259static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
4260static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
dfc5606d
YS
4261
4262static struct attribute *rbd_attrs[] = {
4263 &dev_attr_size.attr,
34b13184 4264 &dev_attr_features.attr,
dfc5606d 4265 &dev_attr_major.attr,
dd82fff1 4266 &dev_attr_minor.attr,
005a07bf 4267 &dev_attr_client_addr.attr,
dfc5606d 4268 &dev_attr_client_id.attr,
267fb90b 4269 &dev_attr_cluster_fsid.attr,
0d6d1e9c 4270 &dev_attr_config_info.attr,
dfc5606d 4271 &dev_attr_pool.attr,
9bb2f334 4272 &dev_attr_pool_id.attr,
b26c047b 4273 &dev_attr_pool_ns.attr,
dfc5606d 4274 &dev_attr_name.attr,
589d30e0 4275 &dev_attr_image_id.attr,
dfc5606d 4276 &dev_attr_current_snap.attr,
92a58671 4277 &dev_attr_snap_id.attr,
86b00e0d 4278 &dev_attr_parent.attr,
dfc5606d 4279 &dev_attr_refresh.attr,
dfc5606d
YS
4280 NULL
4281};
4282
4283static struct attribute_group rbd_attr_group = {
4284 .attrs = rbd_attrs,
4285};
4286
4287static const struct attribute_group *rbd_attr_groups[] = {
4288 &rbd_attr_group,
4289 NULL
4290};
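/*
 * These attributes appear under /sys/bus/rbd/devices/<dev-id>/ in sysfs.
 * For example, reading "size" returns the mapped image size in bytes,
 * and writing anything to "refresh" invokes rbd_dev_refresh().
 */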
4291
6cac4695 4292static void rbd_dev_release(struct device *dev);
dfc5606d 4293
b9942bc9 4294static const struct device_type rbd_device_type = {
dfc5606d
YS
4295 .name = "rbd",
4296 .groups = rbd_attr_groups,
6cac4695 4297 .release = rbd_dev_release,
dfc5606d
YS
4298};
4299
8b8fb99c
AE
4300static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4301{
4302 kref_get(&spec->kref);
4303
4304 return spec;
4305}
4306
4307static void rbd_spec_free(struct kref *kref);
4308static void rbd_spec_put(struct rbd_spec *spec)
4309{
4310 if (spec)
4311 kref_put(&spec->kref, rbd_spec_free);
4312}
4313
4314static struct rbd_spec *rbd_spec_alloc(void)
4315{
4316 struct rbd_spec *spec;
4317
4318 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4319 if (!spec)
4320 return NULL;
04077599
ID
4321
4322 spec->pool_id = CEPH_NOPOOL;
4323 spec->snap_id = CEPH_NOSNAP;
8b8fb99c
AE
4324 kref_init(&spec->kref);
4325
8b8fb99c
AE
4326 return spec;
4327}
4328
4329static void rbd_spec_free(struct kref *kref)
4330{
4331 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4332
4333 kfree(spec->pool_name);
b26c047b 4334 kfree(spec->pool_ns);
8b8fb99c
AE
4335 kfree(spec->image_id);
4336 kfree(spec->image_name);
4337 kfree(spec->snap_name);
4338 kfree(spec);
4339}
4340
1643dfa4 4341static void rbd_dev_free(struct rbd_device *rbd_dev)
dd5ac32d 4342{
99d16943 4343 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
ed95b21a 4344 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
dd5ac32d 4345
c41d13a3 4346 ceph_oid_destroy(&rbd_dev->header_oid);
6b6dddbe 4347 ceph_oloc_destroy(&rbd_dev->header_oloc);
0d6d1e9c 4348 kfree(rbd_dev->config_info);
c41d13a3 4349
dd5ac32d
ID
4350 rbd_put_client(rbd_dev->rbd_client);
4351 rbd_spec_put(rbd_dev->spec);
4352 kfree(rbd_dev->opts);
4353 kfree(rbd_dev);
1643dfa4
ID
4354}
4355
4356static void rbd_dev_release(struct device *dev)
4357{
4358 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4359 bool need_put = !!rbd_dev->opts;
4360
4361 if (need_put) {
4362 destroy_workqueue(rbd_dev->task_wq);
4363 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4364 }
4365
4366 rbd_dev_free(rbd_dev);
dd5ac32d
ID
4367
4368 /*
4369 * This is racy, but way better than dropping the module reference outside of
4370 * the release callback. The race window is pretty small, so
4371 * doing something similar to dm (dm-builtin.c) is overkill.
4372 */
4373 if (need_put)
4374 module_put(THIS_MODULE);
4375}
4376
1643dfa4
ID
4377static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4378 struct rbd_spec *spec)
c53d5893
AE
4379{
4380 struct rbd_device *rbd_dev;
4381
1643dfa4 4382 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
c53d5893
AE
4383 if (!rbd_dev)
4384 return NULL;
4385
4386 spin_lock_init(&rbd_dev->lock);
4387 INIT_LIST_HEAD(&rbd_dev->node);
c53d5893
AE
4388 init_rwsem(&rbd_dev->header_rwsem);
4389
7e97332e 4390 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
c41d13a3 4391 ceph_oid_init(&rbd_dev->header_oid);
431a02cd 4392 rbd_dev->header_oloc.pool = spec->pool_id;
b26c047b
ID
4393 if (spec->pool_ns) {
4394 WARN_ON(!*spec->pool_ns);
4395 rbd_dev->header_oloc.pool_ns =
4396 ceph_find_or_create_string(spec->pool_ns,
4397 strlen(spec->pool_ns));
4398 }
c41d13a3 4399
99d16943
ID
4400 mutex_init(&rbd_dev->watch_mutex);
4401 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4402 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4403
ed95b21a
ID
4404 init_rwsem(&rbd_dev->lock_rwsem);
4405 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4406 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4407 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4408 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4409 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4410 init_waitqueue_head(&rbd_dev->lock_waitq);
4411
dd5ac32d
ID
4412 rbd_dev->dev.bus = &rbd_bus_type;
4413 rbd_dev->dev.type = &rbd_device_type;
4414 rbd_dev->dev.parent = &rbd_root_dev;
dd5ac32d
ID
4415 device_initialize(&rbd_dev->dev);
4416
c53d5893 4417 rbd_dev->rbd_client = rbdc;
d147543d 4418 rbd_dev->spec = spec;
0903e875 4419
1643dfa4
ID
4420 return rbd_dev;
4421}
4422
4423/*
4424 * Create a mapping rbd_dev.
4425 */
4426static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4427 struct rbd_spec *spec,
4428 struct rbd_options *opts)
4429{
4430 struct rbd_device *rbd_dev;
4431
4432 rbd_dev = __rbd_dev_create(rbdc, spec);
4433 if (!rbd_dev)
4434 return NULL;
4435
4436 rbd_dev->opts = opts;
4437
4438 /* get an id and fill in device name */
4439 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4440 minor_to_rbd_dev_id(1 << MINORBITS),
4441 GFP_KERNEL);
4442 if (rbd_dev->dev_id < 0)
4443 goto fail_rbd_dev;
4444
4445 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4446 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4447 rbd_dev->name);
4448 if (!rbd_dev->task_wq)
4449 goto fail_dev_id;
dd5ac32d 4450
1643dfa4
ID
4451 /* we have a ref from do_rbd_add() */
4452 __module_get(THIS_MODULE);
dd5ac32d 4453
1643dfa4 4454 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
c53d5893 4455 return rbd_dev;
1643dfa4
ID
4456
4457fail_dev_id:
4458 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4459fail_rbd_dev:
4460 rbd_dev_free(rbd_dev);
4461 return NULL;
c53d5893
AE
4462}
4463
4464static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4465{
dd5ac32d
ID
4466 if (rbd_dev)
4467 put_device(&rbd_dev->dev);
c53d5893
AE
4468}
4469
9d475de5
AE
4470/*
4471 * Get the size and object order for an image snapshot, or if
4472 * snap_id is CEPH_NOSNAP, gets this information for the base
4473 * image.
4474 */
4475static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4476 u8 *order, u64 *snap_size)
4477{
4478 __le64 snapid = cpu_to_le64(snap_id);
4479 int ret;
4480 struct {
4481 u8 order;
4482 __le64 size;
4483 } __attribute__ ((packed)) size_buf = { 0 };
4484
ecd4a68a
ID
4485 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4486 &rbd_dev->header_oloc, "get_size",
4487 &snapid, sizeof(snapid),
4488 &size_buf, sizeof(size_buf));
36be9a76 4489 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
9d475de5
AE
4490 if (ret < 0)
4491 return ret;
57385b51
AE
4492 if (ret < sizeof (size_buf))
4493 return -ERANGE;
9d475de5 4494
c3545579 4495 if (order) {
c86f86e9 4496 *order = size_buf.order;
c3545579
JD
4497 dout(" order %u", (unsigned int)*order);
4498 }
9d475de5
AE
4499 *snap_size = le64_to_cpu(size_buf.size);
4500
c3545579
JD
4501 dout(" snap_id 0x%016llx snap_size = %llu\n",
4502 (unsigned long long)snap_id,
57385b51 4503 (unsigned long long)*snap_size);
9d475de5
AE
4504
4505 return 0;
4506}
4507
4508static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4509{
4510 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4511 &rbd_dev->header.obj_order,
4512 &rbd_dev->header.image_size);
4513}
4514
1e130199
AE
4515static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4516{
4517 void *reply_buf;
4518 int ret;
4519 void *p;
4520
4521 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4522 if (!reply_buf)
4523 return -ENOMEM;
4524
ecd4a68a
ID
4525 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4526 &rbd_dev->header_oloc, "get_object_prefix",
4527 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
36be9a76 4528 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
1e130199
AE
4529 if (ret < 0)
4530 goto out;
4531
4532 p = reply_buf;
4533 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
57385b51
AE
4534 p + ret, NULL, GFP_NOIO);
4535 ret = 0;
1e130199
AE
4536
4537 if (IS_ERR(rbd_dev->header.object_prefix)) {
4538 ret = PTR_ERR(rbd_dev->header.object_prefix);
4539 rbd_dev->header.object_prefix = NULL;
4540 } else {
4541 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4542 }
1e130199
AE
4543out:
4544 kfree(reply_buf);
4545
4546 return ret;
4547}
4548
b1b5402a
AE
4549static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4550 u64 *snap_features)
4551{
4552 __le64 snapid = cpu_to_le64(snap_id);
4553 struct {
4554 __le64 features;
4555 __le64 incompat;
4157976b 4556 } __attribute__ ((packed)) features_buf = { 0 };
d3767f0f 4557 u64 unsup;
b1b5402a
AE
4558 int ret;
4559
ecd4a68a
ID
4560 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4561 &rbd_dev->header_oloc, "get_features",
4562 &snapid, sizeof(snapid),
4563 &features_buf, sizeof(features_buf));
36be9a76 4564 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
b1b5402a
AE
4565 if (ret < 0)
4566 return ret;
57385b51
AE
4567 if (ret < sizeof (features_buf))
4568 return -ERANGE;
d889140c 4569
d3767f0f
ID
4570 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4571 if (unsup) {
4572 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4573 unsup);
b8f5c6ed 4574 return -ENXIO;
d3767f0f 4575 }
d889140c 4576
b1b5402a
AE
4577 *snap_features = le64_to_cpu(features_buf.features);
4578
4579 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
57385b51
AE
4580 (unsigned long long)snap_id,
4581 (unsigned long long)*snap_features,
4582 (unsigned long long)le64_to_cpu(features_buf.incompat));
b1b5402a
AE
4583
4584 return 0;
4585}
4586
4587static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4588{
4589 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4590 &rbd_dev->header.features);
4591}
4592
eb3b2d6b
ID
4593struct parent_image_info {
4594 u64 pool_id;
e92c0eaf 4595 const char *pool_ns;
eb3b2d6b
ID
4596 const char *image_id;
4597 u64 snap_id;
4598
e92c0eaf 4599 bool has_overlap;
eb3b2d6b
ID
4600 u64 overlap;
4601};
4602
e92c0eaf
ID
4603/*
4604 * The caller is responsible for @pii.
4605 */
4606static int decode_parent_image_spec(void **p, void *end,
4607 struct parent_image_info *pii)
4608{
4609 u8 struct_v;
4610 u32 struct_len;
4611 int ret;
4612
4613 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
4614 &struct_v, &struct_len);
4615 if (ret)
4616 return ret;
4617
4618 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
4619 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4620 if (IS_ERR(pii->pool_ns)) {
4621 ret = PTR_ERR(pii->pool_ns);
4622 pii->pool_ns = NULL;
4623 return ret;
4624 }
4625 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4626 if (IS_ERR(pii->image_id)) {
4627 ret = PTR_ERR(pii->image_id);
4628 pii->image_id = NULL;
4629 return ret;
4630 }
4631 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
4632 return 0;
4633
4634e_inval:
4635 return -EINVAL;
4636}
4637
4638static int __get_parent_info(struct rbd_device *rbd_dev,
4639 struct page *req_page,
4640 struct page *reply_page,
4641 struct parent_image_info *pii)
4642{
4643 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4644 size_t reply_len = PAGE_SIZE;
4645 void *p, *end;
4646 int ret;
4647
4648 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4649 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
4650 req_page, sizeof(u64), reply_page, &reply_len);
4651 if (ret)
4652 return ret == -EOPNOTSUPP ? 1 : ret;
4653
4654 p = page_address(reply_page);
4655 end = p + reply_len;
4656 ret = decode_parent_image_spec(&p, end, pii);
4657 if (ret)
4658 return ret;
4659
4660 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4661 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
4662 req_page, sizeof(u64), reply_page, &reply_len);
4663 if (ret)
4664 return ret;
4665
4666 p = page_address(reply_page);
4667 end = p + reply_len;
4668 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
4669 if (pii->has_overlap)
4670 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4671
4672 return 0;
4673
4674e_inval:
4675 return -EINVAL;
4676}
4677
eb3b2d6b
ID
4678/*
4679 * The caller is responsible for @pii.
4680 */
4681static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
4682 struct page *req_page,
4683 struct page *reply_page,
4684 struct parent_image_info *pii)
4685{
4686 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4687 size_t reply_len = PAGE_SIZE;
4688 void *p, *end;
4689 int ret;
4690
4691 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4692 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
4693 req_page, sizeof(u64), reply_page, &reply_len);
4694 if (ret)
4695 return ret;
4696
4697 p = page_address(reply_page);
4698 end = p + reply_len;
4699 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
4700 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4701 if (IS_ERR(pii->image_id)) {
4702 ret = PTR_ERR(pii->image_id);
4703 pii->image_id = NULL;
4704 return ret;
4705 }
4706 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
e92c0eaf 4707 pii->has_overlap = true;
eb3b2d6b
ID
4708 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4709
4710 return 0;
4711
4712e_inval:
4713 return -EINVAL;
4714}
4715
4716static int get_parent_info(struct rbd_device *rbd_dev,
4717 struct parent_image_info *pii)
4718{
4719 struct page *req_page, *reply_page;
4720 void *p;
4721 int ret;
4722
4723 req_page = alloc_page(GFP_KERNEL);
4724 if (!req_page)
4725 return -ENOMEM;
4726
4727 reply_page = alloc_page(GFP_KERNEL);
4728 if (!reply_page) {
4729 __free_page(req_page);
4730 return -ENOMEM;
4731 }
4732
4733 p = page_address(req_page);
4734 ceph_encode_64(&p, rbd_dev->spec->snap_id);
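	/*
	 * Try the "parent_get"/"parent_overlap_get" class methods first;
	 * if the OSD doesn't support them (__get_parent_info() returns 1),
	 * fall back to the legacy "get_parent" method.
	 */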
e92c0eaf
ID
4735 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
4736 if (ret > 0)
4737 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
4738 pii);
eb3b2d6b
ID
4739
4740 __free_page(req_page);
4741 __free_page(reply_page);
4742 return ret;
4743}
4744
86b00e0d
AE
4745static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4746{
4747 struct rbd_spec *parent_spec;
eb3b2d6b 4748 struct parent_image_info pii = { 0 };
86b00e0d
AE
4749 int ret;
4750
4751 parent_spec = rbd_spec_alloc();
4752 if (!parent_spec)
4753 return -ENOMEM;
4754
eb3b2d6b
ID
4755 ret = get_parent_info(rbd_dev, &pii);
4756 if (ret)
86b00e0d 4757 goto out_err;
86b00e0d 4758
e92c0eaf
ID
4759 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
4760 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
4761 pii.has_overlap, pii.overlap);
86b00e0d 4762
e92c0eaf 4763 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
392a9dad
AE
4764 /*
4765 * Either the parent never existed, or we have
4766 * record of it but the image got flattened so it no
4767 * longer has a parent. When the parent of a
4768 * layered image disappears we immediately set the
4769 * overlap to 0. The effect of this is that all new
4770 * requests will be treated as if the image had no
4771 * parent.
e92c0eaf
ID
4772 *
4773 * If !pii.has_overlap, the parent image spec is not
4774 * applicable. It's there to avoid duplication in each
4775 * snapshot record.
392a9dad
AE
4776 */
4777 if (rbd_dev->parent_overlap) {
4778 rbd_dev->parent_overlap = 0;
392a9dad
AE
4779 rbd_dev_parent_put(rbd_dev);
4780 pr_info("%s: clone image has been flattened\n",
4781 rbd_dev->disk->disk_name);
4782 }
4783
86b00e0d 4784 goto out; /* No parent? No problem. */
392a9dad 4785 }
86b00e0d 4786
0903e875
AE
4787 /* The ceph file layout needs to fit pool id in 32 bits */
4788
4789 ret = -EIO;
eb3b2d6b 4790 if (pii.pool_id > (u64)U32_MAX) {
9584d508 4791 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
eb3b2d6b 4792 (unsigned long long)pii.pool_id, U32_MAX);
86b00e0d
AE
4793 goto out_err;
4794 }
86b00e0d 4795
3b5cf2a2
AE
4796 /*
4797 * The parent won't change (except when the clone is
4798 * flattened, already handled that). So we only need to
4799 * record the parent spec if we have not already done so.
4800 */
4801 if (!rbd_dev->parent_spec) {
eb3b2d6b 4802 parent_spec->pool_id = pii.pool_id;
e92c0eaf
ID
4803 if (pii.pool_ns && *pii.pool_ns) {
4804 parent_spec->pool_ns = pii.pool_ns;
4805 pii.pool_ns = NULL;
4806 }
eb3b2d6b
ID
4807 parent_spec->image_id = pii.image_id;
4808 pii.image_id = NULL;
4809 parent_spec->snap_id = pii.snap_id;
b26c047b 4810
70cf49cf
AE
4811 rbd_dev->parent_spec = parent_spec;
4812 parent_spec = NULL; /* rbd_dev now owns this */
3b5cf2a2
AE
4813 }
4814
4815 /*
cf32bd9c
ID
4816 * We always update the parent overlap. If it's zero we issue
4817 * a warning, as we will proceed as if there was no parent.
3b5cf2a2 4818 */
eb3b2d6b 4819 if (!pii.overlap) {
3b5cf2a2 4820 if (parent_spec) {
cf32bd9c
ID
4821 /* refresh, careful to warn just once */
4822 if (rbd_dev->parent_overlap)
4823 rbd_warn(rbd_dev,
4824 "clone now standalone (overlap became 0)");
3b5cf2a2 4825 } else {
cf32bd9c
ID
4826 /* initial probe */
4827 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
3b5cf2a2 4828 }
70cf49cf 4829 }
eb3b2d6b 4830 rbd_dev->parent_overlap = pii.overlap;
cf32bd9c 4831
86b00e0d
AE
4832out:
4833 ret = 0;
4834out_err:
e92c0eaf 4835 kfree(pii.pool_ns);
eb3b2d6b 4836 kfree(pii.image_id);
86b00e0d 4837 rbd_spec_put(parent_spec);
86b00e0d
AE
4838 return ret;
4839}
4840
cc070d59
AE
4841static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4842{
4843 struct {
4844 __le64 stripe_unit;
4845 __le64 stripe_count;
4846 } __attribute__ ((packed)) striping_info_buf = { 0 };
4847 size_t size = sizeof (striping_info_buf);
4848 void *p;
cc070d59
AE
4849 int ret;
4850
ecd4a68a
ID
4851 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4852 &rbd_dev->header_oloc, "get_stripe_unit_count",
4853 NULL, 0, &striping_info_buf, size);
cc070d59
AE
4854 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4855 if (ret < 0)
4856 return ret;
4857 if (ret < size)
4858 return -ERANGE;
4859
cc070d59 4860 p = &striping_info_buf;
b1331852
ID
4861 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
4862 rbd_dev->header.stripe_count = ceph_decode_64(&p);
cc070d59
AE
4863 return 0;
4864}
4865
7e97332e
ID
4866static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
4867{
4868 __le64 data_pool_id;
4869 int ret;
4870
4871 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4872 &rbd_dev->header_oloc, "get_data_pool",
4873 NULL, 0, &data_pool_id, sizeof(data_pool_id));
4874 if (ret < 0)
4875 return ret;
4876 if (ret < sizeof(data_pool_id))
4877 return -EBADMSG;
4878
4879 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
4880 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
4881 return 0;
4882}
4883
9e15b77d
AE
4884static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4885{
ecd4a68a 4886 CEPH_DEFINE_OID_ONSTACK(oid);
9e15b77d
AE
4887 size_t image_id_size;
4888 char *image_id;
4889 void *p;
4890 void *end;
4891 size_t size;
4892 void *reply_buf = NULL;
4893 size_t len = 0;
4894 char *image_name = NULL;
4895 int ret;
4896
4897 rbd_assert(!rbd_dev->spec->image_name);
4898
69e7a02f
AE
4899 len = strlen(rbd_dev->spec->image_id);
4900 image_id_size = sizeof (__le32) + len;
9e15b77d
AE
4901 image_id = kmalloc(image_id_size, GFP_KERNEL);
4902 if (!image_id)
4903 return NULL;
4904
4905 p = image_id;
4157976b 4906 end = image_id + image_id_size;
57385b51 4907 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
9e15b77d
AE
4908
4909 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4910 reply_buf = kmalloc(size, GFP_KERNEL);
4911 if (!reply_buf)
4912 goto out;
4913
ecd4a68a
ID
4914 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
4915 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
4916 "dir_get_name", image_id, image_id_size,
4917 reply_buf, size);
9e15b77d
AE
4918 if (ret < 0)
4919 goto out;
4920 p = reply_buf;
f40eb349
AE
4921 end = reply_buf + ret;
4922
9e15b77d
AE
4923 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4924 if (IS_ERR(image_name))
4925 image_name = NULL;
4926 else
4927 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4928out:
4929 kfree(reply_buf);
4930 kfree(image_id);
4931
4932 return image_name;
4933}
4934
2ad3d716
AE
4935static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4936{
4937 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4938 const char *snap_name;
4939 u32 which = 0;
4940
4941 /* Skip over names until we find the one we are looking for */
4942
4943 snap_name = rbd_dev->header.snap_names;
4944 while (which < snapc->num_snaps) {
4945 if (!strcmp(name, snap_name))
4946 return snapc->snaps[which];
4947 snap_name += strlen(snap_name) + 1;
4948 which++;
4949 }
4950 return CEPH_NOSNAP;
4951}
4952
4953static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4954{
4955 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4956 u32 which;
4957 bool found = false;
4958 u64 snap_id;
4959
4960 for (which = 0; !found && which < snapc->num_snaps; which++) {
4961 const char *snap_name;
4962
4963 snap_id = snapc->snaps[which];
4964 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
efadc98a
JD
4965 if (IS_ERR(snap_name)) {
4966 /* ignore no-longer existing snapshots */
4967 if (PTR_ERR(snap_name) == -ENOENT)
4968 continue;
4969 else
4970 break;
4971 }
2ad3d716
AE
4972 found = !strcmp(name, snap_name);
4973 kfree(snap_name);
4974 }
4975 return found ? snap_id : CEPH_NOSNAP;
4976}
4977
4978/*
4979 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4980 * no snapshot by that name is found, or if an error occurs.
4981 */
4982static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4983{
4984 if (rbd_dev->image_format == 1)
4985 return rbd_v1_snap_id_by_name(rbd_dev, name);
4986
4987 return rbd_v2_snap_id_by_name(rbd_dev, name);
4988}
4989
9e15b77d 4990/*
04077599
ID
4991 * An image being mapped will have everything but the snap id.
4992 */
4993static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4994{
4995 struct rbd_spec *spec = rbd_dev->spec;
4996
4997 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4998 rbd_assert(spec->image_id && spec->image_name);
4999 rbd_assert(spec->snap_name);
5000
5001 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5002 u64 snap_id;
5003
5004 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5005 if (snap_id == CEPH_NOSNAP)
5006 return -ENOENT;
5007
5008 spec->snap_id = snap_id;
5009 } else {
5010 spec->snap_id = CEPH_NOSNAP;
5011 }
5012
5013 return 0;
5014}
5015
5016/*
5017 * A parent image will have all ids but none of the names.
e1d4213f 5018 *
04077599
ID
5019 * All names in an rbd spec are dynamically allocated. It's OK if we
5020 * can't figure out the name for an image id.
9e15b77d 5021 */
04077599 5022static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
9e15b77d 5023{
2e9f7f1c
AE
5024 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5025 struct rbd_spec *spec = rbd_dev->spec;
5026 const char *pool_name;
5027 const char *image_name;
5028 const char *snap_name;
9e15b77d
AE
5029 int ret;
5030
04077599
ID
5031 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5032 rbd_assert(spec->image_id);
5033 rbd_assert(spec->snap_id != CEPH_NOSNAP);
9e15b77d 5034
2e9f7f1c 5035 /* Get the pool name; we have to make our own copy of this */
9e15b77d 5036
2e9f7f1c
AE
5037 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5038 if (!pool_name) {
5039 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
935dc89f
AE
5040 return -EIO;
5041 }
2e9f7f1c
AE
5042 pool_name = kstrdup(pool_name, GFP_KERNEL);
5043 if (!pool_name)
9e15b77d
AE
5044 return -ENOMEM;
5045
5046 /* Fetch the image name; tolerate failure here */
5047
2e9f7f1c
AE
5048 image_name = rbd_dev_image_name(rbd_dev);
5049 if (!image_name)
06ecc6cb 5050 rbd_warn(rbd_dev, "unable to get image name");
9e15b77d 5051
04077599 5052 /* Fetch the snapshot name */
9e15b77d 5053
2e9f7f1c 5054 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
da6a6b63
JD
5055 if (IS_ERR(snap_name)) {
5056 ret = PTR_ERR(snap_name);
9e15b77d 5057 goto out_err;
2e9f7f1c
AE
5058 }
5059
5060 spec->pool_name = pool_name;
5061 spec->image_name = image_name;
5062 spec->snap_name = snap_name;
9e15b77d
AE
5063
5064 return 0;
04077599 5065
9e15b77d 5066out_err:
2e9f7f1c
AE
5067 kfree(image_name);
5068 kfree(pool_name);
9e15b77d
AE
5069 return ret;
5070}
5071
cc4a38bd 5072static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
35d489f9
AE
5073{
5074 size_t size;
5075 int ret;
5076 void *reply_buf;
5077 void *p;
5078 void *end;
5079 u64 seq;
5080 u32 snap_count;
5081 struct ceph_snap_context *snapc;
5082 u32 i;
5083
5084 /*
5085 * We'll need room for the seq value (maximum snapshot id),
5086 * snapshot count, and array of that many snapshot ids.
5087 * For now we have a fixed upper limit on the number we're
5088 * prepared to receive.
5089 */
5090 size = sizeof (__le64) + sizeof (__le32) +
5091 RBD_MAX_SNAP_COUNT * sizeof (__le64);
5092 reply_buf = kzalloc(size, GFP_KERNEL);
5093 if (!reply_buf)
5094 return -ENOMEM;
5095
ecd4a68a
ID
5096 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5097 &rbd_dev->header_oloc, "get_snapcontext",
5098 NULL, 0, reply_buf, size);
36be9a76 5099 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
35d489f9
AE
5100 if (ret < 0)
5101 goto out;
5102
35d489f9 5103 p = reply_buf;
57385b51
AE
5104 end = reply_buf + ret;
5105 ret = -ERANGE;
35d489f9
AE
5106 ceph_decode_64_safe(&p, end, seq, out);
5107 ceph_decode_32_safe(&p, end, snap_count, out);
5108
5109 /*
5110 * Make sure the reported number of snapshot ids wouldn't go
5111 * beyond the end of our buffer. But before checking that,
5112 * make sure the computed size of the snapshot context we
5113 * allocate is representable in a size_t.
5114 */
5115 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
5116 / sizeof (u64)) {
5117 ret = -EINVAL;
5118 goto out;
5119 }
5120 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5121 goto out;
468521c1 5122 ret = 0;
35d489f9 5123
812164f8 5124 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
35d489f9
AE
5125 if (!snapc) {
5126 ret = -ENOMEM;
5127 goto out;
5128 }
35d489f9 5129 snapc->seq = seq;
35d489f9
AE
5130 for (i = 0; i < snap_count; i++)
5131 snapc->snaps[i] = ceph_decode_64(&p);
5132
49ece554 5133 ceph_put_snap_context(rbd_dev->header.snapc);
35d489f9
AE
5134 rbd_dev->header.snapc = snapc;
5135
5136 dout(" snap context seq = %llu, snap_count = %u\n",
57385b51 5137 (unsigned long long)seq, (unsigned int)snap_count);
35d489f9
AE
5138out:
5139 kfree(reply_buf);
5140
57385b51 5141 return ret;
35d489f9
AE
5142}
5143
54cac61f
AE
5144static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5145 u64 snap_id)
b8b1e2db
AE
5146{
5147 size_t size;
5148 void *reply_buf;
54cac61f 5149 __le64 snapid;
b8b1e2db
AE
5150 int ret;
5151 void *p;
5152 void *end;
b8b1e2db
AE
5153 char *snap_name;
5154
5155 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5156 reply_buf = kmalloc(size, GFP_KERNEL);
5157 if (!reply_buf)
5158 return ERR_PTR(-ENOMEM);
5159
54cac61f 5160 snapid = cpu_to_le64(snap_id);
ecd4a68a
ID
5161 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5162 &rbd_dev->header_oloc, "get_snapshot_name",
5163 &snapid, sizeof(snapid), reply_buf, size);
36be9a76 5164 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
f40eb349
AE
5165 if (ret < 0) {
5166 snap_name = ERR_PTR(ret);
b8b1e2db 5167 goto out;
f40eb349 5168 }
b8b1e2db
AE
5169
5170 p = reply_buf;
f40eb349 5171 end = reply_buf + ret;
e5c35534 5172 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
f40eb349 5173 if (IS_ERR(snap_name))
b8b1e2db 5174 goto out;
b8b1e2db 5175
f40eb349 5176 dout(" snap_id 0x%016llx snap_name = %s\n",
54cac61f 5177 (unsigned long long)snap_id, snap_name);
b8b1e2db
AE
5178out:
5179 kfree(reply_buf);
5180
f40eb349 5181 return snap_name;
b8b1e2db
AE
5182}
5183
2df3fac7 5184static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
117973fb 5185{
2df3fac7 5186 bool first_time = rbd_dev->header.object_prefix == NULL;
117973fb 5187 int ret;
117973fb 5188
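	/*
	 * The static parts of the header are fetched only once by
	 * rbd_dev_v2_header_onetime(); a NULL object_prefix marks that
	 * first pass.  If the snap context fetch below fails on the first
	 * pass, object_prefix is freed again so a later call retries the
	 * one-time setup.
	 */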
1617e40c
JD
5189 ret = rbd_dev_v2_image_size(rbd_dev);
5190 if (ret)
cfbf6377 5191 return ret;
1617e40c 5192
2df3fac7
AE
5193 if (first_time) {
5194 ret = rbd_dev_v2_header_onetime(rbd_dev);
5195 if (ret)
cfbf6377 5196 return ret;
2df3fac7
AE
5197 }
5198
cc4a38bd 5199 ret = rbd_dev_v2_snap_context(rbd_dev);
d194cd1d
ID
5200 if (ret && first_time) {
5201 kfree(rbd_dev->header.object_prefix);
5202 rbd_dev->header.object_prefix = NULL;
5203 }
117973fb
AE
5204
5205 return ret;
5206}
5207
a720ae09
ID
5208static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5209{
5210 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5211
5212 if (rbd_dev->image_format == 1)
5213 return rbd_dev_v1_header_info(rbd_dev);
5214
5215 return rbd_dev_v2_header_info(rbd_dev);
5216}
5217
e28fff26
AE
5218/*
5219 * Skips over white space at *buf, and updates *buf to point to the
5220 * first found non-space character (if any). Returns the length of
593a9e7b
AE
5221 * the token (string of non-white space characters) found. Note
5222 * that *buf must be terminated with '\0'.
e28fff26
AE
5223 */
5224static inline size_t next_token(const char **buf)
5225{
5226 /*
5227 * These are the characters that produce nonzero for
5228 * isspace() in the "C" and "POSIX" locales.
5229 */
5230 const char *spaces = " \f\n\r\t\v";
5231
5232 *buf += strspn(*buf, spaces); /* Find start of token */
5233
5234 return strcspn(*buf, spaces); /* Return token length */
5235}
5236
ea3352f4
AE
5237/*
5238 * Finds the next token in *buf, dynamically allocates a buffer big
5239 * enough to hold a copy of it, and copies the token into the new
5240 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5241 * that a duplicate buffer is created even for a zero-length token.
5242 *
5243 * Returns a pointer to the newly-allocated duplicate, or a null
5244 * pointer if memory for the duplicate was not available. If
5245 * the lenp argument is a non-null pointer, the length of the token
5246 * (not including the '\0') is returned in *lenp.
5247 *
5248 * If successful, the *buf pointer will be updated to point beyond
5249 * the end of the found token.
5250 *
5251 * Note: uses GFP_KERNEL for allocation.
5252 */
5253static inline char *dup_token(const char **buf, size_t *lenp)
5254{
5255 char *dup;
5256 size_t len;
5257
5258 len = next_token(buf);
4caf35f9 5259 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
ea3352f4
AE
5260 if (!dup)
5261 return NULL;
ea3352f4
AE
5262 *(dup + len) = '\0';
5263 *buf += len;
5264
5265 if (lenp)
5266 *lenp = len;
5267
5268 return dup;
5269}
5270
a725f65e 5271/*
859c31df
AE
5272 * Parse the options provided for an "rbd add" (i.e., rbd image
5273 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5274 * and the data written is passed here via a NUL-terminated buffer.
5275 * Returns 0 if successful or an error code otherwise.
d22f76e7 5276 *
859c31df
AE
5277 * The information extracted from these options is recorded in
5278 * the other parameters which return dynamically-allocated
5279 * structures:
5280 * ceph_opts
5281 * The address of a pointer that will refer to a ceph options
5282 * structure. Caller must release the returned pointer using
5283 * ceph_destroy_options() when it is no longer needed.
5284 * rbd_opts
5285 * Address of an rbd options pointer. Fully initialized by
5286 * this function; caller must release with kfree().
5287 * spec
5288 * Address of an rbd image specification pointer. Fully
5289 * initialized by this function based on parsed options.
5290 * Caller must release with rbd_spec_put().
5291 *
5292 * The options passed take this form:
5293 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
5294 * where:
5295 * <mon_addrs>
5296 * A comma-separated list of one or more monitor addresses.
5297 * A monitor address is an ip address, optionally followed
5298 * by a port number (separated by a colon).
5299 * I.e.: ip1[:port1][,ip2[:port2]...]
5300 * <options>
5301 * A comma-separated list of ceph and/or rbd options.
5302 * <pool_name>
5303 * The name of the rados pool containing the rbd image.
5304 * <image_name>
5305 * The name of the image in that pool to map.
5306 * <snap_name>
5307 * An optional snapshot name. If provided, the mapping will
5308 * present data from the image at the time that snapshot was
5309 * created. The image head is used if no snapshot name is
5310 * provided. Snapshot mappings are always read-only.
a725f65e 5311 */
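/*
 * Purely for illustration (the monitor address, credentials, pool,
 * image and snapshot names below are made up), a mapping request
 * written to /sys/bus/rbd/add might look like:
 *
 *   1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1
 */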
859c31df 5312static int rbd_add_parse_args(const char *buf,
dc79b113 5313 struct ceph_options **ceph_opts,
859c31df
AE
5314 struct rbd_options **opts,
5315 struct rbd_spec **rbd_spec)
e28fff26 5316{
d22f76e7 5317 size_t len;
859c31df 5318 char *options;
0ddebc0c 5319 const char *mon_addrs;
ecb4dc22 5320 char *snap_name;
0ddebc0c 5321 size_t mon_addrs_size;
c300156b 5322 struct parse_rbd_opts_ctx pctx = { 0 };
859c31df 5323 struct ceph_options *copts;
dc79b113 5324 int ret;
e28fff26
AE
5325
5326 /* The first four tokens are required */
5327
7ef3214a 5328 len = next_token(&buf);
4fb5d671
AE
5329 if (!len) {
5330 rbd_warn(NULL, "no monitor address(es) provided");
5331 return -EINVAL;
5332 }
0ddebc0c 5333 mon_addrs = buf;
f28e565a 5334 mon_addrs_size = len + 1;
7ef3214a 5335 buf += len;
a725f65e 5336
dc79b113 5337 ret = -EINVAL;
f28e565a
AE
5338 options = dup_token(&buf, NULL);
5339 if (!options)
dc79b113 5340 return -ENOMEM;
4fb5d671
AE
5341 if (!*options) {
5342 rbd_warn(NULL, "no options provided");
5343 goto out_err;
5344 }
e28fff26 5345
c300156b
ID
5346 pctx.spec = rbd_spec_alloc();
5347 if (!pctx.spec)
f28e565a 5348 goto out_mem;
859c31df 5349
c300156b
ID
5350 pctx.spec->pool_name = dup_token(&buf, NULL);
5351 if (!pctx.spec->pool_name)
859c31df 5352 goto out_mem;
c300156b 5353 if (!*pctx.spec->pool_name) {
4fb5d671
AE
5354 rbd_warn(NULL, "no pool name provided");
5355 goto out_err;
5356 }
e28fff26 5357
c300156b
ID
5358 pctx.spec->image_name = dup_token(&buf, NULL);
5359 if (!pctx.spec->image_name)
f28e565a 5360 goto out_mem;
c300156b 5361 if (!*pctx.spec->image_name) {
4fb5d671
AE
5362 rbd_warn(NULL, "no image name provided");
5363 goto out_err;
5364 }
d4b125e9 5365
f28e565a
AE
5366 /*
5367 * Snapshot name is optional; default is to use "-"
5368 * (indicating the head/no snapshot).
5369 */
3feeb894 5370 len = next_token(&buf);
820a5f3e 5371 if (!len) {
3feeb894
AE
5372 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5373 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
f28e565a 5374 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
dc79b113 5375 ret = -ENAMETOOLONG;
f28e565a 5376 goto out_err;
849b4260 5377 }
ecb4dc22
AE
5378 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5379 if (!snap_name)
f28e565a 5380 goto out_mem;
ecb4dc22 5381 *(snap_name + len) = '\0';
c300156b 5382 pctx.spec->snap_name = snap_name;
e5c35534 5383
0ddebc0c 5384 /* Initialize all rbd options to the defaults */
e28fff26 5385
c300156b
ID
5386 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
5387 if (!pctx.opts)
4e9afeba
AE
5388 goto out_mem;
5389
c300156b
ID
5390 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
5391 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5392 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5393 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5394 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5395 pctx.opts->trim = RBD_TRIM_DEFAULT;
d22f76e7 5396
859c31df 5397 copts = ceph_parse_options(options, mon_addrs,
c300156b
ID
5398 mon_addrs + mon_addrs_size - 1,
5399 parse_rbd_opts_token, &pctx);
859c31df
AE
5400 if (IS_ERR(copts)) {
5401 ret = PTR_ERR(copts);
dc79b113
AE
5402 goto out_err;
5403 }
859c31df
AE
5404 kfree(options);
5405
5406 *ceph_opts = copts;
c300156b
ID
5407 *opts = pctx.opts;
5408 *rbd_spec = pctx.spec;
0ddebc0c 5409
dc79b113 5410 return 0;
f28e565a 5411out_mem:
dc79b113 5412 ret = -ENOMEM;
d22f76e7 5413out_err:
c300156b
ID
5414 kfree(pctx.opts);
5415 rbd_spec_put(pctx.spec);
f28e565a 5416 kfree(options);
d22f76e7 5417
dc79b113 5418 return ret;
a725f65e
AE
5419}
5420
e010dd0a
ID
5421static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5422{
5423 down_write(&rbd_dev->lock_rwsem);
5424 if (__rbd_is_lock_owner(rbd_dev))
5425 rbd_unlock(rbd_dev);
5426 up_write(&rbd_dev->lock_rwsem);
5427}
5428
5429static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5430{
2f18d466
ID
5431 int ret;
5432
e010dd0a
ID
5433 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5434 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5435 return -EINVAL;
5436 }
5437
5438 /* FIXME: "rbd map --exclusive" should be interruptible */
5439 down_read(&rbd_dev->lock_rwsem);
2f18d466 5440 ret = rbd_wait_state_locked(rbd_dev, true);
e010dd0a 5441 up_read(&rbd_dev->lock_rwsem);
2f18d466 5442 if (ret) {
e010dd0a
ID
5443 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5444 return -EROFS;
5445 }
5446
5447 return 0;
5448}
5449
589d30e0
AE
5450/*
5451 * An rbd format 2 image has a unique identifier, distinct from the
5452 * name given to it by the user. Internally, that identifier is
5453 * what's used to specify the names of objects related to the image.
5454 *
5455 * A special "rbd id" object is used to map an rbd image name to its
5456 * id. If that object doesn't exist, then there is no v2 rbd image
5457 * with the supplied name.
5458 *
5459 * This function will record the given rbd_dev's image_id field if
5460 * it can be determined, and in that case will return 0. If any
5461 * errors occur a negative errno will be returned and the rbd_dev's
5462 * image_id field will be unchanged (and should be NULL).
5463 */
5464static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5465{
5466 int ret;
5467 size_t size;
ecd4a68a 5468 CEPH_DEFINE_OID_ONSTACK(oid);
589d30e0 5469 void *response;
c0fba368 5470 char *image_id;
2f82ee54 5471
2c0d0a10
AE
5472 /*
5473 * When probing a parent image, the image id is already
5474 * known (and the image name likely is not). There's no
c0fba368
AE
5475 * need to fetch the image id again in this case. We
5476 * do still need to set the image format though.
2c0d0a10 5477 */
c0fba368
AE
5478 if (rbd_dev->spec->image_id) {
5479 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5480
2c0d0a10 5481 return 0;
c0fba368 5482 }
2c0d0a10 5483
589d30e0
AE
5484 /*
5485 * First, see if the format 2 image id file exists, and if
5486 * so, get the image's persistent id from it.
5487 */
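/*
 * The id object's name is RBD_ID_PREFIX followed by the image name;
 * assuming the usual "rbd_id." prefix from rbd_types.h, an image
 * named "foo" keeps its id in the "rbd_id.foo" object.
 */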
ecd4a68a
ID
5488 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5489 rbd_dev->spec->image_name);
5490 if (ret)
5491 return ret;
5492
5493 dout("rbd id object name is %s\n", oid.name);
589d30e0
AE
5494
5495 /* Response will be an encoded string, which includes a length */
5496
5497 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5498 response = kzalloc(size, GFP_NOIO);
5499 if (!response) {
5500 ret = -ENOMEM;
5501 goto out;
5502 }
5503
c0fba368
AE
5504 /* If it doesn't exist we'll assume it's a format 1 image */
5505
ecd4a68a
ID
5506 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5507 "get_id", NULL, 0,
5508 response, RBD_IMAGE_ID_LEN_MAX);
36be9a76 5509 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
c0fba368
AE
5510 if (ret == -ENOENT) {
5511 image_id = kstrdup("", GFP_KERNEL);
5512 ret = image_id ? 0 : -ENOMEM;
5513 if (!ret)
5514 rbd_dev->image_format = 1;
7dd440c9 5515 } else if (ret >= 0) {
c0fba368
AE
5516 void *p = response;
5517
5518 image_id = ceph_extract_encoded_string(&p, p + ret,
979ed480 5519 NULL, GFP_NOIO);
461f758a 5520 ret = PTR_ERR_OR_ZERO(image_id);
c0fba368
AE
5521 if (!ret)
5522 rbd_dev->image_format = 2;
c0fba368
AE
5523 }
5524
5525 if (!ret) {
5526 rbd_dev->spec->image_id = image_id;
5527 dout("image_id is %s\n", image_id);
589d30e0
AE
5528 }
5529out:
5530 kfree(response);
ecd4a68a 5531 ceph_oid_destroy(&oid);
589d30e0
AE
5532 return ret;
5533}
5534
3abef3b3
AE
5535/*
5536 * Undo whatever state changes are made by a v1 or v2 header info
5537 * call.
5538 */
6fd48b3b
AE
5539static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5540{
5541 struct rbd_image_header *header;
5542
e69b8d41 5543 rbd_dev_parent_put(rbd_dev);
6fd48b3b
AE
5544
5545 /* Free dynamic fields from the header, then zero it out */
5546
5547 header = &rbd_dev->header;
812164f8 5548 ceph_put_snap_context(header->snapc);
6fd48b3b
AE
5549 kfree(header->snap_sizes);
5550 kfree(header->snap_names);
5551 kfree(header->object_prefix);
5552 memset(header, 0, sizeof (*header));
5553}
5554
2df3fac7 5555static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
a30b71b9
AE
5556{
5557 int ret;
a30b71b9 5558
1e130199 5559 ret = rbd_dev_v2_object_prefix(rbd_dev);
57385b51 5560 if (ret)
b1b5402a
AE
5561 goto out_err;
5562
2df3fac7
AE
5563 /*
5564 * Get and check the features for the image. Currently the
5565 * features are assumed to never change.
5566 */
b1b5402a 5567 ret = rbd_dev_v2_features(rbd_dev);
57385b51 5568 if (ret)
9d475de5 5569 goto out_err;
35d489f9 5570
cc070d59
AE
5571 /* If the image supports fancy striping, get its parameters */
5572
5573 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5574 ret = rbd_dev_v2_striping_info(rbd_dev);
5575 if (ret < 0)
5576 goto out_err;
5577 }
a30b71b9 5578
7e97332e
ID
5579 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5580 ret = rbd_dev_v2_data_pool(rbd_dev);
5581 if (ret)
5582 goto out_err;
5583 }
5584
263423f8 5585 rbd_init_layout(rbd_dev);
35152979 5586 return 0;
263423f8 5587
9d475de5 5588out_err:
642a2537 5589 rbd_dev->header.features = 0;
1e130199
AE
5590 kfree(rbd_dev->header.object_prefix);
5591 rbd_dev->header.object_prefix = NULL;
9d475de5 5592 return ret;
a30b71b9
AE
5593}
5594
6d69bb53
ID
5595/*
5596 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5597 * rbd_dev_image_probe() recursion depth, which means it's also the
5598 * length of the already discovered part of the parent chain.
5599 */
5600static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
83a06263 5601{
2f82ee54 5602 struct rbd_device *parent = NULL;
124afba2
AE
5603 int ret;
5604
5605 if (!rbd_dev->parent_spec)
5606 return 0;
124afba2 5607
6d69bb53
ID
5608 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5609 pr_info("parent chain is too long (%d)\n", depth);
5610 ret = -EINVAL;
5611 goto out_err;
5612 }
5613
1643dfa4 5614 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
1f2c6651
ID
5615 if (!parent) {
5616 ret = -ENOMEM;
124afba2 5617 goto out_err;
1f2c6651
ID
5618 }
5619
5620 /*
5621 * Images related by parent/child relationships always share
5622 * rbd_client and spec/parent_spec, so bump their refcounts.
5623 */
5624 __rbd_get_client(rbd_dev->rbd_client);
5625 rbd_spec_get(rbd_dev->parent_spec);
124afba2 5626
6d69bb53 5627 ret = rbd_dev_image_probe(parent, depth);
124afba2
AE
5628 if (ret < 0)
5629 goto out_err;
1f2c6651 5630
124afba2 5631 rbd_dev->parent = parent;
a2acd00e 5632 atomic_set(&rbd_dev->parent_ref, 1);
124afba2 5633 return 0;
1f2c6651 5634
124afba2 5635out_err:
1f2c6651 5636 rbd_dev_unparent(rbd_dev);
1761b229 5637 rbd_dev_destroy(parent);
124afba2
AE
5638 return ret;
5639}
5640
5769ed0c
ID
5641static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5642{
5643 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5644 rbd_dev_mapping_clear(rbd_dev);
5645 rbd_free_disk(rbd_dev);
5646 if (!single_major)
5647 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5648}
5649
811c6688
ID
5650/*
5651 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5652 * upon return.
5653 */
200a6a8b 5654static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
124afba2 5655{
83a06263 5656 int ret;
d1cf5788 5657
9b60e70b 5658 /* Record our major and minor device numbers. */
83a06263 5659
9b60e70b
ID
5660 if (!single_major) {
5661 ret = register_blkdev(0, rbd_dev->name);
5662 if (ret < 0)
1643dfa4 5663 goto err_out_unlock;
9b60e70b
ID
5664
5665 rbd_dev->major = ret;
5666 rbd_dev->minor = 0;
5667 } else {
5668 rbd_dev->major = rbd_major;
5669 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5670 }
83a06263
AE
5671
5672 /* Set up the blkdev mapping. */
5673
5674 ret = rbd_init_disk(rbd_dev);
5675 if (ret)
5676 goto err_out_blkdev;
5677
f35a4dee 5678 ret = rbd_dev_mapping_set(rbd_dev);
83a06263
AE
5679 if (ret)
5680 goto err_out_disk;
bc1ecc65 5681
f35a4dee 5682 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
9568c93e 5683 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
f35a4dee 5684
5769ed0c 5685 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
f35a4dee 5686 if (ret)
f5ee37bd 5687 goto err_out_mapping;
83a06263 5688
129b79d4 5689 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
811c6688 5690 up_write(&rbd_dev->header_rwsem);
5769ed0c 5691 return 0;
2f82ee54 5692
f35a4dee
AE
5693err_out_mapping:
5694 rbd_dev_mapping_clear(rbd_dev);
83a06263
AE
5695err_out_disk:
5696 rbd_free_disk(rbd_dev);
5697err_out_blkdev:
9b60e70b
ID
5698 if (!single_major)
5699 unregister_blkdev(rbd_dev->major, rbd_dev->name);
811c6688
ID
5700err_out_unlock:
5701 up_write(&rbd_dev->header_rwsem);
83a06263
AE
5702 return ret;
5703}
5704
332bb12d
AE
5705static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5706{
5707 struct rbd_spec *spec = rbd_dev->spec;
c41d13a3 5708 int ret;
332bb12d
AE
5709
5710 /* Record the header object name for this rbd image. */
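/*
 * Assuming the usual prefixes from rbd_types.h, a format 1 image named
 * "foo" uses a "foo.rbd" header object, while a format 2 image with id
 * "1234" uses "rbd_header.1234".
 */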
5711
5712 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
332bb12d 5713 if (rbd_dev->image_format == 1)
c41d13a3
ID
5714 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5715 spec->image_name, RBD_SUFFIX);
332bb12d 5716 else
c41d13a3
ID
5717 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5718 RBD_HEADER_PREFIX, spec->image_id);
332bb12d 5719
c41d13a3 5720 return ret;
332bb12d
AE
5721}
5722
200a6a8b
AE
5723static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5724{
6fd48b3b 5725 rbd_dev_unprobe(rbd_dev);
fd22aef8
ID
5726 if (rbd_dev->opts)
5727 rbd_unregister_watch(rbd_dev);
6fd48b3b
AE
5728 rbd_dev->image_format = 0;
5729 kfree(rbd_dev->spec->image_id);
5730 rbd_dev->spec->image_id = NULL;
200a6a8b
AE
5731}
5732
a30b71b9
AE
5733/*
5734 * Probe for the existence of the header object for the given rbd
1f3ef788
AE
5735 * device. If this image is the one being mapped (i.e., not a
5736 * parent), initiate a watch on its header object before using that
5737 * object to get detailed information about the rbd image.
a30b71b9 5738 */
6d69bb53 5739static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
a30b71b9
AE
5740{
5741 int ret;
5742
5743 /*
3abef3b3
AE
5744 * Get the id from the image id object. Unless there's an
5745 * error, rbd_dev->spec->image_id will be filled in with
5746 * a dynamically-allocated string, and rbd_dev->image_format
5747 * will be set to either 1 or 2.
a30b71b9
AE
5748 */
5749 ret = rbd_dev_image_id(rbd_dev);
5750 if (ret)
c0fba368 5751 return ret;
c0fba368 5752
332bb12d
AE
5753 ret = rbd_dev_header_name(rbd_dev);
5754 if (ret)
5755 goto err_out_format;
5756
6d69bb53 5757 if (!depth) {
99d16943 5758 ret = rbd_register_watch(rbd_dev);
1fe48023
ID
5759 if (ret) {
5760 if (ret == -ENOENT)
b26c047b 5761 pr_info("image %s/%s%s%s does not exist\n",
1fe48023 5762 rbd_dev->spec->pool_name,
b26c047b
ID
5763 rbd_dev->spec->pool_ns ?: "",
5764 rbd_dev->spec->pool_ns ? "/" : "",
1fe48023 5765 rbd_dev->spec->image_name);
c41d13a3 5766 goto err_out_format;
1fe48023 5767 }
1f3ef788 5768 }
b644de2b 5769
a720ae09 5770 ret = rbd_dev_header_info(rbd_dev);
5655c4d9 5771 if (ret)
b644de2b 5772 goto err_out_watch;
83a06263 5773
04077599
ID
5774 /*
5775 * If this image is the one being mapped, we have pool name and
5776 * id, image name and id, and snap name - need to fill snap id.
5777 * Otherwise this is a parent image, identified by pool, image
5778 * and snap ids - need to fill in names for those ids.
5779 */
6d69bb53 5780 if (!depth)
04077599
ID
5781 ret = rbd_spec_fill_snap_id(rbd_dev);
5782 else
5783 ret = rbd_spec_fill_names(rbd_dev);
1fe48023
ID
5784 if (ret) {
5785 if (ret == -ENOENT)
b26c047b 5786 pr_info("snap %s/%s%s%s@%s does not exist\n",
1fe48023 5787 rbd_dev->spec->pool_name,
b26c047b
ID
5788 rbd_dev->spec->pool_ns ?: "",
5789 rbd_dev->spec->pool_ns ? "/" : "",
1fe48023
ID
5790 rbd_dev->spec->image_name,
5791 rbd_dev->spec->snap_name);
33dca39f 5792 goto err_out_probe;
1fe48023 5793 }
9bb81c9b 5794
e8f59b59
ID
5795 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5796 ret = rbd_dev_v2_parent_info(rbd_dev);
5797 if (ret)
5798 goto err_out_probe;
5799
5800 /*
5801 * Need to warn users if this image is the one being
5802 * mapped and has a parent.
5803 */
6d69bb53 5804 if (!depth && rbd_dev->parent_spec)
e8f59b59
ID
5805 rbd_warn(rbd_dev,
5806 "WARNING: kernel layering is EXPERIMENTAL!");
5807 }
5808
6d69bb53 5809 ret = rbd_dev_probe_parent(rbd_dev, depth);
30d60ba2
AE
5810 if (ret)
5811 goto err_out_probe;
5812
5813 dout("discovered format %u image, header name is %s\n",
c41d13a3 5814 rbd_dev->image_format, rbd_dev->header_oid.name);
30d60ba2 5815 return 0;
e8f59b59 5816
6fd48b3b
AE
5817err_out_probe:
5818 rbd_dev_unprobe(rbd_dev);
b644de2b 5819err_out_watch:
6d69bb53 5820 if (!depth)
99d16943 5821 rbd_unregister_watch(rbd_dev);
332bb12d
AE
5822err_out_format:
5823 rbd_dev->image_format = 0;
5655c4d9
AE
5824 kfree(rbd_dev->spec->image_id);
5825 rbd_dev->spec->image_id = NULL;
a30b71b9
AE
5826 return ret;
5827}
5828
9b60e70b
ID
5829static ssize_t do_rbd_add(struct bus_type *bus,
5830 const char *buf,
5831 size_t count)
602adf40 5832{
cb8627c7 5833 struct rbd_device *rbd_dev = NULL;
dc79b113 5834 struct ceph_options *ceph_opts = NULL;
4e9afeba 5835 struct rbd_options *rbd_opts = NULL;
859c31df 5836 struct rbd_spec *spec = NULL;
9d3997fd 5837 struct rbd_client *rbdc;
b51c83c2 5838 int rc;
602adf40
YS
5839
5840 if (!try_module_get(THIS_MODULE))
5841 return -ENODEV;
5842
602adf40 5843 /* parse add command */
859c31df 5844 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
dc79b113 5845 if (rc < 0)
dd5ac32d 5846 goto out;
78cea76e 5847
9d3997fd
AE
5848 rbdc = rbd_get_client(ceph_opts);
5849 if (IS_ERR(rbdc)) {
5850 rc = PTR_ERR(rbdc);
0ddebc0c 5851 goto err_out_args;
9d3997fd 5852 }
602adf40 5853
602adf40 5854 /* pick the pool */
dd435855 5855 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
1fe48023
ID
5856 if (rc < 0) {
5857 if (rc == -ENOENT)
5858 pr_info("pool %s does not exist\n", spec->pool_name);
602adf40 5859 goto err_out_client;
1fe48023 5860 }
c0cd10db 5861 spec->pool_id = (u64)rc;
859c31df 5862
d147543d 5863 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
b51c83c2
ID
5864 if (!rbd_dev) {
5865 rc = -ENOMEM;
bd4ba655 5866 goto err_out_client;
b51c83c2 5867 }
c53d5893
AE
5868 rbdc = NULL; /* rbd_dev now owns this */
5869 spec = NULL; /* rbd_dev now owns this */
d147543d 5870 rbd_opts = NULL; /* rbd_dev now owns this */
602adf40 5871
0d6d1e9c
MC
5872 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
5873 if (!rbd_dev->config_info) {
5874 rc = -ENOMEM;
5875 goto err_out_rbd_dev;
5876 }
5877
811c6688 5878 down_write(&rbd_dev->header_rwsem);
6d69bb53 5879 rc = rbd_dev_image_probe(rbd_dev, 0);
0d6d1e9c
MC
5880 if (rc < 0) {
5881 up_write(&rbd_dev->header_rwsem);
c53d5893 5882 goto err_out_rbd_dev;
0d6d1e9c 5883 }
05fd6f6f 5884
7ce4eef7 5885 /* If we are mapping a snapshot it must be marked read-only */
7ce4eef7 5886 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
9568c93e 5887 rbd_dev->opts->read_only = true;
7ce4eef7 5888
b536f69a 5889 rc = rbd_dev_device_setup(rbd_dev);
fd22aef8 5890 if (rc)
8b679ec5 5891 goto err_out_image_probe;
3abef3b3 5892
e010dd0a
ID
5893 if (rbd_dev->opts->exclusive) {
5894 rc = rbd_add_acquire_lock(rbd_dev);
5895 if (rc)
5896 goto err_out_device_setup;
3abef3b3
AE
5897 }
5898
5769ed0c
ID
5899 /* Everything's ready. Announce the disk to the world. */
5900
5901 rc = device_add(&rbd_dev->dev);
5902 if (rc)
e010dd0a 5903 goto err_out_image_lock;
5769ed0c
ID
5904
5905 add_disk(rbd_dev->disk);
5906 /* see rbd_init_disk() */
5907 blk_put_queue(rbd_dev->disk->queue);
5908
5909 spin_lock(&rbd_dev_list_lock);
5910 list_add_tail(&rbd_dev->node, &rbd_dev_list);
5911 spin_unlock(&rbd_dev_list_lock);
5912
5913 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
5914 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
5915 rbd_dev->header.features);
dd5ac32d
ID
5916 rc = count;
5917out:
5918 module_put(THIS_MODULE);
5919 return rc;
b536f69a 5920
e010dd0a
ID
5921err_out_image_lock:
5922 rbd_dev_image_unlock(rbd_dev);
5769ed0c
ID
5923err_out_device_setup:
5924 rbd_dev_device_release(rbd_dev);
8b679ec5
ID
5925err_out_image_probe:
5926 rbd_dev_image_release(rbd_dev);
c53d5893
AE
5927err_out_rbd_dev:
5928 rbd_dev_destroy(rbd_dev);
bd4ba655 5929err_out_client:
9d3997fd 5930 rbd_put_client(rbdc);
0ddebc0c 5931err_out_args:
859c31df 5932 rbd_spec_put(spec);
d147543d 5933 kfree(rbd_opts);
dd5ac32d 5934 goto out;
602adf40
YS
5935}
5936
9b60e70b
ID
5937static ssize_t rbd_add(struct bus_type *bus,
5938 const char *buf,
5939 size_t count)
5940{
5941 if (single_major)
5942 return -EINVAL;
5943
5944 return do_rbd_add(bus, buf, count);
5945}
5946
5947static ssize_t rbd_add_single_major(struct bus_type *bus,
5948 const char *buf,
5949 size_t count)
5950{
5951 return do_rbd_add(bus, buf, count);
5952}
5953
05a46afd
AE
5954static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5955{
ad945fc1 5956 while (rbd_dev->parent) {
05a46afd
AE
5957 struct rbd_device *first = rbd_dev;
5958 struct rbd_device *second = first->parent;
5959 struct rbd_device *third;
5960
5961 /*
5962 * Walk down to the last parent in the chain (the one with
5963 * no grandparent) and remove it.
5964 */
5965 while (second && (third = second->parent)) {
5966 first = second;
5967 second = third;
5968 }
ad945fc1 5969 rbd_assert(second);
8ad42cd0 5970 rbd_dev_image_release(second);
8b679ec5 5971 rbd_dev_destroy(second);
ad945fc1
AE
5972 first->parent = NULL;
5973 first->parent_overlap = 0;
5974
5975 rbd_assert(first->parent_spec);
05a46afd
AE
5976 rbd_spec_put(first->parent_spec);
5977 first->parent_spec = NULL;
05a46afd
AE
5978 }
5979}
5980
9b60e70b
ID
5981static ssize_t do_rbd_remove(struct bus_type *bus,
5982 const char *buf,
5983 size_t count)
602adf40
YS
5984{
5985 struct rbd_device *rbd_dev = NULL;
751cc0e3
AE
5986 struct list_head *tmp;
5987 int dev_id;
0276dca6 5988 char opt_buf[6];
0276dca6 5989 bool force = false;
0d8189e1 5990 int ret;
602adf40 5991
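/*
 * The buffer written to /sys/bus/rbd/remove is expected to be
 * "<dev-id> [force]", e.g. "0" or "2 force"; "force" allows removal
 * even while the device is still open.
 */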
0276dca6
MC
5992 dev_id = -1;
5993 opt_buf[0] = '\0';
5994 sscanf(buf, "%d %5s", &dev_id, opt_buf);
5995 if (dev_id < 0) {
5996 pr_err("dev_id out of range\n");
602adf40 5997 return -EINVAL;
0276dca6
MC
5998 }
5999 if (opt_buf[0] != '\0') {
6000 if (!strcmp(opt_buf, "force")) {
6001 force = true;
6002 } else {
6003 pr_err("bad remove option at '%s'\n", opt_buf);
6004 return -EINVAL;
6005 }
6006 }
602adf40 6007
751cc0e3
AE
6008 ret = -ENOENT;
6009 spin_lock(&rbd_dev_list_lock);
6010 list_for_each(tmp, &rbd_dev_list) {
6011 rbd_dev = list_entry(tmp, struct rbd_device, node);
6012 if (rbd_dev->dev_id == dev_id) {
6013 ret = 0;
6014 break;
6015 }
42382b70 6016 }
751cc0e3
AE
6017 if (!ret) {
6018 spin_lock_irq(&rbd_dev->lock);
0276dca6 6019 if (rbd_dev->open_count && !force)
751cc0e3 6020 ret = -EBUSY;
85f5a4d6
ID
6021 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6022 &rbd_dev->flags))
6023 ret = -EINPROGRESS;
751cc0e3
AE
6024 spin_unlock_irq(&rbd_dev->lock);
6025 }
6026 spin_unlock(&rbd_dev_list_lock);
85f5a4d6 6027 if (ret)
1ba0f1e7 6028 return ret;
751cc0e3 6029
0276dca6
MC
6030 if (force) {
6031 /*
6032 * Prevent new IO from being queued and wait for existing
6033 * IO to complete/fail.
6034 */
6035 blk_mq_freeze_queue(rbd_dev->disk->queue);
6036 blk_set_queue_dying(rbd_dev->disk->queue);
6037 }
6038
5769ed0c
ID
6039 del_gendisk(rbd_dev->disk);
6040 spin_lock(&rbd_dev_list_lock);
6041 list_del_init(&rbd_dev->node);
6042 spin_unlock(&rbd_dev_list_lock);
6043 device_del(&rbd_dev->dev);
fca27065 6044
e010dd0a 6045 rbd_dev_image_unlock(rbd_dev);
dd5ac32d 6046 rbd_dev_device_release(rbd_dev);
8ad42cd0 6047 rbd_dev_image_release(rbd_dev);
8b679ec5 6048 rbd_dev_destroy(rbd_dev);
1ba0f1e7 6049 return count;
602adf40
YS
6050}
6051
9b60e70b
ID
6052static ssize_t rbd_remove(struct bus_type *bus,
6053 const char *buf,
6054 size_t count)
6055{
6056 if (single_major)
6057 return -EINVAL;
6058
6059 return do_rbd_remove(bus, buf, count);
6060}
6061
6062static ssize_t rbd_remove_single_major(struct bus_type *bus,
6063 const char *buf,
6064 size_t count)
6065{
6066 return do_rbd_remove(bus, buf, count);
6067}
6068
602adf40
YS
6069/*
6070 * create control files in sysfs
dfc5606d 6071 * /sys/bus/rbd/...
602adf40 6072 */
7d8dc534 6073static int __init rbd_sysfs_init(void)
602adf40 6074{
dfc5606d 6075 int ret;
602adf40 6076
fed4c143 6077 ret = device_register(&rbd_root_dev);
21079786 6078 if (ret < 0)
dfc5606d 6079 return ret;
602adf40 6080
fed4c143
AE
6081 ret = bus_register(&rbd_bus_type);
6082 if (ret < 0)
6083 device_unregister(&rbd_root_dev);
602adf40 6084
602adf40
YS
6085 return ret;
6086}
6087
7d8dc534 6088static void __exit rbd_sysfs_cleanup(void)
602adf40 6089{
dfc5606d 6090 bus_unregister(&rbd_bus_type);
fed4c143 6091 device_unregister(&rbd_root_dev);
602adf40
YS
6092}
6093
7d8dc534 6094static int __init rbd_slab_init(void)
1c2a9dfe
AE
6095{
6096 rbd_assert(!rbd_img_request_cache);
03d94406 6097 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
868311b1
AE
6098 if (!rbd_img_request_cache)
6099 return -ENOMEM;
6100
6101 rbd_assert(!rbd_obj_request_cache);
03d94406 6102 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
78c2a44a
AE
6103 if (!rbd_obj_request_cache)
6104 goto out_err;
6105
6c696d85 6106 return 0;
1c2a9dfe 6107
6c696d85 6108out_err:
868311b1
AE
6109 kmem_cache_destroy(rbd_img_request_cache);
6110 rbd_img_request_cache = NULL;
1c2a9dfe
AE
6111 return -ENOMEM;
6112}
6113
6114static void rbd_slab_exit(void)
6115{
868311b1
AE
6116 rbd_assert(rbd_obj_request_cache);
6117 kmem_cache_destroy(rbd_obj_request_cache);
6118 rbd_obj_request_cache = NULL;
6119
1c2a9dfe
AE
6120 rbd_assert(rbd_img_request_cache);
6121 kmem_cache_destroy(rbd_img_request_cache);
6122 rbd_img_request_cache = NULL;
6123}
6124
cc344fa1 6125static int __init rbd_init(void)
602adf40
YS
6126{
6127 int rc;
6128
1e32d34c
AE
6129 if (!libceph_compatible(NULL)) {
6130 rbd_warn(NULL, "libceph incompatibility (quitting)");
1e32d34c
AE
6131 return -EINVAL;
6132 }
e1b4d96d 6133
1c2a9dfe 6134 rc = rbd_slab_init();
602adf40
YS
6135 if (rc)
6136 return rc;
e1b4d96d 6137
f5ee37bd
ID
6138 /*
6139 * The number of active work items is limited by the number of
f77303bd 6140 * rbd devices * queue depth, so leave @max_active at default.
f5ee37bd
ID
6141 */
6142 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6143 if (!rbd_wq) {
6144 rc = -ENOMEM;
6145 goto err_out_slab;
6146 }
6147
9b60e70b
ID
6148 if (single_major) {
6149 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6150 if (rbd_major < 0) {
6151 rc = rbd_major;
f5ee37bd 6152 goto err_out_wq;
9b60e70b
ID
6153 }
6154 }
6155
1c2a9dfe
AE
6156 rc = rbd_sysfs_init();
6157 if (rc)
9b60e70b
ID
6158 goto err_out_blkdev;
6159
6160 if (single_major)
6161 pr_info("loaded (major %d)\n", rbd_major);
6162 else
6163 pr_info("loaded\n");
1c2a9dfe 6164
e1b4d96d
ID
6165 return 0;
6166
9b60e70b
ID
6167err_out_blkdev:
6168 if (single_major)
6169 unregister_blkdev(rbd_major, RBD_DRV_NAME);
f5ee37bd
ID
6170err_out_wq:
6171 destroy_workqueue(rbd_wq);
e1b4d96d
ID
6172err_out_slab:
6173 rbd_slab_exit();
1c2a9dfe 6174 return rc;
602adf40
YS
6175}
6176
cc344fa1 6177static void __exit rbd_exit(void)
602adf40 6178{
ffe312cf 6179 ida_destroy(&rbd_dev_id_ida);
602adf40 6180 rbd_sysfs_cleanup();
9b60e70b
ID
6181 if (single_major)
6182 unregister_blkdev(rbd_major, RBD_DRV_NAME);
f5ee37bd 6183 destroy_workqueue(rbd_wq);
1c2a9dfe 6184 rbd_slab_exit();
602adf40
YS
6185}
6186
6187module_init(rbd_init);
6188module_exit(rbd_exit);
6189
d552c619 6190MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
602adf40
YS
6191MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6192MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
602adf40
YS
6193/* following authorship retained from original osdblk.c */
6194MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6195
90da258b 6196MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
602adf40 6197MODULE_LICENSE("GPL");