/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	struct list_head	rq_queue;	/* incoming rq queue */
	spinlock_t		lock;		/* queue, flags, open_count */
	struct work_struct	rq_work;

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

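/*
 * In single-major mode every rbd device gets a block of
 * 1 << RBD_SINGLE_MAJOR_PART_SHIFT (16) minors for itself and its
 * partitions, so a device id and its first minor number map to each
 * other by a simple shift.
 */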
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

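/*
 * Block device open handler: refuse writes to a read-only mapping,
 * fail if the device is in the process of being removed, and
 * otherwise count the opener and take a reference on the device.
 */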
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

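/*
 * BLKROSET handler: switch the mapping between read-only and
 * read-write.  A mapped snapshot can never be made writable, and the
 * change is refused while anyone else has the device open.
 */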
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* A mapped snapshot can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

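/*
 * Parse a single rbd-specific option token (see rbd_opts_tokens
 * above) and record the result in the rbd_options structure.
 */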
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client.
 *
 * Takes rbd_client_list_lock to unlink the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

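/*
 * Return a dynamically allocated copy (kstrdup) of the name of the
 * snapshot at the given position in the format 1 snapshot name list.
 */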
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

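/*
 * Look up the size of the image at the given snapshot id (or the
 * current size for CEPH_NOSNAP), dispatching on the image format.
 */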
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

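/*
 * Record the size and feature bits of the mapped snapshot (or of the
 * image head) in rbd_dev->mapping.
 */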
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

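/*
 * Build the object name for the segment containing the given image
 * offset: "<object_prefix>.<segment>", with the segment number
 * printed as 12 hex digits for format 1 images and 16 for format 2.
 */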
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

1383
926f9b3f
AE
1384/*
1385 * The default/initial value for all object request flags is 0. For
1386 * each flag, once its value is set to 1 it is never reset to 0
1387 * again.
1388 */
57acbaa7 1389static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
926f9b3f 1390{
57acbaa7 1391 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
926f9b3f
AE
1392 struct rbd_device *rbd_dev;
1393
57acbaa7 1394 rbd_dev = obj_request->img_request->rbd_dev;
9584d508 1395 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
926f9b3f
AE
1396 obj_request);
1397 }
1398}
1399
57acbaa7 1400static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
926f9b3f
AE
1401{
1402 smp_mb();
57acbaa7 1403 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
926f9b3f
AE
1404}
1405
57acbaa7 1406static void obj_request_done_set(struct rbd_obj_request *obj_request)
6365d33a 1407{
57acbaa7
AE
1408 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1409 struct rbd_device *rbd_dev = NULL;
6365d33a 1410
57acbaa7
AE
1411 if (obj_request_img_data_test(obj_request))
1412 rbd_dev = obj_request->img_request->rbd_dev;
9584d508 1413 rbd_warn(rbd_dev, "obj_request %p already marked done",
6365d33a
AE
1414 obj_request);
1415 }
1416}
1417
57acbaa7 1418static bool obj_request_done_test(struct rbd_obj_request *obj_request)
6365d33a
AE
1419{
1420 smp_mb();
57acbaa7 1421 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
6365d33a
AE
1422}
1423
5679c59f
AE
1424/*
1425 * This sets the KNOWN flag after (possibly) setting the EXISTS
1426 * flag. The latter is set based on the "exists" value provided.
1427 *
1428 * Note that for our purposes once an object exists it never goes
1429 * away again. It's possible that the response from two existence
1430 * checks are separated by the creation of the target object, and
1431 * the first ("doesn't exist") response arrives *after* the second
1432 * ("does exist"). In that case we ignore the second one.
1433 */
1434static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1435 bool exists)
1436{
1437 if (exists)
1438 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1439 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1440 smp_mb();
1441}
1442
1443static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1444{
1445 smp_mb();
1446 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1447}
1448
1449static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1450{
1451 smp_mb();
1452 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1453}
1454
9638556a
ID
1455static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1456{
1457 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1458
1459 return obj_request->img_offset <
1460 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1461}
1462
bf0d5f50
AE
1463static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1464{
37206ee5
AE
1465 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1466 atomic_read(&obj_request->kref.refcount));
bf0d5f50
AE
1467 kref_get(&obj_request->kref);
1468}
1469
1470static void rbd_obj_request_destroy(struct kref *kref);
1471static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1472{
1473 rbd_assert(obj_request != NULL);
37206ee5
AE
1474 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1475 atomic_read(&obj_request->kref.refcount));
bf0d5f50
AE
1476 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1477}
1478
0f2d5be7
AE
1479static void rbd_img_request_get(struct rbd_img_request *img_request)
1480{
1481 dout("%s: img %p (was %d)\n", __func__, img_request,
1482 atomic_read(&img_request->kref.refcount));
1483 kref_get(&img_request->kref);
1484}
1485
e93f3152
AE
1486static bool img_request_child_test(struct rbd_img_request *img_request);
1487static void rbd_parent_request_destroy(struct kref *kref);
bf0d5f50
AE
1488static void rbd_img_request_destroy(struct kref *kref);
1489static void rbd_img_request_put(struct rbd_img_request *img_request)
1490{
1491 rbd_assert(img_request != NULL);
37206ee5
AE
1492 dout("%s: img %p (was %d)\n", __func__, img_request,
1493 atomic_read(&img_request->kref.refcount));
e93f3152
AE
1494 if (img_request_child_test(img_request))
1495 kref_put(&img_request->kref, rbd_parent_request_destroy);
1496 else
1497 kref_put(&img_request->kref, rbd_img_request_destroy);
bf0d5f50
AE
1498}
1499
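/*
 * Link an object request onto its image request's list.  The image
 * request takes over the caller's reference to the object request;
 * that reference is dropped again by rbd_img_obj_request_del().
 */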
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

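/* Hand the object request's prepared osd request to the osd client. */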
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}

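/*
 * Complete an image request: compute the aggregate transfer count
 * from its object requests and invoke the caller's callback, or drop
 * the image request reference if no callback was registered.
 */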
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

c47f9371 1737static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
bf0d5f50 1738{
57acbaa7 1739 struct rbd_img_request *img_request = NULL;
a9e8ba2c 1740 struct rbd_device *rbd_dev = NULL;
57acbaa7
AE
1741 bool layered = false;
1742
1743 if (obj_request_img_data_test(obj_request)) {
1744 img_request = obj_request->img_request;
1745 layered = img_request && img_request_layered_test(img_request);
a9e8ba2c 1746 rbd_dev = img_request->rbd_dev;
57acbaa7 1747 }
8b3e1a56
AE
1748
1749 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1750 obj_request, img_request, obj_request->result,
1751 obj_request->xferred, obj_request->length);
a9e8ba2c
AE
1752 if (layered && obj_request->result == -ENOENT &&
1753 obj_request->img_offset < rbd_dev->parent_overlap)
8b3e1a56
AE
1754 rbd_img_parent_read(obj_request);
1755 else if (img_request)
6e2a4505
AE
1756 rbd_img_obj_request_read_callback(obj_request);
1757 else
1758 obj_request_done_set(obj_request);
bf0d5f50
AE
1759}
1760
c47f9371 1761static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
bf0d5f50 1762{
1b83bef2
SW
1763 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1764 obj_request->result, obj_request->length);
1765 /*
8b3e1a56
AE
1766 * There is no such thing as a successful short write. Set
1767 * it to our originally-requested length.
1b83bef2
SW
1768 */
1769 obj_request->xferred = obj_request->length;
07741308 1770 obj_request_done_set(obj_request);
bf0d5f50
AE
1771}
1772
90e98c52
GZ
1773static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1774{
1775 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1776 obj_request->result, obj_request->length);
1777 /*
1778 * There is no such thing as a successful short discard. Set
1779 * it to our originally-requested length.
1780 */
1781 obj_request->xferred = obj_request->length;
d0265de7
JD
1782 /* discarding a non-existent object is not a problem */
1783 if (obj_request->result == -ENOENT)
1784 obj_request->result = 0;
90e98c52
GZ
1785 obj_request_done_set(obj_request);
1786}
1787
fbfab539
AE
1788/*
1789 * For a simple stat call there's nothing to do. We'll do more if
1790 * this is part of a write sequence for a layered image.
1791 */
c47f9371 1792static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
fbfab539 1793{
37206ee5 1794 dout("%s: obj %p\n", __func__, obj_request);
fbfab539
AE
1795 obj_request_done_set(obj_request);
1796}
1797
bf0d5f50
AE
1798static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1799 struct ceph_msg *msg)
1800{
1801 struct rbd_obj_request *obj_request = osd_req->r_priv;
bf0d5f50
AE
1802 u16 opcode;
1803
37206ee5 1804 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
bf0d5f50 1805 rbd_assert(osd_req == obj_request->osd_req);
57acbaa7
AE
1806 if (obj_request_img_data_test(obj_request)) {
1807 rbd_assert(obj_request->img_request);
1808 rbd_assert(obj_request->which != BAD_WHICH);
1809 } else {
1810 rbd_assert(obj_request->which == BAD_WHICH);
1811 }
bf0d5f50 1812
1b83bef2
SW
1813 if (osd_req->r_result < 0)
1814 obj_request->result = osd_req->r_result;
bf0d5f50 1815
7cc69d42 1816 rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
bf0d5f50 1817
c47f9371
AE
1818 /*
1819 * We support a 64-bit length, but ultimately it has to be
1820 * passed to blk_end_request(), which takes an unsigned int.
1821 */
1b83bef2 1822 obj_request->xferred = osd_req->r_reply_op_len[0];
8b3e1a56 1823 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
0ccd5926 1824
79528734 1825 opcode = osd_req->r_ops[0].op;
bf0d5f50
AE
1826 switch (opcode) {
1827 case CEPH_OSD_OP_READ:
c47f9371 1828 rbd_osd_read_callback(obj_request);
bf0d5f50 1829 break;
0ccd5926
ID
1830 case CEPH_OSD_OP_SETALLOCHINT:
1831 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
1832 /* fall through */
bf0d5f50 1833 case CEPH_OSD_OP_WRITE:
c47f9371 1834 rbd_osd_write_callback(obj_request);
bf0d5f50 1835 break;
fbfab539 1836 case CEPH_OSD_OP_STAT:
c47f9371 1837 rbd_osd_stat_callback(obj_request);
fbfab539 1838 break;
90e98c52
GZ
1839 case CEPH_OSD_OP_DELETE:
1840 case CEPH_OSD_OP_TRUNCATE:
1841 case CEPH_OSD_OP_ZERO:
1842 rbd_osd_discard_callback(obj_request);
1843 break;
36be9a76 1844 case CEPH_OSD_OP_CALL:
b8d70035 1845 case CEPH_OSD_OP_NOTIFY_ACK:
9969ebc5 1846 case CEPH_OSD_OP_WATCH:
c47f9371 1847 rbd_osd_trivial_callback(obj_request);
9969ebc5 1848 break;
bf0d5f50 1849 default:
9584d508 1850 rbd_warn(NULL, "%s: unsupported op %hu",
bf0d5f50
AE
1851 obj_request->object_name, (unsigned short) opcode);
1852 break;
1853 }
1854
07741308 1855 if (obj_request_done_test(obj_request))
bf0d5f50
AE
1856 rbd_obj_request_complete(obj_request);
1857}
1858
9d4df01f 1859static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
430c28c3
AE
1860{
1861 struct rbd_img_request *img_request = obj_request->img_request;
8c042b0d 1862 struct ceph_osd_request *osd_req = obj_request->osd_req;
9d4df01f 1863 u64 snap_id;
430c28c3 1864
8c042b0d 1865 rbd_assert(osd_req != NULL);
430c28c3 1866
9d4df01f 1867 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
8c042b0d 1868 ceph_osdc_build_request(osd_req, obj_request->offset,
9d4df01f
AE
1869 NULL, snap_id, NULL);
1870}
1871
1872static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1873{
1874 struct rbd_img_request *img_request = obj_request->img_request;
1875 struct ceph_osd_request *osd_req = obj_request->osd_req;
1876 struct ceph_snap_context *snapc;
1877 struct timespec mtime = CURRENT_TIME;
1878
1879 rbd_assert(osd_req != NULL);
1880
1881 snapc = img_request ? img_request->snapc : NULL;
1882 ceph_osdc_build_request(osd_req, obj_request->offset,
1883 snapc, CEPH_NOSNAP, &mtime);
430c28c3
AE
1884}
1885
0ccd5926
ID
1886/*
1887 * Create an osd request. A read request has one osd op (read).
1888 * A write request has either one (watch) or two (hint+write) osd ops.
1889 * (All rbd data writes are prefixed with an allocation hint op, but
1890 * technically osd watch is a write request, hence this distinction.)
1891 */
bf0d5f50
AE
1892static struct ceph_osd_request *rbd_osd_req_create(
1893 struct rbd_device *rbd_dev,
6d2940c8 1894 enum obj_operation_type op_type,
deb236b3 1895 unsigned int num_ops,
430c28c3 1896 struct rbd_obj_request *obj_request)
bf0d5f50 1897{
bf0d5f50
AE
1898 struct ceph_snap_context *snapc = NULL;
1899 struct ceph_osd_client *osdc;
1900 struct ceph_osd_request *osd_req;
bf0d5f50 1901
90e98c52
GZ
1902 if (obj_request_img_data_test(obj_request) &&
1903 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
6365d33a 1904 struct rbd_img_request *img_request = obj_request->img_request;
90e98c52
GZ
1905 if (op_type == OBJ_OP_WRITE) {
1906 rbd_assert(img_request_write_test(img_request));
1907 } else {
1908 rbd_assert(img_request_discard_test(img_request));
1909 }
6d2940c8 1910 snapc = img_request->snapc;
bf0d5f50
AE
1911 }
1912
6d2940c8 1913 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
deb236b3
ID
1914
1915 /* Allocate and initialize the request, for the num_ops ops */
bf0d5f50
AE
1916
1917 osdc = &rbd_dev->rbd_client->client->osdc;
deb236b3
ID
1918 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1919 GFP_ATOMIC);
bf0d5f50
AE
1920 if (!osd_req)
1921 return NULL; /* ENOMEM */
bf0d5f50 1922
90e98c52 1923 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
bf0d5f50 1924 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
430c28c3 1925 else
bf0d5f50 1926 osd_req->r_flags = CEPH_OSD_FLAG_READ;
bf0d5f50
AE
1927
1928 osd_req->r_callback = rbd_osd_req_callback;
1929 osd_req->r_priv = obj_request;
1930
3c972c95
ID
1931 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1932 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
bf0d5f50 1933
bf0d5f50
AE
1934 return osd_req;
1935}
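/*
 * Editorial sketch, not part of the original source: the two request
 * shapes a caller ends up with, mirroring the call made from
 * rbd_img_request_fill() further below.  A data write gets the
 * allocation hint op plus the write op; a read (or discard) gets a
 * single op:
 *
 *	osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 2, obj_request);
 *	osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, obj_request);
 */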
1936
0eefd470 1937/*
d3246fb0
JD
1938 * Create a copyup osd request based on the information in the object
1939 * request supplied. A copyup request has two or three osd ops, a
1940 * copyup method call, potentially a hint op, and a write or truncate
1941 * or zero op.
0eefd470
AE
1942 */
1943static struct ceph_osd_request *
1944rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1945{
1946 struct rbd_img_request *img_request;
1947 struct ceph_snap_context *snapc;
1948 struct rbd_device *rbd_dev;
1949 struct ceph_osd_client *osdc;
1950 struct ceph_osd_request *osd_req;
d3246fb0 1951 int num_osd_ops = 3;
0eefd470
AE
1952
1953 rbd_assert(obj_request_img_data_test(obj_request));
1954 img_request = obj_request->img_request;
1955 rbd_assert(img_request);
d3246fb0
JD
1956 rbd_assert(img_request_write_test(img_request) ||
1957 img_request_discard_test(img_request));
0eefd470 1958
d3246fb0
JD
1959 if (img_request_discard_test(img_request))
1960 num_osd_ops = 2;
1961
1962 /* Allocate and initialize the request, for all the ops */
0eefd470
AE
1963
1964 snapc = img_request->snapc;
1965 rbd_dev = img_request->rbd_dev;
1966 osdc = &rbd_dev->rbd_client->client->osdc;
d3246fb0
JD
1967 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
1968 false, GFP_ATOMIC);
0eefd470
AE
1969 if (!osd_req)
1970 return NULL; /* ENOMEM */
1971
1972 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1973 osd_req->r_callback = rbd_osd_req_callback;
1974 osd_req->r_priv = obj_request;
1975
3c972c95
ID
1976 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1977 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
0eefd470 1978
0eefd470
AE
1979 return osd_req;
1980}
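/*
 * Editorial sketch, not part of the original source: the ops in the
 * request allocated here are filled in by
 * rbd_img_obj_parent_read_full_callback() further below.  Op 0 is the
 * "copyup" class method call and the remaining op(s) start at index 1:
 *
 *	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
 *	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
 */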
1981
1982
bf0d5f50
AE
1983static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1984{
1985 ceph_osdc_put_request(osd_req);
1986}
1987
1988/* object_name is assumed to be a non-null pointer and NUL-terminated */
1989
1990static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1991 u64 offset, u64 length,
1992 enum obj_request_type type)
1993{
1994 struct rbd_obj_request *obj_request;
1995 size_t size;
1996 char *name;
1997
1998 rbd_assert(obj_request_type_valid(type));
1999
2000 size = strlen(object_name) + 1;
f907ad55
AE
2001 name = kmalloc(size, GFP_KERNEL);
2002 if (!name)
bf0d5f50
AE
2003 return NULL;
2004
868311b1 2005 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
f907ad55
AE
2006 if (!obj_request) {
2007 kfree(name);
2008 return NULL;
2009 }
2010
bf0d5f50
AE
2011 obj_request->object_name = memcpy(name, object_name, size);
2012 obj_request->offset = offset;
2013 obj_request->length = length;
926f9b3f 2014 obj_request->flags = 0;
bf0d5f50
AE
2015 obj_request->which = BAD_WHICH;
2016 obj_request->type = type;
2017 INIT_LIST_HEAD(&obj_request->links);
788e2df3 2018 init_completion(&obj_request->completion);
bf0d5f50
AE
2019 kref_init(&obj_request->kref);
2020
37206ee5
AE
2021 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2022 offset, length, (int)type, obj_request);
2023
bf0d5f50
AE
2024 return obj_request;
2025}
2026
2027static void rbd_obj_request_destroy(struct kref *kref)
2028{
2029 struct rbd_obj_request *obj_request;
2030
2031 obj_request = container_of(kref, struct rbd_obj_request, kref);
2032
37206ee5
AE
2033 dout("%s: obj %p\n", __func__, obj_request);
2034
bf0d5f50
AE
2035 rbd_assert(obj_request->img_request == NULL);
2036 rbd_assert(obj_request->which == BAD_WHICH);
2037
2038 if (obj_request->osd_req)
2039 rbd_osd_req_destroy(obj_request->osd_req);
2040
2041 rbd_assert(obj_request_type_valid(obj_request->type));
2042 switch (obj_request->type) {
9969ebc5
AE
2043 case OBJ_REQUEST_NODATA:
2044 break; /* Nothing to do */
bf0d5f50
AE
2045 case OBJ_REQUEST_BIO:
2046 if (obj_request->bio_list)
2047 bio_chain_put(obj_request->bio_list);
2048 break;
788e2df3
AE
2049 case OBJ_REQUEST_PAGES:
2050 if (obj_request->pages)
2051 ceph_release_page_vector(obj_request->pages,
2052 obj_request->page_count);
2053 break;
bf0d5f50
AE
2054 }
2055
f907ad55 2056 kfree(obj_request->object_name);
868311b1
AE
2057 obj_request->object_name = NULL;
2058 kmem_cache_free(rbd_obj_request_cache, obj_request);
bf0d5f50
AE
2059}
2060
fb65d228
AE
2061/* It's OK to call this for a device with no parent */
2062
2063static void rbd_spec_put(struct rbd_spec *spec);
2064static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2065{
2066 rbd_dev_remove_parent(rbd_dev);
2067 rbd_spec_put(rbd_dev->parent_spec);
2068 rbd_dev->parent_spec = NULL;
2069 rbd_dev->parent_overlap = 0;
2070}
2071
a2acd00e
AE
2072/*
2073 * Parent image reference counting is used to determine when an
2074 * image's parent fields can be safely torn down--after there are no
2075 * more in-flight requests to the parent image. When the last
2076 * reference is dropped, cleaning them up is safe.
2077 */
2078static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2079{
2080 int counter;
2081
2082 if (!rbd_dev->parent_spec)
2083 return;
2084
2085 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2086 if (counter > 0)
2087 return;
2088
2089 /* Last reference; clean up parent data structures */
2090
2091 if (!counter)
2092 rbd_dev_unparent(rbd_dev);
2093 else
9584d508 2094 rbd_warn(rbd_dev, "parent reference underflow");
a2acd00e
AE
2095}
2096
2097/*
2098 * If an image has a non-zero parent overlap, get a reference to its
2099 * parent.
2100 *
2101 * Returns true if the rbd device has a parent with a non-zero
2102 * overlap and a reference for it was successfully taken, or
2103 * false otherwise.
2104 */
2105static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2106{
ae43e9d0 2107 int counter = 0;
a2acd00e
AE
2108
2109 if (!rbd_dev->parent_spec)
2110 return false;
2111
ae43e9d0
ID
2112 down_read(&rbd_dev->header_rwsem);
2113 if (rbd_dev->parent_overlap)
2114 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2115 up_read(&rbd_dev->header_rwsem);
a2acd00e
AE
2116
2117 if (counter < 0)
9584d508 2118 rbd_warn(rbd_dev, "parent reference overflow");
a2acd00e 2119
ae43e9d0 2120 return counter > 0;
a2acd00e
AE
2121}
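/*
 * Editorial sketch, not part of the original source: the reference
 * taken here is paired with rbd_dev_parent_put(), as the image request
 * code below does when a layered request is created and destroyed:
 *
 *	if (rbd_dev_parent_get(rbd_dev))
 *		img_request_layered_set(img_request);
 *	...
 *	if (img_request_layered_test(img_request)) {
 *		img_request_layered_clear(img_request);
 *		rbd_dev_parent_put(img_request->rbd_dev);
 *	}
 */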
2122
bf0d5f50
AE
2123/*
2124 * Caller is responsible for filling in the list of object requests
2125 * that comprises the image request, and the Linux request pointer
2126 * (if there is one).
2127 */
cc344fa1
AE
2128static struct rbd_img_request *rbd_img_request_create(
2129 struct rbd_device *rbd_dev,
bf0d5f50 2130 u64 offset, u64 length,
6d2940c8 2131 enum obj_operation_type op_type,
4e752f0a 2132 struct ceph_snap_context *snapc)
bf0d5f50
AE
2133{
2134 struct rbd_img_request *img_request;
bf0d5f50 2135
7a716aac 2136 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
bf0d5f50
AE
2137 if (!img_request)
2138 return NULL;
2139
bf0d5f50
AE
2140 img_request->rq = NULL;
2141 img_request->rbd_dev = rbd_dev;
2142 img_request->offset = offset;
2143 img_request->length = length;
0c425248 2144 img_request->flags = 0;
90e98c52
GZ
2145 if (op_type == OBJ_OP_DISCARD) {
2146 img_request_discard_set(img_request);
2147 img_request->snapc = snapc;
2148 } else if (op_type == OBJ_OP_WRITE) {
0c425248 2149 img_request_write_set(img_request);
4e752f0a 2150 img_request->snapc = snapc;
0c425248 2151 } else {
bf0d5f50 2152 img_request->snap_id = rbd_dev->spec->snap_id;
0c425248 2153 }
a2acd00e 2154 if (rbd_dev_parent_get(rbd_dev))
d0b2e944 2155 img_request_layered_set(img_request);
bf0d5f50
AE
2156 spin_lock_init(&img_request->completion_lock);
2157 img_request->next_completion = 0;
2158 img_request->callback = NULL;
a5a337d4 2159 img_request->result = 0;
bf0d5f50
AE
2160 img_request->obj_request_count = 0;
2161 INIT_LIST_HEAD(&img_request->obj_requests);
2162 kref_init(&img_request->kref);
2163
37206ee5 2164 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
6d2940c8 2165 obj_op_name(op_type), offset, length, img_request);
37206ee5 2166
bf0d5f50
AE
2167 return img_request;
2168}
2169
2170static void rbd_img_request_destroy(struct kref *kref)
2171{
2172 struct rbd_img_request *img_request;
2173 struct rbd_obj_request *obj_request;
2174 struct rbd_obj_request *next_obj_request;
2175
2176 img_request = container_of(kref, struct rbd_img_request, kref);
2177
37206ee5
AE
2178 dout("%s: img %p\n", __func__, img_request);
2179
bf0d5f50
AE
2180 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2181 rbd_img_obj_request_del(img_request, obj_request);
25dcf954 2182 rbd_assert(img_request->obj_request_count == 0);
bf0d5f50 2183
a2acd00e
AE
2184 if (img_request_layered_test(img_request)) {
2185 img_request_layered_clear(img_request);
2186 rbd_dev_parent_put(img_request->rbd_dev);
2187 }
2188
bef95455
JD
2189 if (img_request_write_test(img_request) ||
2190 img_request_discard_test(img_request))
812164f8 2191 ceph_put_snap_context(img_request->snapc);
bf0d5f50 2192
1c2a9dfe 2193 kmem_cache_free(rbd_img_request_cache, img_request);
bf0d5f50
AE
2194}
2195
e93f3152
AE
2196static struct rbd_img_request *rbd_parent_request_create(
2197 struct rbd_obj_request *obj_request,
2198 u64 img_offset, u64 length)
2199{
2200 struct rbd_img_request *parent_request;
2201 struct rbd_device *rbd_dev;
2202
2203 rbd_assert(obj_request->img_request);
2204 rbd_dev = obj_request->img_request->rbd_dev;
2205
4e752f0a 2206 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
6d2940c8 2207 length, OBJ_OP_READ, NULL);
e93f3152
AE
2208 if (!parent_request)
2209 return NULL;
2210
2211 img_request_child_set(parent_request);
2212 rbd_obj_request_get(obj_request);
2213 parent_request->obj_request = obj_request;
2214
2215 return parent_request;
2216}
2217
2218static void rbd_parent_request_destroy(struct kref *kref)
2219{
2220 struct rbd_img_request *parent_request;
2221 struct rbd_obj_request *orig_request;
2222
2223 parent_request = container_of(kref, struct rbd_img_request, kref);
2224 orig_request = parent_request->obj_request;
2225
2226 parent_request->obj_request = NULL;
2227 rbd_obj_request_put(orig_request);
2228 img_request_child_clear(parent_request);
2229
2230 rbd_img_request_destroy(kref);
2231}
2232
1217857f
AE
2233static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2234{
6365d33a 2235 struct rbd_img_request *img_request;
1217857f
AE
2236 unsigned int xferred;
2237 int result;
8b3e1a56 2238 bool more;
1217857f 2239
6365d33a
AE
2240 rbd_assert(obj_request_img_data_test(obj_request));
2241 img_request = obj_request->img_request;
2242
1217857f
AE
2243 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2244 xferred = (unsigned int)obj_request->xferred;
2245 result = obj_request->result;
2246 if (result) {
2247 struct rbd_device *rbd_dev = img_request->rbd_dev;
6d2940c8
GZ
2248 enum obj_operation_type op_type;
2249
90e98c52
GZ
2250 if (img_request_discard_test(img_request))
2251 op_type = OBJ_OP_DISCARD;
2252 else if (img_request_write_test(img_request))
2253 op_type = OBJ_OP_WRITE;
2254 else
2255 op_type = OBJ_OP_READ;
1217857f 2256
9584d508 2257 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
6d2940c8
GZ
2258 obj_op_name(op_type), obj_request->length,
2259 obj_request->img_offset, obj_request->offset);
9584d508 2260 rbd_warn(rbd_dev, " result %d xferred %x",
1217857f
AE
2261 result, xferred);
2262 if (!img_request->result)
2263 img_request->result = result;
2264 }
2265
f1a4739f
AE
2266 /* Image object requests don't own their page array */
2267
2268 if (obj_request->type == OBJ_REQUEST_PAGES) {
2269 obj_request->pages = NULL;
2270 obj_request->page_count = 0;
2271 }
2272
8b3e1a56
AE
2273 if (img_request_child_test(img_request)) {
2274 rbd_assert(img_request->obj_request != NULL);
2275 more = obj_request->which < img_request->obj_request_count - 1;
2276 } else {
2277 rbd_assert(img_request->rq != NULL);
2278 more = blk_end_request(img_request->rq, result, xferred);
2279 }
2280
2281 return more;
1217857f
AE
2282}
2283
2169238d
AE
2284static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2285{
2286 struct rbd_img_request *img_request;
2287 u32 which = obj_request->which;
2288 bool more = true;
2289
6365d33a 2290 rbd_assert(obj_request_img_data_test(obj_request));
2169238d
AE
2291 img_request = obj_request->img_request;
2292
2293 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2294 rbd_assert(img_request != NULL);
2169238d
AE
2295 rbd_assert(img_request->obj_request_count > 0);
2296 rbd_assert(which != BAD_WHICH);
2297 rbd_assert(which < img_request->obj_request_count);
2169238d
AE
2298
2299 spin_lock_irq(&img_request->completion_lock);
2300 if (which != img_request->next_completion)
2301 goto out;
2302
2303 for_each_obj_request_from(img_request, obj_request) {
2169238d
AE
2304 rbd_assert(more);
2305 rbd_assert(which < img_request->obj_request_count);
2306
2307 if (!obj_request_done_test(obj_request))
2308 break;
1217857f 2309 more = rbd_img_obj_end_request(obj_request);
2169238d
AE
2310 which++;
2311 }
2312
2313 rbd_assert(more ^ (which == img_request->obj_request_count));
2314 img_request->next_completion = which;
2315out:
2316 spin_unlock_irq(&img_request->completion_lock);
0f2d5be7 2317 rbd_img_request_put(img_request);
2169238d
AE
2318
2319 if (!more)
2320 rbd_img_request_complete(img_request);
2321}
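/*
 * Editorial worked example, not part of the original source: with
 * three object requests, if request 2 completes first nothing is
 * handed back to the block layer because next_completion is still 0.
 * Once requests 0 and 1 complete, the loop above ends requests 0, 1
 * and 2 in order and next_completion advances past all three.
 */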
2322
3b434a2a
JD
2323/*
2324 * Add individual osd ops to the given ceph_osd_request and prepare
2325 * them for submission. num_ops is the current number of
2326 * osd operations already added to the osd request.
2327 */
2328static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2329 struct ceph_osd_request *osd_request,
2330 enum obj_operation_type op_type,
2331 unsigned int num_ops)
2332{
2333 struct rbd_img_request *img_request = obj_request->img_request;
2334 struct rbd_device *rbd_dev = img_request->rbd_dev;
2335 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2336 u64 offset = obj_request->offset;
2337 u64 length = obj_request->length;
2338 u64 img_end;
2339 u16 opcode;
2340
2341 if (op_type == OBJ_OP_DISCARD) {
d3246fb0
JD
2342 if (!offset && length == object_size &&
2343 (!img_request_layered_test(img_request) ||
2344 !obj_request_overlaps_parent(obj_request))) {
3b434a2a
JD
2345 opcode = CEPH_OSD_OP_DELETE;
2346 } else if ((offset + length == object_size)) {
2347 opcode = CEPH_OSD_OP_TRUNCATE;
2348 } else {
2349 down_read(&rbd_dev->header_rwsem);
2350 img_end = rbd_dev->header.image_size;
2351 up_read(&rbd_dev->header_rwsem);
2352
2353 if (obj_request->img_offset + length == img_end)
2354 opcode = CEPH_OSD_OP_TRUNCATE;
2355 else
2356 opcode = CEPH_OSD_OP_ZERO;
2357 }
2358 } else if (op_type == OBJ_OP_WRITE) {
2359 opcode = CEPH_OSD_OP_WRITE;
2360 osd_req_op_alloc_hint_init(osd_request, num_ops,
2361 object_size, object_size);
2362 num_ops++;
2363 } else {
2364 opcode = CEPH_OSD_OP_READ;
2365 }
2366
7e868b6e
ID
2367 if (opcode == CEPH_OSD_OP_DELETE)
2368 osd_req_op_init(osd_request, num_ops, opcode);
2369 else
2370 osd_req_op_extent_init(osd_request, num_ops, opcode,
2371 offset, length, 0, 0);
2372
3b434a2a
JD
2373 if (obj_request->type == OBJ_REQUEST_BIO)
2374 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2375 obj_request->bio_list, length);
2376 else if (obj_request->type == OBJ_REQUEST_PAGES)
2377 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2378 obj_request->pages, length,
2379 offset & ~PAGE_MASK, false, false);
2380
2381 /* Discards are also writes */
2382 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2383 rbd_osd_req_format_write(obj_request);
2384 else
2385 rbd_osd_req_format_read(obj_request);
2386}
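/*
 * Editorial worked example, not part of the original source; assumes
 * the default 4 MB (0x400000) object size and a discard that does not
 * end at the end of the image:
 *
 *	discard offset 0, length 0x400000        -> CEPH_OSD_OP_DELETE
 *	discard offset 0x200000, length 0x200000 -> CEPH_OSD_OP_TRUNCATE
 *	discard offset 0x100000, length 0x100000 -> CEPH_OSD_OP_ZERO
 *
 * (the first case additionally requires that the object not overlap a
 * parent image; a discard ending at the end of the image also maps to
 * CEPH_OSD_OP_TRUNCATE).
 */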
2387
f1a4739f
AE
2388/*
2389 * Split up an image request into one or more object requests, each
2390 * to a different object. The "type" parameter indicates whether
2391 * "data_desc" is the pointer to the head of a list of bio
2392 * structures, or the base of a page array. In either case this
2393 * function assumes data_desc describes memory sufficient to hold
2394 * all data described by the image request.
2395 */
2396static int rbd_img_request_fill(struct rbd_img_request *img_request,
2397 enum obj_request_type type,
2398 void *data_desc)
bf0d5f50
AE
2399{
2400 struct rbd_device *rbd_dev = img_request->rbd_dev;
2401 struct rbd_obj_request *obj_request = NULL;
2402 struct rbd_obj_request *next_obj_request;
a158073c 2403 struct bio *bio_list = NULL;
f1a4739f 2404 unsigned int bio_offset = 0;
a158073c 2405 struct page **pages = NULL;
6d2940c8 2406 enum obj_operation_type op_type;
7da22d29 2407 u64 img_offset;
bf0d5f50 2408 u64 resid;
bf0d5f50 2409
f1a4739f
AE
2410 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2411 (int)type, data_desc);
37206ee5 2412
7da22d29 2413 img_offset = img_request->offset;
bf0d5f50 2414 resid = img_request->length;
4dda41d3 2415 rbd_assert(resid > 0);
3b434a2a 2416 op_type = rbd_img_request_op_type(img_request);
f1a4739f
AE
2417
2418 if (type == OBJ_REQUEST_BIO) {
2419 bio_list = data_desc;
4f024f37
KO
2420 rbd_assert(img_offset ==
2421 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
90e98c52 2422 } else if (type == OBJ_REQUEST_PAGES) {
f1a4739f
AE
2423 pages = data_desc;
2424 }
2425
bf0d5f50 2426 while (resid) {
2fa12320 2427 struct ceph_osd_request *osd_req;
bf0d5f50 2428 const char *object_name;
bf0d5f50
AE
2429 u64 offset;
2430 u64 length;
2431
7da22d29 2432 object_name = rbd_segment_name(rbd_dev, img_offset);
bf0d5f50
AE
2433 if (!object_name)
2434 goto out_unwind;
7da22d29
AE
2435 offset = rbd_segment_offset(rbd_dev, img_offset);
2436 length = rbd_segment_length(rbd_dev, img_offset, resid);
bf0d5f50 2437 obj_request = rbd_obj_request_create(object_name,
f1a4739f 2438 offset, length, type);
78c2a44a
AE
2439 /* object request has its own copy of the object name */
2440 rbd_segment_name_free(object_name);
bf0d5f50
AE
2441 if (!obj_request)
2442 goto out_unwind;
62054da6 2443
03507db6
JD
2444 /*
2445 * set obj_request->img_request before creating the
2446 * osd_request so that it gets the right snapc
2447 */
2448 rbd_img_obj_request_add(img_request, obj_request);
bf0d5f50 2449
f1a4739f
AE
2450 if (type == OBJ_REQUEST_BIO) {
2451 unsigned int clone_size;
2452
2453 rbd_assert(length <= (u64)UINT_MAX);
2454 clone_size = (unsigned int)length;
2455 obj_request->bio_list =
2456 bio_chain_clone_range(&bio_list,
2457 &bio_offset,
2458 clone_size,
2459 GFP_ATOMIC);
2460 if (!obj_request->bio_list)
62054da6 2461 goto out_unwind;
90e98c52 2462 } else if (type == OBJ_REQUEST_PAGES) {
f1a4739f
AE
2463 unsigned int page_count;
2464
2465 obj_request->pages = pages;
2466 page_count = (u32)calc_pages_for(offset, length);
2467 obj_request->page_count = page_count;
2468 if ((offset + length) & ~PAGE_MASK)
2469 page_count--; /* more on last page */
2470 pages += page_count;
2471 }
bf0d5f50 2472
6d2940c8
GZ
2473 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2474 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2475 obj_request);
2fa12320 2476 if (!osd_req)
62054da6 2477 goto out_unwind;
3b434a2a 2478
2fa12320 2479 obj_request->osd_req = osd_req;
2169238d 2480 obj_request->callback = rbd_img_obj_callback;
3b434a2a 2481 obj_request->img_offset = img_offset;
9d4df01f 2482
3b434a2a 2483 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
430c28c3 2484
3b434a2a 2485 rbd_img_request_get(img_request);
bf0d5f50 2486
7da22d29 2487 img_offset += length;
bf0d5f50
AE
2488 resid -= length;
2489 }
2490
2491 return 0;
2492
bf0d5f50
AE
2493out_unwind:
2494 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
42dd037c 2495 rbd_img_obj_request_del(img_request, obj_request);
bf0d5f50
AE
2496
2497 return -ENOMEM;
2498}
2499
0eefd470
AE
2500static void
2501rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2502{
2503 struct rbd_img_request *img_request;
2504 struct rbd_device *rbd_dev;
ebda6408 2505 struct page **pages;
0eefd470
AE
2506 u32 page_count;
2507
d3246fb0
JD
2508 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2509 obj_request->type == OBJ_REQUEST_NODATA);
0eefd470
AE
2510 rbd_assert(obj_request_img_data_test(obj_request));
2511 img_request = obj_request->img_request;
2512 rbd_assert(img_request);
2513
2514 rbd_dev = img_request->rbd_dev;
2515 rbd_assert(rbd_dev);
0eefd470 2516
ebda6408
AE
2517 pages = obj_request->copyup_pages;
2518 rbd_assert(pages != NULL);
0eefd470 2519 obj_request->copyup_pages = NULL;
ebda6408
AE
2520 page_count = obj_request->copyup_page_count;
2521 rbd_assert(page_count);
2522 obj_request->copyup_page_count = 0;
2523 ceph_release_page_vector(pages, page_count);
0eefd470
AE
2524
2525 /*
2526 * We want the transfer count to reflect the size of the
2527 * original write request. There is no such thing as a
2528 * successful short write, so if the request was successful
2529 * we can just set it to the originally-requested length.
2530 */
2531 if (!obj_request->result)
2532 obj_request->xferred = obj_request->length;
2533
2534 /* Finish up with the normal image object callback */
2535
2536 rbd_img_obj_callback(obj_request);
2537}
2538
3d7efd18
AE
2539static void
2540rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2541{
2542 struct rbd_obj_request *orig_request;
0eefd470
AE
2543 struct ceph_osd_request *osd_req;
2544 struct ceph_osd_client *osdc;
2545 struct rbd_device *rbd_dev;
3d7efd18 2546 struct page **pages;
d3246fb0 2547 enum obj_operation_type op_type;
ebda6408 2548 u32 page_count;
bbea1c1a 2549 int img_result;
ebda6408 2550 u64 parent_length;
3d7efd18
AE
2551
2552 rbd_assert(img_request_child_test(img_request));
2553
2554 /* First get what we need from the image request */
2555
2556 pages = img_request->copyup_pages;
2557 rbd_assert(pages != NULL);
2558 img_request->copyup_pages = NULL;
ebda6408
AE
2559 page_count = img_request->copyup_page_count;
2560 rbd_assert(page_count);
2561 img_request->copyup_page_count = 0;
3d7efd18
AE
2562
2563 orig_request = img_request->obj_request;
2564 rbd_assert(orig_request != NULL);
b91f09f1 2565 rbd_assert(obj_request_type_valid(orig_request->type));
bbea1c1a 2566 img_result = img_request->result;
ebda6408
AE
2567 parent_length = img_request->length;
2568 rbd_assert(parent_length == img_request->xferred);
91c6febb 2569 rbd_img_request_put(img_request);
3d7efd18 2570
91c6febb
AE
2571 rbd_assert(orig_request->img_request);
2572 rbd_dev = orig_request->img_request->rbd_dev;
0eefd470 2573 rbd_assert(rbd_dev);
0eefd470 2574
bbea1c1a
AE
2575 /*
2576 * If the overlap has become 0 (most likely because the
2577 * image has been flattened) we need to free the pages
2578 * and re-submit the original write request.
2579 */
2580 if (!rbd_dev->parent_overlap) {
2581 struct ceph_osd_client *osdc;
3d7efd18 2582
bbea1c1a
AE
2583 ceph_release_page_vector(pages, page_count);
2584 osdc = &rbd_dev->rbd_client->client->osdc;
2585 img_result = rbd_obj_request_submit(osdc, orig_request);
2586 if (!img_result)
2587 return;
2588 }
0eefd470 2589
bbea1c1a 2590 if (img_result)
0eefd470 2591 goto out_err;
0eefd470 2592
8785b1d4
AE
2593 /*
2594 * The original osd request is of no use to us any more.
0ccd5926 2595 * We need a new one that can hold the three ops in a copyup
8785b1d4
AE
2596 * request. Allocate the new copyup osd request for the
2597 * original request, and release the old one.
2598 */
bbea1c1a 2599 img_result = -ENOMEM;
0eefd470
AE
2600 osd_req = rbd_osd_req_create_copyup(orig_request);
2601 if (!osd_req)
2602 goto out_err;
8785b1d4 2603 rbd_osd_req_destroy(orig_request->osd_req);
0eefd470
AE
2604 orig_request->osd_req = osd_req;
2605 orig_request->copyup_pages = pages;
ebda6408 2606 orig_request->copyup_page_count = page_count;
3d7efd18 2607
0eefd470 2608 /* Initialize the copyup op */
3d7efd18 2609
0eefd470 2610 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
ebda6408 2611 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
0eefd470 2612 false, false);
3d7efd18 2613
d3246fb0 2614 /* Add the other op(s) */
0eefd470 2615
d3246fb0
JD
2616 op_type = rbd_img_request_op_type(orig_request->img_request);
2617 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
0eefd470
AE
2618
2619 /* All set, send it off. */
2620
2621 orig_request->callback = rbd_img_obj_copyup_callback;
2622 osdc = &rbd_dev->rbd_client->client->osdc;
bbea1c1a
AE
2623 img_result = rbd_obj_request_submit(osdc, orig_request);
2624 if (!img_result)
0eefd470
AE
2625 return;
2626out_err:
2627 /* Record the error code and complete the request */
2628
bbea1c1a 2629 orig_request->result = img_result;
0eefd470
AE
2630 orig_request->xferred = 0;
2631 obj_request_done_set(orig_request);
2632 rbd_obj_request_complete(orig_request);
3d7efd18
AE
2633}
2634
2635/*
2636 * Read from the parent image the range of data that covers the
2637 * entire target of the given object request. This is used for
2638 * satisfying a layered image write request when the target of an
2639 * object request from the image request does not exist.
2640 *
2641 * A page array big enough to hold the returned data is allocated
2642 * and supplied to rbd_img_request_fill() as the "data descriptor."
2643 * When the read completes, this page array will be transferred to
2644 * the original object request for the copyup operation.
2645 *
2646 * If an error occurs, record it as the result of the original
2647 * object request and mark it done so it gets completed.
2648 */
2649static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2650{
2651 struct rbd_img_request *img_request = NULL;
2652 struct rbd_img_request *parent_request = NULL;
2653 struct rbd_device *rbd_dev;
2654 u64 img_offset;
2655 u64 length;
2656 struct page **pages = NULL;
2657 u32 page_count;
2658 int result;
2659
2660 rbd_assert(obj_request_img_data_test(obj_request));
b91f09f1 2661 rbd_assert(obj_request_type_valid(obj_request->type));
3d7efd18
AE
2662
2663 img_request = obj_request->img_request;
2664 rbd_assert(img_request != NULL);
2665 rbd_dev = img_request->rbd_dev;
2666 rbd_assert(rbd_dev->parent != NULL);
2667
2668 /*
2669 * Determine the byte range covered by the object in the
2670 * child image to which the original request was to be sent.
2671 */
2672 img_offset = obj_request->img_offset - obj_request->offset;
2673 length = (u64)1 << rbd_dev->header.obj_order;
2674
a9e8ba2c
AE
2675 /*
2676 * There is no defined parent data beyond the parent
2677 * overlap, so limit what we read at that boundary if
2678 * necessary.
2679 */
2680 if (img_offset + length > rbd_dev->parent_overlap) {
2681 rbd_assert(img_offset < rbd_dev->parent_overlap);
2682 length = rbd_dev->parent_overlap - img_offset;
2683 }
2684
3d7efd18
AE
2685 /*
2686 * Allocate a page array big enough to receive the data read
2687 * from the parent.
2688 */
2689 page_count = (u32)calc_pages_for(0, length);
2690 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2691 if (IS_ERR(pages)) {
2692 result = PTR_ERR(pages);
2693 pages = NULL;
2694 goto out_err;
2695 }
2696
2697 result = -ENOMEM;
e93f3152
AE
2698 parent_request = rbd_parent_request_create(obj_request,
2699 img_offset, length);
3d7efd18
AE
2700 if (!parent_request)
2701 goto out_err;
3d7efd18
AE
2702
2703 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2704 if (result)
2705 goto out_err;
2706 parent_request->copyup_pages = pages;
ebda6408 2707 parent_request->copyup_page_count = page_count;
3d7efd18
AE
2708
2709 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2710 result = rbd_img_request_submit(parent_request);
2711 if (!result)
2712 return 0;
2713
2714 parent_request->copyup_pages = NULL;
ebda6408 2715 parent_request->copyup_page_count = 0;
3d7efd18
AE
2716 parent_request->obj_request = NULL;
2717 rbd_obj_request_put(obj_request);
2718out_err:
2719 if (pages)
2720 ceph_release_page_vector(pages, page_count);
2721 if (parent_request)
2722 rbd_img_request_put(parent_request);
2723 obj_request->result = result;
2724 obj_request->xferred = 0;
2725 obj_request_done_set(obj_request);
2726
2727 return result;
2728}
2729
c5b5ef6c
AE
2730static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2731{
c5b5ef6c 2732 struct rbd_obj_request *orig_request;
638f5abe 2733 struct rbd_device *rbd_dev;
c5b5ef6c
AE
2734 int result;
2735
2736 rbd_assert(!obj_request_img_data_test(obj_request));
2737
2738 /*
2739 * All we need from the object request is the original
2740 * request and the result of the STAT op. Grab those, then
2741 * we're done with the request.
2742 */
2743 orig_request = obj_request->obj_request;
2744 obj_request->obj_request = NULL;
912c317d 2745 rbd_obj_request_put(orig_request);
c5b5ef6c
AE
2746 rbd_assert(orig_request);
2747 rbd_assert(orig_request->img_request);
2748
2749 result = obj_request->result;
2750 obj_request->result = 0;
2751
2752 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2753 obj_request, orig_request, result,
2754 obj_request->xferred, obj_request->length);
2755 rbd_obj_request_put(obj_request);
2756
638f5abe
AE
2757 /*
2758 * If the overlap has become 0 (most likely because the
2759 * image has been flattened) we just need to re-submit
2760 * the original write request.
2761 */
2762 rbd_dev = orig_request->img_request->rbd_dev;
2763 if (!rbd_dev->parent_overlap) {
2764 struct ceph_osd_client *osdc;
2765
638f5abe
AE
2766 osdc = &rbd_dev->rbd_client->client->osdc;
2767 result = rbd_obj_request_submit(osdc, orig_request);
2768 if (!result)
2769 return;
2770 }
c5b5ef6c
AE
2771
2772 /*
2773 * Our only purpose here is to determine whether the object
2774 * exists, and we don't want to treat the non-existence as
2775 * an error. If something else comes back, transfer the
2776 * error to the original request and complete it now.
2777 */
2778 if (!result) {
2779 obj_request_existence_set(orig_request, true);
2780 } else if (result == -ENOENT) {
2781 obj_request_existence_set(orig_request, false);
2782 } else if (result) {
2783 orig_request->result = result;
3d7efd18 2784 goto out;
c5b5ef6c
AE
2785 }
2786
2787 /*
2788 * Resubmit the original request now that we have recorded
2789 * whether the target object exists.
2790 */
b454e36d 2791 orig_request->result = rbd_img_obj_request_submit(orig_request);
3d7efd18 2792out:
c5b5ef6c
AE
2793 if (orig_request->result)
2794 rbd_obj_request_complete(orig_request);
c5b5ef6c
AE
2795}
2796
2797static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2798{
2799 struct rbd_obj_request *stat_request;
2800 struct rbd_device *rbd_dev;
2801 struct ceph_osd_client *osdc;
2802 struct page **pages = NULL;
2803 u32 page_count;
2804 size_t size;
2805 int ret;
2806
2807 /*
2808 * The response data for a STAT call consists of:
2809 * le64 length;
2810 * struct {
2811 * le32 tv_sec;
2812 * le32 tv_nsec;
2813 * } mtime;
2814 */
2815 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2816 page_count = (u32)calc_pages_for(0, size);
2817 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2818 if (IS_ERR(pages))
2819 return PTR_ERR(pages);
2820
2821 ret = -ENOMEM;
2822 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2823 OBJ_REQUEST_PAGES);
2824 if (!stat_request)
2825 goto out;
2826
2827 rbd_obj_request_get(obj_request);
2828 stat_request->obj_request = obj_request;
2829 stat_request->pages = pages;
2830 stat_request->page_count = page_count;
2831
2832 rbd_assert(obj_request->img_request);
2833 rbd_dev = obj_request->img_request->rbd_dev;
6d2940c8 2834 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b3 2835 stat_request);
c5b5ef6c
AE
2836 if (!stat_request->osd_req)
2837 goto out;
2838 stat_request->callback = rbd_img_obj_exists_callback;
2839
2840 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2841 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2842 false, false);
9d4df01f 2843 rbd_osd_req_format_read(stat_request);
c5b5ef6c
AE
2844
2845 osdc = &rbd_dev->rbd_client->client->osdc;
2846 ret = rbd_obj_request_submit(osdc, stat_request);
2847out:
2848 if (ret)
2849 rbd_obj_request_put(obj_request);
2850
2851 return ret;
2852}
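/*
 * Editorial note, not part of the original source, restating the STAT
 * reply layout described above: the response is
 * sizeof(__le64) + 2 * sizeof(__le32) = 16 bytes, so the offset-0
 * buffer allocated for it always fits in a single page
 * (calc_pages_for(0, 16) == 1).
 */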
2853
70d045f6 2854static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
b454e36d
AE
2855{
2856 struct rbd_img_request *img_request;
a9e8ba2c 2857 struct rbd_device *rbd_dev;
b454e36d
AE
2858
2859 rbd_assert(obj_request_img_data_test(obj_request));
2860
2861 img_request = obj_request->img_request;
2862 rbd_assert(img_request);
a9e8ba2c 2863 rbd_dev = img_request->rbd_dev;
b454e36d 2864
70d045f6 2865 /* Reads */
1c220881
JD
2866 if (!img_request_write_test(img_request) &&
2867 !img_request_discard_test(img_request))
70d045f6
ID
2868 return true;
2869
2870 /* Non-layered writes */
2871 if (!img_request_layered_test(img_request))
2872 return true;
2873
b454e36d 2874 /*
70d045f6
ID
2875 * Layered writes outside of the parent overlap range don't
2876 * share any data with the parent.
b454e36d 2877 */
70d045f6
ID
2878 if (!obj_request_overlaps_parent(obj_request))
2879 return true;
b454e36d 2880
c622d226
GZ
2881 /*
2882 * Entire-object layered writes - we will overwrite whatever
2883 * parent data there is anyway.
2884 */
2885 if (!obj_request->offset &&
2886 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2887 return true;
2888
70d045f6
ID
2889 /*
2890 * If the object is known to already exist, its parent data has
2891 * already been copied.
2892 */
2893 if (obj_request_known_test(obj_request) &&
2894 obj_request_exists_test(obj_request))
2895 return true;
2896
2897 return false;
2898}
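/*
 * Editorial example, not part of the original source: a small write
 * into a clone, landing in an object that is covered by the parent
 * overlap and is not yet known to exist, is not "simple"; it takes the
 * exists-check/copyup path in rbd_img_obj_request_submit() below.  The
 * same write to an object already known to exist is sent directly to
 * the osd.
 */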
2899
2900static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2901{
2902 if (img_obj_request_simple(obj_request)) {
b454e36d
AE
2903 struct rbd_device *rbd_dev;
2904 struct ceph_osd_client *osdc;
2905
2906 rbd_dev = obj_request->img_request->rbd_dev;
2907 osdc = &rbd_dev->rbd_client->client->osdc;
2908
2909 return rbd_obj_request_submit(osdc, obj_request);
2910 }
2911
2912 /*
3d7efd18
AE
2913 * It's a layered write. The target object might exist but
2914 * we may not know that yet. If we know it doesn't exist,
2915 * start by reading the data for the full target object from
2916 * the parent so we can use it for a copyup to the target.
b454e36d 2917 */
70d045f6 2918 if (obj_request_known_test(obj_request))
3d7efd18
AE
2919 return rbd_img_obj_parent_read_full(obj_request);
2920
2921 /* We don't know whether the target exists. Go find out. */
b454e36d
AE
2922
2923 return rbd_img_obj_exists_submit(obj_request);
2924}
2925
bf0d5f50
AE
2926static int rbd_img_request_submit(struct rbd_img_request *img_request)
2927{
bf0d5f50 2928 struct rbd_obj_request *obj_request;
46faeed4 2929 struct rbd_obj_request *next_obj_request;
bf0d5f50 2930
37206ee5 2931 dout("%s: img %p\n", __func__, img_request);
46faeed4 2932 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
bf0d5f50
AE
2933 int ret;
2934
b454e36d 2935 ret = rbd_img_obj_request_submit(obj_request);
bf0d5f50
AE
2936 if (ret)
2937 return ret;
bf0d5f50
AE
2938 }
2939
2940 return 0;
2941}
8b3e1a56
AE
2942
2943static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2944{
2945 struct rbd_obj_request *obj_request;
a9e8ba2c
AE
2946 struct rbd_device *rbd_dev;
2947 u64 obj_end;
02c74fba
AE
2948 u64 img_xferred;
2949 int img_result;
8b3e1a56
AE
2950
2951 rbd_assert(img_request_child_test(img_request));
2952
02c74fba
AE
2953 /* First get what we need from the image request and release it */
2954
8b3e1a56 2955 obj_request = img_request->obj_request;
02c74fba
AE
2956 img_xferred = img_request->xferred;
2957 img_result = img_request->result;
2958 rbd_img_request_put(img_request);
2959
2960 /*
2961 * If the overlap has become 0 (most likely because the
2962 * image has been flattened) we need to re-submit the
2963 * original request.
2964 */
a9e8ba2c
AE
2965 rbd_assert(obj_request);
2966 rbd_assert(obj_request->img_request);
02c74fba
AE
2967 rbd_dev = obj_request->img_request->rbd_dev;
2968 if (!rbd_dev->parent_overlap) {
2969 struct ceph_osd_client *osdc;
2970
2971 osdc = &rbd_dev->rbd_client->client->osdc;
2972 img_result = rbd_obj_request_submit(osdc, obj_request);
2973 if (!img_result)
2974 return;
2975 }
a9e8ba2c 2976
02c74fba 2977 obj_request->result = img_result;
a9e8ba2c
AE
2978 if (obj_request->result)
2979 goto out;
2980
2981 /*
2982 * We need to zero anything beyond the parent overlap
2983 * boundary. Since rbd_img_obj_request_read_callback()
2984 * will zero anything beyond the end of a short read, an
2985 * easy way to do this is to pretend the data from the
2986 * parent came up short--ending at the overlap boundary.
2987 */
2988 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2989 obj_end = obj_request->img_offset + obj_request->length;
a9e8ba2c
AE
2990 if (obj_end > rbd_dev->parent_overlap) {
2991 u64 xferred = 0;
2992
2993 if (obj_request->img_offset < rbd_dev->parent_overlap)
2994 xferred = rbd_dev->parent_overlap -
2995 obj_request->img_offset;
8b3e1a56 2996
02c74fba 2997 obj_request->xferred = min(img_xferred, xferred);
a9e8ba2c 2998 } else {
02c74fba 2999 obj_request->xferred = img_xferred;
a9e8ba2c
AE
3000 }
3001out:
8b3e1a56
AE
3002 rbd_img_obj_request_read_callback(obj_request);
3003 rbd_obj_request_complete(obj_request);
3004}
3005
3006static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3007{
8b3e1a56
AE
3008 struct rbd_img_request *img_request;
3009 int result;
3010
3011 rbd_assert(obj_request_img_data_test(obj_request));
3012 rbd_assert(obj_request->img_request != NULL);
3013 rbd_assert(obj_request->result == (s32) -ENOENT);
5b2ab72d 3014 rbd_assert(obj_request_type_valid(obj_request->type));
8b3e1a56 3015
8b3e1a56 3016 /* rbd_read_finish(obj_request, obj_request->length); */
e93f3152 3017 img_request = rbd_parent_request_create(obj_request,
8b3e1a56 3018 obj_request->img_offset,
e93f3152 3019 obj_request->length);
8b3e1a56
AE
3020 result = -ENOMEM;
3021 if (!img_request)
3022 goto out_err;
3023
5b2ab72d
AE
3024 if (obj_request->type == OBJ_REQUEST_BIO)
3025 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3026 obj_request->bio_list);
3027 else
3028 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3029 obj_request->pages);
8b3e1a56
AE
3030 if (result)
3031 goto out_err;
3032
3033 img_request->callback = rbd_img_parent_read_callback;
3034 result = rbd_img_request_submit(img_request);
3035 if (result)
3036 goto out_err;
3037
3038 return;
3039out_err:
3040 if (img_request)
3041 rbd_img_request_put(img_request);
3042 obj_request->result = result;
3043 obj_request->xferred = 0;
3044 obj_request_done_set(obj_request);
3045}
bf0d5f50 3046
20e0af67 3047static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
b8d70035
AE
3048{
3049 struct rbd_obj_request *obj_request;
2169238d 3050 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
b8d70035
AE
3051 int ret;
3052
3053 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3054 OBJ_REQUEST_NODATA);
3055 if (!obj_request)
3056 return -ENOMEM;
3057
3058 ret = -ENOMEM;
6d2940c8 3059 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b3 3060 obj_request);
b8d70035
AE
3061 if (!obj_request->osd_req)
3062 goto out;
3063
c99d2d4a 3064 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
cc4a38bd 3065 notify_id, 0, 0);
9d4df01f 3066 rbd_osd_req_format_read(obj_request);
430c28c3 3067
b8d70035 3068 ret = rbd_obj_request_submit(osdc, obj_request);
cf81b60e 3069 if (ret)
20e0af67
JD
3070 goto out;
3071 ret = rbd_obj_request_wait(obj_request);
3072out:
3073 rbd_obj_request_put(obj_request);
b8d70035
AE
3074
3075 return ret;
3076}
3077
3078static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3079{
3080 struct rbd_device *rbd_dev = (struct rbd_device *)data;
e627db08 3081 int ret;
b8d70035
AE
3082
3083 if (!rbd_dev)
3084 return;
3085
37206ee5 3086 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
cc4a38bd
AE
3087 rbd_dev->header_name, (unsigned long long)notify_id,
3088 (unsigned int)opcode);
52bb1f9b
ID
3089
3090 /*
3091 * Until adequate refresh error handling is in place, there is
3092 * not much we can do here, except warn.
3093 *
3094 * See http://tracker.ceph.com/issues/5040
3095 */
e627db08
AE
3096 ret = rbd_dev_refresh(rbd_dev);
3097 if (ret)
9584d508 3098 rbd_warn(rbd_dev, "refresh failed: %d", ret);
b8d70035 3099
52bb1f9b
ID
3100 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3101 if (ret)
9584d508 3102 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
b8d70035
AE
3103}
3104
bb040aa0
ID
3105/*
3106 * Send a (un)watch request and wait for the ack. Return a request
3107 * with a ref held on success, or an ERR_PTR on error.
3108 */
3109static struct rbd_obj_request *rbd_obj_watch_request_helper(
3110 struct rbd_device *rbd_dev,
3111 bool watch)
3112{
3113 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3114 struct rbd_obj_request *obj_request;
3115 int ret;
3116
3117 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3118 OBJ_REQUEST_NODATA);
3119 if (!obj_request)
3120 return ERR_PTR(-ENOMEM);
3121
6d2940c8 3122 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
bb040aa0
ID
3123 obj_request);
3124 if (!obj_request->osd_req) {
3125 ret = -ENOMEM;
3126 goto out;
3127 }
3128
3129 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3130 rbd_dev->watch_event->cookie, 0, watch);
3131 rbd_osd_req_format_write(obj_request);
3132
3133 if (watch)
3134 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3135
3136 ret = rbd_obj_request_submit(osdc, obj_request);
3137 if (ret)
3138 goto out;
3139
3140 ret = rbd_obj_request_wait(obj_request);
3141 if (ret)
3142 goto out;
3143
3144 ret = obj_request->result;
3145 if (ret) {
3146 if (watch)
3147 rbd_obj_request_end(obj_request);
3148 goto out;
3149 }
3150
3151 return obj_request;
3152
3153out:
3154 rbd_obj_request_put(obj_request);
3155 return ERR_PTR(ret);
3156}
3157
9969ebc5 3158/*
b30a01f2 3159 * Initiate a watch request, synchronously.
9969ebc5 3160 */
b30a01f2 3161static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
9969ebc5
AE
3162{
3163 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3164 struct rbd_obj_request *obj_request;
9969ebc5
AE
3165 int ret;
3166
b30a01f2
ID
3167 rbd_assert(!rbd_dev->watch_event);
3168 rbd_assert(!rbd_dev->watch_request);
9969ebc5 3169
b30a01f2
ID
3170 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3171 &rbd_dev->watch_event);
3172 if (ret < 0)
3173 return ret;
3174
76756a51
ID
3175 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3176 if (IS_ERR(obj_request)) {
3177 ceph_osdc_cancel_event(rbd_dev->watch_event);
3178 rbd_dev->watch_event = NULL;
3179 return PTR_ERR(obj_request);
b30a01f2 3180 }
9969ebc5 3181
8eb87565
AE
3182 /*
3183 * A watch request is set to linger, so the underlying osd
3184 * request won't go away until we unregister it. We retain
3185 * a pointer to the object request during that time (in
76756a51
ID
3186 * rbd_dev->watch_request), so we'll keep a reference to it.
3187 * We'll drop that reference after we've unregistered it in
3188 * rbd_dev_header_unwatch_sync().
8eb87565 3189 */
b30a01f2 3190 rbd_dev->watch_request = obj_request;
8eb87565 3191
b30a01f2 3192 return 0;
b30a01f2
ID
3193}
3194
3195/*
3196 * Tear down a watch request, synchronously.
3197 */
76756a51 3198static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
b30a01f2 3199{
b30a01f2 3200 struct rbd_obj_request *obj_request;
b30a01f2
ID
3201
3202 rbd_assert(rbd_dev->watch_event);
3203 rbd_assert(rbd_dev->watch_request);
3204
76756a51 3205 rbd_obj_request_end(rbd_dev->watch_request);
8eb87565
AE
3206 rbd_obj_request_put(rbd_dev->watch_request);
3207 rbd_dev->watch_request = NULL;
b30a01f2 3208
76756a51
ID
3209 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3210 if (!IS_ERR(obj_request))
3211 rbd_obj_request_put(obj_request);
3212 else
3213 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3214 PTR_ERR(obj_request));
3215
9969ebc5
AE
3216 ceph_osdc_cancel_event(rbd_dev->watch_event);
3217 rbd_dev->watch_event = NULL;
fca27065
ID
3218}
3219
36be9a76 3220/*
f40eb349
AE
3221 * Synchronous osd object method call. Returns the number of bytes
3222 * returned in the inbound buffer, or a negative error code.
36be9a76
AE
3223 */
3224static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3225 const char *object_name,
3226 const char *class_name,
3227 const char *method_name,
4157976b 3228 const void *outbound,
36be9a76 3229 size_t outbound_size,
4157976b 3230 void *inbound,
e2a58ee5 3231 size_t inbound_size)
36be9a76 3232{
2169238d 3233 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
36be9a76 3234 struct rbd_obj_request *obj_request;
36be9a76
AE
3235 struct page **pages;
3236 u32 page_count;
3237 int ret;
3238
3239 /*
6010a451
AE
3240 * Method calls are ultimately read operations. The result
3241 * should be placed into the inbound buffer provided. They
3242 * also supply outbound data--parameters for the object
3243 * method. Currently if this is present it will be a
3244 * snapshot id.
36be9a76 3245 */
57385b51 3246 page_count = (u32)calc_pages_for(0, inbound_size);
36be9a76
AE
3247 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3248 if (IS_ERR(pages))
3249 return PTR_ERR(pages);
3250
3251 ret = -ENOMEM;
6010a451 3252 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
36be9a76
AE
3253 OBJ_REQUEST_PAGES);
3254 if (!obj_request)
3255 goto out;
3256
3257 obj_request->pages = pages;
3258 obj_request->page_count = page_count;
3259
6d2940c8 3260 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b3 3261 obj_request);
36be9a76
AE
3262 if (!obj_request->osd_req)
3263 goto out;
3264
c99d2d4a 3265 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
04017e29
AE
3266 class_name, method_name);
3267 if (outbound_size) {
3268 struct ceph_pagelist *pagelist;
3269
3270 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3271 if (!pagelist)
3272 goto out;
3273
3274 ceph_pagelist_init(pagelist);
3275 ceph_pagelist_append(pagelist, outbound, outbound_size);
3276 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3277 pagelist);
3278 }
a4ce40a9
AE
3279 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3280 obj_request->pages, inbound_size,
44cd188d 3281 0, false, false);
9d4df01f 3282 rbd_osd_req_format_read(obj_request);
430c28c3 3283
36be9a76
AE
3284 ret = rbd_obj_request_submit(osdc, obj_request);
3285 if (ret)
3286 goto out;
3287 ret = rbd_obj_request_wait(obj_request);
3288 if (ret)
3289 goto out;
3290
3291 ret = obj_request->result;
3292 if (ret < 0)
3293 goto out;
57385b51
AE
3294
3295 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3296 ret = (int)obj_request->xferred;
903bb32e 3297 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
36be9a76
AE
3298out:
3299 if (obj_request)
3300 rbd_obj_request_put(obj_request);
3301 else
3302 ceph_release_page_vector(pages, page_count);
3303
3304 return ret;
3305}
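/*
 * Editorial usage sketch, not part of the original source.  The class,
 * method and reply size below are assumptions for illustration
 * (modelled on the "rbd" class "get_size" call used elsewhere in this
 * driver); the point is the calling convention, where a non-negative
 * return value is the number of reply bytes:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	u8 reply[16];
 *	int ret;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  reply, sizeof (reply));
 *	if (ret < 0)
 *		return ret;
 */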
3306
bc1ecc65 3307static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
bf0d5f50 3308{
bc1ecc65 3309 struct rbd_img_request *img_request;
4e752f0a 3310 struct ceph_snap_context *snapc = NULL;
bc1ecc65
ID
3311 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3312 u64 length = blk_rq_bytes(rq);
6d2940c8 3313 enum obj_operation_type op_type;
4e752f0a 3314 u64 mapping_size;
bf0d5f50
AE
3315 int result;
3316
90e98c52
GZ
3317 if (rq->cmd_flags & REQ_DISCARD)
3318 op_type = OBJ_OP_DISCARD;
3319 else if (rq->cmd_flags & REQ_WRITE)
6d2940c8
GZ
3320 op_type = OBJ_OP_WRITE;
3321 else
3322 op_type = OBJ_OP_READ;
3323
bc1ecc65 3324 /* Ignore/skip any zero-length requests */
bf0d5f50 3325
bc1ecc65
ID
3326 if (!length) {
3327 dout("%s: zero-length request\n", __func__);
3328 result = 0;
3329 goto err_rq;
3330 }
bf0d5f50 3331
6d2940c8 3332 /* Only reads are allowed to a read-only device */
bc1ecc65 3333
6d2940c8 3334 if (op_type != OBJ_OP_READ) {
bc1ecc65
ID
3335 if (rbd_dev->mapping.read_only) {
3336 result = -EROFS;
3337 goto err_rq;
4dda41d3 3338 }
bc1ecc65
ID
3339 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3340 }
4dda41d3 3341
bc1ecc65
ID
3342 /*
3343 * Quit early if the mapped snapshot no longer exists. It's
3344 * still possible the snapshot will have disappeared by the
3345 * time our request arrives at the osd, but there's no sense in
3346 * sending it if we already know.
3347 */
3348 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3349 dout("request for non-existent snapshot");
3350 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3351 result = -ENXIO;
3352 goto err_rq;
3353 }
4dda41d3 3354
bc1ecc65
ID
3355 if (offset && length > U64_MAX - offset + 1) {
3356 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3357 length);
3358 result = -EINVAL;
3359 goto err_rq; /* Shouldn't happen */
3360 }
4dda41d3 3361
4e752f0a
JD
3362 down_read(&rbd_dev->header_rwsem);
3363 mapping_size = rbd_dev->mapping.size;
6d2940c8 3364 if (op_type != OBJ_OP_READ) {
4e752f0a
JD
3365 snapc = rbd_dev->header.snapc;
3366 ceph_get_snap_context(snapc);
3367 }
3368 up_read(&rbd_dev->header_rwsem);
3369
3370 if (offset + length > mapping_size) {
bc1ecc65 3371 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4e752f0a 3372 length, mapping_size);
bc1ecc65
ID
3373 result = -EIO;
3374 goto err_rq;
3375 }
bf0d5f50 3376
6d2940c8 3377 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4e752f0a 3378 snapc);
bc1ecc65
ID
3379 if (!img_request) {
3380 result = -ENOMEM;
3381 goto err_rq;
3382 }
3383 img_request->rq = rq;
bf0d5f50 3384
90e98c52
GZ
3385 if (op_type == OBJ_OP_DISCARD)
3386 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3387 NULL);
3388 else
3389 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3390 rq->bio);
bc1ecc65
ID
3391 if (result)
3392 goto err_img_request;
bf0d5f50 3393
bc1ecc65
ID
3394 result = rbd_img_request_submit(img_request);
3395 if (result)
3396 goto err_img_request;
bf0d5f50 3397
bc1ecc65 3398 return;
bf0d5f50 3399
bc1ecc65
ID
3400err_img_request:
3401 rbd_img_request_put(img_request);
3402err_rq:
3403 if (result)
3404 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
6d2940c8 3405 obj_op_name(op_type), length, offset, result);
e96a650a 3406 ceph_put_snap_context(snapc);
bc1ecc65
ID
3407 blk_end_request_all(rq, result);
3408}
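
/*
 * Illustrative note (not in the original source): the range check in
 * rbd_handle_request() rejects requests whose byte range would wrap
 * past U64_MAX.  It is "offset + length - 1 > U64_MAX" rearranged to
 * avoid the very overflow being tested for, e.g.:
 *
 *	offset = U64_MAX - 511, length = 1024
 *	U64_MAX - offset + 1 = 512, and 1024 > 512 -> -EINVAL
 *
 * The "offset &&" guard exists because U64_MAX - 0 + 1 itself wraps
 * to 0, and length alone can never exceed U64_MAX.
 */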
bf0d5f50 3409
bc1ecc65
ID
3410static void rbd_request_workfn(struct work_struct *work)
3411{
3412 struct rbd_device *rbd_dev =
3413 container_of(work, struct rbd_device, rq_work);
3414 struct request *rq, *next;
3415 LIST_HEAD(requests);
00a653e2 3416
bc1ecc65
ID
3417 spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
3418 list_splice_init(&rbd_dev->rq_queue, &requests);
3419 spin_unlock_irq(&rbd_dev->lock);
bf0d5f50 3420
bc1ecc65
ID
3421 list_for_each_entry_safe(rq, next, &requests, queuelist) {
3422 list_del_init(&rq->queuelist);
3423 rbd_handle_request(rbd_dev, rq);
3424 }
3425}
bf0d5f50 3426
bc1ecc65
ID
3427/*
3428 * Called with q->queue_lock held and interrupts disabled, possibly on
3429 * the way to schedule(). Do not sleep here!
3430 */
3431static void rbd_request_fn(struct request_queue *q)
3432{
3433 struct rbd_device *rbd_dev = q->queuedata;
3434 struct request *rq;
3435 int queued = 0;
3436
3437 rbd_assert(rbd_dev);
3438
3439 while ((rq = blk_fetch_request(q))) {
3440 /* Ignore any non-FS requests that filter through. */
3441 if (rq->cmd_type != REQ_TYPE_FS) {
3442 dout("%s: non-fs request type %d\n", __func__,
3443 (int) rq->cmd_type);
3444 __blk_end_request_all(rq, 0);
3445 continue;
bf0d5f50 3446 }
bc1ecc65
ID
3447
3448 list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
3449 queued++;
bf0d5f50 3450 }
bc1ecc65
ID
3451
3452 if (queued)
f5ee37bd 3453 queue_work(rbd_wq, &rbd_dev->rq_work);
bf0d5f50
AE
3454}
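
/*
 * Illustrative summary (not from the original source): because
 * rbd_request_fn() runs under q->queue_lock with interrupts disabled,
 * it only moves requests onto rbd_dev->rq_queue and kicks rbd_wq;
 * rbd_request_workfn() above then splices that list off under the
 * lock and services each request in process context, where blocking
 * is allowed.
 */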
3455
602adf40
YS
3456/*
3457 * A queue callback. Makes sure that we don't create a bio that spans across
3458 * multiple osd objects. One exception is a single-page bio,
f7760dad 3459 * which we handle later in bio_chain_clone_range().
602adf40
YS
3460 */
3461static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3462 struct bio_vec *bvec)
3463{
3464 struct rbd_device *rbd_dev = q->queuedata;
e5cfeed2
AE
3465 sector_t sector_offset;
3466 sector_t sectors_per_obj;
3467 sector_t obj_sector_offset;
3468 int ret;
3469
3470 /*
3471 * Find how far into its rbd object the bio's start sector
3472 * falls, after converting the partition-relative sector to an
3473 * offset relative to the enclosing device.
3474 */
3475 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3476 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3477 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3478
3479 /*
3480 * Compute the number of bytes from that offset to the end
3481 * of the object. Account for what's already used by the bio.
3482 */
3483 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3484 if (ret > bmd->bi_size)
3485 ret -= bmd->bi_size;
3486 else
3487 ret = 0;
3488
3489 /*
3490 * Don't send back more than was asked for. And if the bio
3491 * was empty, let the whole thing through because: "Note
3492 * that a block device *must* allow a single page to be
3493 * added to an empty bio."
3494 */
3495 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3496 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3497 ret = (int) bvec->bv_len;
3498
3499 return ret;
602adf40
YS
3500}
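
/*
 * Worked example (illustrative, assuming the default 4 MB objects,
 * i.e. obj_order == 22): sectors_per_obj = 1 << (22 - 9) = 8192.
 * For a bio starting at device sector 8000 with bi_size == 4096:
 *
 *	obj_sector_offset   = 8000 & 8191         = 8000
 *	bytes to object end = (8192 - 8000) << 9  = 98304
 *	minus bi_size                             = 94208
 *
 * so at most 94208 more bytes may be added to this bio before it
 * would cross into the next object.
 */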
3501
3502static void rbd_free_disk(struct rbd_device *rbd_dev)
3503{
3504 struct gendisk *disk = rbd_dev->disk;
3505
3506 if (!disk)
3507 return;
3508
a0cab924
AE
3509 rbd_dev->disk = NULL;
3510 if (disk->flags & GENHD_FL_UP) {
602adf40 3511 del_gendisk(disk);
a0cab924
AE
3512 if (disk->queue)
3513 blk_cleanup_queue(disk->queue);
3514 }
602adf40
YS
3515 put_disk(disk);
3516}
3517
788e2df3
AE
3518static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3519 const char *object_name,
7097f8df 3520 u64 offset, u64 length, void *buf)
788e2df3
AE
3521
3522{
2169238d 3523 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
788e2df3 3524 struct rbd_obj_request *obj_request;
788e2df3
AE
3525 struct page **pages = NULL;
3526 u32 page_count;
1ceae7ef 3527 size_t size;
788e2df3
AE
3528 int ret;
3529
3530 page_count = (u32) calc_pages_for(offset, length);
3531 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3532 if (IS_ERR(pages))
a8d42056 3533 return PTR_ERR(pages);
788e2df3
AE
3534
3535 ret = -ENOMEM;
3536 obj_request = rbd_obj_request_create(object_name, offset, length,
36be9a76 3537 OBJ_REQUEST_PAGES);
788e2df3
AE
3538 if (!obj_request)
3539 goto out;
3540
3541 obj_request->pages = pages;
3542 obj_request->page_count = page_count;
3543
6d2940c8 3544 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
deb236b3 3545 obj_request);
788e2df3
AE
3546 if (!obj_request->osd_req)
3547 goto out;
3548
c99d2d4a
AE
3549 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3550 offset, length, 0, 0);
406e2c9f 3551 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
a4ce40a9 3552 obj_request->pages,
44cd188d
AE
3553 obj_request->length,
3554 obj_request->offset & ~PAGE_MASK,
3555 false, false);
9d4df01f 3556 rbd_osd_req_format_read(obj_request);
430c28c3 3557
788e2df3
AE
3558 ret = rbd_obj_request_submit(osdc, obj_request);
3559 if (ret)
3560 goto out;
3561 ret = rbd_obj_request_wait(obj_request);
3562 if (ret)
3563 goto out;
3564
3565 ret = obj_request->result;
3566 if (ret < 0)
3567 goto out;
1ceae7ef
AE
3568
3569 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3570 size = (size_t) obj_request->xferred;
903bb32e 3571 ceph_copy_from_page_vector(pages, buf, 0, size);
7097f8df
AE
3572 rbd_assert(size <= (size_t)INT_MAX);
3573 ret = (int)size;
788e2df3
AE
3574out:
3575 if (obj_request)
3576 rbd_obj_request_put(obj_request);
3577 else
3578 ceph_release_page_vector(pages, page_count);
3579
3580 return ret;
3581}
3582
602adf40 3583/*
662518b1
AE
3584 * Read the complete header for the given rbd device. On successful
3585 * return, the rbd_dev->header field will contain up-to-date
3586 * information about the image.
602adf40 3587 */
99a41ebc 3588static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
602adf40 3589{
4156d998 3590 struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c9 3591 u32 snap_count = 0;
4156d998
AE
3592 u64 names_size = 0;
3593 u32 want_count;
3594 int ret;
602adf40 3595
00f1f36f 3596 /*
4156d998
AE
3597 * The complete header will include an array of its 64-bit
3598 * snapshot ids, followed by the names of those snapshots as
3599 * a contiguous block of NUL-terminated strings. Note that
3600 * the number of snapshots could change by the time we read
3601 * it in, in which case we re-read it.
00f1f36f 3602 */
4156d998
AE
3603 do {
3604 size_t size;
3605
3606 kfree(ondisk);
3607
3608 size = sizeof (*ondisk);
3609 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3610 size += names_size;
3611 ondisk = kmalloc(size, GFP_KERNEL);
3612 if (!ondisk)
662518b1 3613 return -ENOMEM;
4156d998 3614
788e2df3 3615 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
7097f8df 3616 0, size, ondisk);
4156d998 3617 if (ret < 0)
662518b1 3618 goto out;
c0cd10db 3619 if ((size_t)ret < size) {
4156d998 3620 ret = -ENXIO;
06ecc6cb
AE
3621 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3622 size, ret);
662518b1 3623 goto out;
4156d998
AE
3624 }
3625 if (!rbd_dev_ondisk_valid(ondisk)) {
3626 ret = -ENXIO;
06ecc6cb 3627 rbd_warn(rbd_dev, "invalid header");
662518b1 3628 goto out;
81e759fb 3629 }
602adf40 3630
4156d998
AE
3631 names_size = le64_to_cpu(ondisk->snap_names_len);
3632 want_count = snap_count;
3633 snap_count = le32_to_cpu(ondisk->snap_count);
3634 } while (snap_count != want_count);
00f1f36f 3635
662518b1
AE
3636 ret = rbd_header_from_disk(rbd_dev, ondisk);
3637out:
4156d998
AE
3638 kfree(ondisk);
3639
3640 return ret;
602adf40
YS
3641}
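
/*
 * Shape of the v1 header object read by the loop above (only the
 * pieces this function relies on are shown):
 *
 *	struct rbd_image_header_ondisk            fixed-size prefix
 *	struct rbd_image_snap_ondisk[snap_count]  one entry per snapshot
 *	char snap_names[snap_names_len]           NUL-terminated names
 *
 * A snapshot created or deleted between reads changes snap_count,
 * which is why the buffer is resized and the read retried.
 */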
3642
15228ede
AE
3643/*
3644 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3645 * has disappeared from the (just updated) snapshot context.
3646 */
3647static void rbd_exists_validate(struct rbd_device *rbd_dev)
3648{
3649 u64 snap_id;
3650
3651 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3652 return;
3653
3654 snap_id = rbd_dev->spec->snap_id;
3655 if (snap_id == CEPH_NOSNAP)
3656 return;
3657
3658 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3659 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3660}
3661
9875201e
JD
3662static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3663{
3664 sector_t size;
3665 bool removing;
3666
3667 /*
3668 * Don't hold the lock while doing disk operations,
3669 * or lock ordering will conflict with the bdev mutex via:
3670 * rbd_add() -> blkdev_get() -> rbd_open()
3671 */
3672 spin_lock_irq(&rbd_dev->lock);
3673 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3674 spin_unlock_irq(&rbd_dev->lock);
3675 /*
3676 * If the device is being removed, rbd_dev->disk has
3677 * been destroyed, so don't try to update its size
3678 */
3679 if (!removing) {
3680 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3681 dout("setting size to %llu sectors", (unsigned long long)size);
3682 set_capacity(rbd_dev->disk, size);
3683 revalidate_disk(rbd_dev->disk);
3684 }
3685}
3686
cc4a38bd 3687static int rbd_dev_refresh(struct rbd_device *rbd_dev)
1fe5e993 3688{
e627db08 3689 u64 mapping_size;
1fe5e993
AE
3690 int ret;
3691
cfbf6377 3692 down_write(&rbd_dev->header_rwsem);
3b5cf2a2 3693 mapping_size = rbd_dev->mapping.size;
a720ae09
ID
3694
3695 ret = rbd_dev_header_info(rbd_dev);
52bb1f9b 3696 if (ret)
73e39e4d 3697 goto out;
15228ede 3698
e8f59b59
ID
3699 /*
3700 * If there is a parent, see if it has disappeared due to the
3701 * mapped image getting flattened.
3702 */
3703 if (rbd_dev->parent) {
3704 ret = rbd_dev_v2_parent_info(rbd_dev);
3705 if (ret)
73e39e4d 3706 goto out;
e8f59b59
ID
3707 }
3708
5ff1108c 3709 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
73e39e4d 3710 rbd_dev->mapping.size = rbd_dev->header.image_size;
5ff1108c
ID
3711 } else {
3712 /* validate mapped snapshot's EXISTS flag */
3713 rbd_exists_validate(rbd_dev);
3714 }
15228ede 3715
73e39e4d 3716out:
cfbf6377 3717 up_write(&rbd_dev->header_rwsem);
73e39e4d 3718 if (!ret && mapping_size != rbd_dev->mapping.size)
9875201e 3719 rbd_dev_update_size(rbd_dev);
1fe5e993 3720
73e39e4d 3721 return ret;
1fe5e993
AE
3722}
3723
602adf40
YS
3724static int rbd_init_disk(struct rbd_device *rbd_dev)
3725{
3726 struct gendisk *disk;
3727 struct request_queue *q;
593a9e7b 3728 u64 segment_size;
602adf40 3729
602adf40 3730 /* create gendisk info */
7e513d43
ID
3731 disk = alloc_disk(single_major ?
3732 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3733 RBD_MINORS_PER_MAJOR);
602adf40 3734 if (!disk)
1fcdb8aa 3735 return -ENOMEM;
602adf40 3736
f0f8cef5 3737 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
de71a297 3738 rbd_dev->dev_id);
602adf40 3739 disk->major = rbd_dev->major;
dd82fff1 3740 disk->first_minor = rbd_dev->minor;
7e513d43
ID
3741 if (single_major)
3742 disk->flags |= GENHD_FL_EXT_DEVT;
602adf40
YS
3743 disk->fops = &rbd_bd_ops;
3744 disk->private_data = rbd_dev;
3745
bf0d5f50 3746 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
602adf40
YS
3747 if (!q)
3748 goto out_disk;
029bcbd8 3749
593a9e7b
AE
3750 /* We use the default size, but let's be explicit about it. */
3751 blk_queue_physical_block_size(q, SECTOR_SIZE);
3752
029bcbd8 3753 /* set io sizes to object size */
593a9e7b
AE
3754 segment_size = rbd_obj_bytes(&rbd_dev->header);
3755 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3756 blk_queue_max_segment_size(q, segment_size);
3757 blk_queue_io_min(q, segment_size);
3758 blk_queue_io_opt(q, segment_size);
029bcbd8 3759
90e98c52
GZ
3760 /* enable the discard support */
3761 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3762 q->limits.discard_granularity = segment_size;
3763 q->limits.discard_alignment = segment_size;
b76f8239
JD
3764 q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
3765 q->limits.discard_zeroes_data = 1;
90e98c52 3766
602adf40
YS
3767 blk_queue_merge_bvec(q, rbd_merge_bvec);
3768 disk->queue = q;
3769
3770 q->queuedata = rbd_dev;
3771
3772 rbd_dev->disk = disk;
602adf40 3773
602adf40 3774 return 0;
602adf40
YS
3775out_disk:
3776 put_disk(disk);
1fcdb8aa
AE
3777
3778 return -ENOMEM;
602adf40
YS
3779}
3780
dfc5606d
YS
3781/*
3782 sysfs
3783*/
3784
593a9e7b
AE
3785static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3786{
3787 return container_of(dev, struct rbd_device, dev);
3788}
3789
dfc5606d
YS
3790static ssize_t rbd_size_show(struct device *dev,
3791 struct device_attribute *attr, char *buf)
3792{
593a9e7b 3793 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0 3794
fc71d833
AE
3795 return sprintf(buf, "%llu\n",
3796 (unsigned long long)rbd_dev->mapping.size);
dfc5606d
YS
3797}
3798
34b13184
AE
3799/*
3800 * Note this shows the features for whatever's mapped, which is not
3801 * necessarily the base image.
3802 */
3803static ssize_t rbd_features_show(struct device *dev,
3804 struct device_attribute *attr, char *buf)
3805{
3806 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3807
3808 return sprintf(buf, "0x%016llx\n",
fc71d833 3809 (unsigned long long)rbd_dev->mapping.features);
34b13184
AE
3810}
3811
dfc5606d
YS
3812static ssize_t rbd_major_show(struct device *dev,
3813 struct device_attribute *attr, char *buf)
3814{
593a9e7b 3815 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 3816
fc71d833
AE
3817 if (rbd_dev->major)
3818 return sprintf(buf, "%d\n", rbd_dev->major);
3819
3820 return sprintf(buf, "(none)\n");
dd82fff1
ID
3821}
3822
3823static ssize_t rbd_minor_show(struct device *dev,
3824 struct device_attribute *attr, char *buf)
3825{
3826 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
fc71d833 3827
dd82fff1 3828 return sprintf(buf, "%d\n", rbd_dev->minor);
dfc5606d
YS
3829}
3830
3831static ssize_t rbd_client_id_show(struct device *dev,
3832 struct device_attribute *attr, char *buf)
602adf40 3833{
593a9e7b 3834 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3835
1dbb4399
AE
3836 return sprintf(buf, "client%lld\n",
3837 ceph_client_id(rbd_dev->rbd_client->client));
602adf40
YS
3838}
3839
dfc5606d
YS
3840static ssize_t rbd_pool_show(struct device *dev,
3841 struct device_attribute *attr, char *buf)
602adf40 3842{
593a9e7b 3843 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3844
0d7dbfce 3845 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
dfc5606d
YS
3846}
3847
9bb2f334
AE
3848static ssize_t rbd_pool_id_show(struct device *dev,
3849 struct device_attribute *attr, char *buf)
3850{
3851 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3852
0d7dbfce 3853 return sprintf(buf, "%llu\n",
fc71d833 3854 (unsigned long long) rbd_dev->spec->pool_id);
9bb2f334
AE
3855}
3856
dfc5606d
YS
3857static ssize_t rbd_name_show(struct device *dev,
3858 struct device_attribute *attr, char *buf)
3859{
593a9e7b 3860 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3861
a92ffdf8
AE
3862 if (rbd_dev->spec->image_name)
3863 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3864
3865 return sprintf(buf, "(unknown)\n");
dfc5606d
YS
3866}
3867
589d30e0
AE
3868static ssize_t rbd_image_id_show(struct device *dev,
3869 struct device_attribute *attr, char *buf)
3870{
3871 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3872
0d7dbfce 3873 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
589d30e0
AE
3874}
3875
34b13184
AE
3876/*
3877 * Shows the name of the currently-mapped snapshot (or
3878 * RBD_SNAP_HEAD_NAME for the base image).
3879 */
dfc5606d
YS
3880static ssize_t rbd_snap_show(struct device *dev,
3881 struct device_attribute *attr,
3882 char *buf)
3883{
593a9e7b 3884 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3885
0d7dbfce 3886 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
dfc5606d
YS
3887}
3888
86b00e0d 3889/*
ff96128f
ID
3890 * For a v2 image, shows the chain of parent images, separated by empty
3891 * lines. For v1 images or if there is no parent, shows "(no parent
3892 * image)".
86b00e0d
AE
3893 */
3894static ssize_t rbd_parent_show(struct device *dev,
ff96128f
ID
3895 struct device_attribute *attr,
3896 char *buf)
86b00e0d
AE
3897{
3898 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
ff96128f 3899 ssize_t count = 0;
86b00e0d 3900
ff96128f 3901 if (!rbd_dev->parent)
86b00e0d
AE
3902 return sprintf(buf, "(no parent image)\n");
3903
ff96128f
ID
3904 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3905 struct rbd_spec *spec = rbd_dev->parent_spec;
3906
3907 count += sprintf(&buf[count], "%s"
3908 "pool_id %llu\npool_name %s\n"
3909 "image_id %s\nimage_name %s\n"
3910 "snap_id %llu\nsnap_name %s\n"
3911 "overlap %llu\n",
3912 !count ? "" : "\n", /* first? */
3913 spec->pool_id, spec->pool_name,
3914 spec->image_id, spec->image_name ?: "(unknown)",
3915 spec->snap_id, spec->snap_name,
3916 rbd_dev->parent_overlap);
3917 }
3918
3919 return count;
86b00e0d
AE
3920}
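
/*
 * Example output for a one-level clone chain (values hypothetical):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1022ae8944a
 *	image_name golden-image
 *	snap_id 4
 *	snap_name base
 *	overlap 10737418240
 *
 * Each additional ancestor repeats the block, preceded by a blank
 * line, following the sprintf format above.
 */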
3921
dfc5606d
YS
3922static ssize_t rbd_image_refresh(struct device *dev,
3923 struct device_attribute *attr,
3924 const char *buf,
3925 size_t size)
3926{
593a9e7b 3927 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 3928 int ret;
602adf40 3929
cc4a38bd 3930 ret = rbd_dev_refresh(rbd_dev);
e627db08 3931 if (ret)
52bb1f9b 3932 return ret;
b813623a 3933
52bb1f9b 3934 return size;
dfc5606d 3935}
602adf40 3936
dfc5606d 3937static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
34b13184 3938static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
dfc5606d 3939static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
dd82fff1 3940static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
dfc5606d
YS
3941static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3942static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
9bb2f334 3943static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
dfc5606d 3944static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
589d30e0 3945static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
dfc5606d
YS
3946static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3947static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
86b00e0d 3948static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
dfc5606d
YS
3949
3950static struct attribute *rbd_attrs[] = {
3951 &dev_attr_size.attr,
34b13184 3952 &dev_attr_features.attr,
dfc5606d 3953 &dev_attr_major.attr,
dd82fff1 3954 &dev_attr_minor.attr,
dfc5606d
YS
3955 &dev_attr_client_id.attr,
3956 &dev_attr_pool.attr,
9bb2f334 3957 &dev_attr_pool_id.attr,
dfc5606d 3958 &dev_attr_name.attr,
589d30e0 3959 &dev_attr_image_id.attr,
dfc5606d 3960 &dev_attr_current_snap.attr,
86b00e0d 3961 &dev_attr_parent.attr,
dfc5606d 3962 &dev_attr_refresh.attr,
dfc5606d
YS
3963 NULL
3964};
3965
3966static struct attribute_group rbd_attr_group = {
3967 .attrs = rbd_attrs,
3968};
3969
3970static const struct attribute_group *rbd_attr_groups[] = {
3971 &rbd_attr_group,
3972 NULL
3973};
3974
3975static void rbd_sysfs_dev_release(struct device *dev)
3976{
3977}
3978
3979static struct device_type rbd_device_type = {
3980 .name = "rbd",
3981 .groups = rbd_attr_groups,
3982 .release = rbd_sysfs_dev_release,
3983};
3984
8b8fb99c
AE
3985static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3986{
3987 kref_get(&spec->kref);
3988
3989 return spec;
3990}
3991
3992static void rbd_spec_free(struct kref *kref);
3993static void rbd_spec_put(struct rbd_spec *spec)
3994{
3995 if (spec)
3996 kref_put(&spec->kref, rbd_spec_free);
3997}
3998
3999static struct rbd_spec *rbd_spec_alloc(void)
4000{
4001 struct rbd_spec *spec;
4002
4003 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4004 if (!spec)
4005 return NULL;
04077599
ID
4006
4007 spec->pool_id = CEPH_NOPOOL;
4008 spec->snap_id = CEPH_NOSNAP;
8b8fb99c
AE
4009 kref_init(&spec->kref);
4010
8b8fb99c
AE
4011 return spec;
4012}
4013
4014static void rbd_spec_free(struct kref *kref)
4015{
4016 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4017
4018 kfree(spec->pool_name);
4019 kfree(spec->image_id);
4020 kfree(spec->image_name);
4021 kfree(spec->snap_name);
4022 kfree(spec);
4023}
4024
cc344fa1 4025static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
c53d5893
AE
4026 struct rbd_spec *spec)
4027{
4028 struct rbd_device *rbd_dev;
4029
4030 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4031 if (!rbd_dev)
4032 return NULL;
4033
4034 spin_lock_init(&rbd_dev->lock);
bc1ecc65
ID
4035 INIT_LIST_HEAD(&rbd_dev->rq_queue);
4036 INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
6d292906 4037 rbd_dev->flags = 0;
a2acd00e 4038 atomic_set(&rbd_dev->parent_ref, 0);
c53d5893 4039 INIT_LIST_HEAD(&rbd_dev->node);
c53d5893
AE
4040 init_rwsem(&rbd_dev->header_rwsem);
4041
4042 rbd_dev->spec = spec;
4043 rbd_dev->rbd_client = rbdc;
4044
0903e875
AE
4045 /* Initialize the layout used for all rbd requests */
4046
4047 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4048 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4049 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4050 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4051
c53d5893
AE
4052 return rbd_dev;
4053}
4054
4055static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4056{
c53d5893
AE
4057 rbd_put_client(rbd_dev->rbd_client);
4058 rbd_spec_put(rbd_dev->spec);
4059 kfree(rbd_dev);
4060}
4061
9d475de5
AE
4062/*
4063 * Get the size and object order for an image snapshot, or if
4064 * snap_id is CEPH_NOSNAP, gets this information for the base
4065 * image.
4066 */
4067static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4068 u8 *order, u64 *snap_size)
4069{
4070 __le64 snapid = cpu_to_le64(snap_id);
4071 int ret;
4072 struct {
4073 u8 order;
4074 __le64 size;
4075 } __attribute__ ((packed)) size_buf = { 0 };
4076
36be9a76 4077 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
9d475de5 4078 "rbd", "get_size",
4157976b 4079 &snapid, sizeof (snapid),
e2a58ee5 4080 &size_buf, sizeof (size_buf));
36be9a76 4081 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
9d475de5
AE
4082 if (ret < 0)
4083 return ret;
57385b51
AE
4084 if (ret < sizeof (size_buf))
4085 return -ERANGE;
9d475de5 4086
c3545579 4087 if (order) {
c86f86e9 4088 *order = size_buf.order;
c3545579
JD
4089 dout(" order %u", (unsigned int)*order);
4090 }
9d475de5
AE
4091 *snap_size = le64_to_cpu(size_buf.size);
4092
c3545579
JD
4093 dout(" snap_id 0x%016llx snap_size = %llu\n",
4094 (unsigned long long)snap_id,
57385b51 4095 (unsigned long long)*snap_size);
9d475de5
AE
4096
4097 return 0;
4098}
4099
4100static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4101{
4102 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4103 &rbd_dev->header.obj_order,
4104 &rbd_dev->header.image_size);
4105}
4106
1e130199
AE
4107static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4108{
4109 void *reply_buf;
4110 int ret;
4111 void *p;
4112
4113 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4114 if (!reply_buf)
4115 return -ENOMEM;
4116
36be9a76 4117 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4157976b 4118 "rbd", "get_object_prefix", NULL, 0,
e2a58ee5 4119 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
36be9a76 4120 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
1e130199
AE
4121 if (ret < 0)
4122 goto out;
4123
4124 p = reply_buf;
4125 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
57385b51
AE
4126 p + ret, NULL, GFP_NOIO);
4127 ret = 0;
1e130199
AE
4128
4129 if (IS_ERR(rbd_dev->header.object_prefix)) {
4130 ret = PTR_ERR(rbd_dev->header.object_prefix);
4131 rbd_dev->header.object_prefix = NULL;
4132 } else {
4133 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4134 }
1e130199
AE
4135out:
4136 kfree(reply_buf);
4137
4138 return ret;
4139}
4140
b1b5402a
AE
4141static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4142 u64 *snap_features)
4143{
4144 __le64 snapid = cpu_to_le64(snap_id);
4145 struct {
4146 __le64 features;
4147 __le64 incompat;
4157976b 4148 } __attribute__ ((packed)) features_buf = { 0 };
d889140c 4149 u64 incompat;
b1b5402a
AE
4150 int ret;
4151
36be9a76 4152 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
b1b5402a 4153 "rbd", "get_features",
4157976b 4154 &snapid, sizeof (snapid),
e2a58ee5 4155 &features_buf, sizeof (features_buf));
36be9a76 4156 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
b1b5402a
AE
4157 if (ret < 0)
4158 return ret;
57385b51
AE
4159 if (ret < sizeof (features_buf))
4160 return -ERANGE;
d889140c
AE
4161
4162 incompat = le64_to_cpu(features_buf.incompat);
5cbf6f12 4163 if (incompat & ~RBD_FEATURES_SUPPORTED)
b8f5c6ed 4164 return -ENXIO;
d889140c 4165
b1b5402a
AE
4166 *snap_features = le64_to_cpu(features_buf.features);
4167
4168 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
57385b51
AE
4169 (unsigned long long)snap_id,
4170 (unsigned long long)*snap_features,
4171 (unsigned long long)le64_to_cpu(features_buf.incompat));
b1b5402a
AE
4172
4173 return 0;
4174}
4175
4176static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4177{
4178 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4179 &rbd_dev->header.features);
4180}
4181
86b00e0d
AE
4182static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4183{
4184 struct rbd_spec *parent_spec;
4185 size_t size;
4186 void *reply_buf = NULL;
4187 __le64 snapid;
4188 void *p;
4189 void *end;
642a2537 4190 u64 pool_id;
86b00e0d 4191 char *image_id;
3b5cf2a2 4192 u64 snap_id;
86b00e0d 4193 u64 overlap;
86b00e0d
AE
4194 int ret;
4195
4196 parent_spec = rbd_spec_alloc();
4197 if (!parent_spec)
4198 return -ENOMEM;
4199
4200 size = sizeof (__le64) + /* pool_id */
4201 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4202 sizeof (__le64) + /* snap_id */
4203 sizeof (__le64); /* overlap */
4204 reply_buf = kmalloc(size, GFP_KERNEL);
4205 if (!reply_buf) {
4206 ret = -ENOMEM;
4207 goto out_err;
4208 }
4209
4d9b67cd 4210 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
36be9a76 4211 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
86b00e0d 4212 "rbd", "get_parent",
4157976b 4213 &snapid, sizeof (snapid),
e2a58ee5 4214 reply_buf, size);
36be9a76 4215 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
86b00e0d
AE
4216 if (ret < 0)
4217 goto out_err;
4218
86b00e0d 4219 p = reply_buf;
57385b51
AE
4220 end = reply_buf + ret;
4221 ret = -ERANGE;
642a2537 4222 ceph_decode_64_safe(&p, end, pool_id, out_err);
392a9dad
AE
4223 if (pool_id == CEPH_NOPOOL) {
4224 /*
4225 * Either the parent never existed, or we have a
4226 * record of it but the image got flattened so it no
4227 * longer has a parent. When the parent of a
4228 * layered image disappears we immediately set the
4229 * overlap to 0. The effect of this is that all new
4230 * requests will be treated as if the image had no
4231 * parent.
4232 */
4233 if (rbd_dev->parent_overlap) {
4234 rbd_dev->parent_overlap = 0;
392a9dad
AE
4235 rbd_dev_parent_put(rbd_dev);
4236 pr_info("%s: clone image has been flattened\n",
4237 rbd_dev->disk->disk_name);
4238 }
4239
86b00e0d 4240 goto out; /* No parent? No problem. */
392a9dad 4241 }
86b00e0d 4242
0903e875
AE
4243 /* The ceph file layout needs to fit pool id in 32 bits */
4244
4245 ret = -EIO;
642a2537 4246 if (pool_id > (u64)U32_MAX) {
9584d508 4247 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
642a2537 4248 (unsigned long long)pool_id, U32_MAX);
57385b51 4249 goto out_err;
c0cd10db 4250 }
0903e875 4251
979ed480 4252 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
86b00e0d
AE
4253 if (IS_ERR(image_id)) {
4254 ret = PTR_ERR(image_id);
4255 goto out_err;
4256 }
3b5cf2a2 4257 ceph_decode_64_safe(&p, end, snap_id, out_err);
86b00e0d
AE
4258 ceph_decode_64_safe(&p, end, overlap, out_err);
4259
3b5cf2a2
AE
4260 /*
4261 * The parent won't change (except when the clone is
4262 * flattened, which is handled above). So we only need to
4263 * record the parent spec if we have not already done so.
4264 */
4265 if (!rbd_dev->parent_spec) {
4266 parent_spec->pool_id = pool_id;
4267 parent_spec->image_id = image_id;
4268 parent_spec->snap_id = snap_id;
70cf49cf
AE
4269 rbd_dev->parent_spec = parent_spec;
4270 parent_spec = NULL; /* rbd_dev now owns this */
fbba11b3
ID
4271 } else {
4272 kfree(image_id);
3b5cf2a2
AE
4273 }
4274
4275 /*
4276 * We always update the parent overlap. If it's zero we
4277 * treat it specially.
4278 */
4279 rbd_dev->parent_overlap = overlap;
3b5cf2a2
AE
4280 if (!overlap) {
4281
4282 /* A null parent_spec indicates it's the initial probe */
4283
4284 if (parent_spec) {
4285 /*
4286 * The overlap has become zero, so the clone
4287 * must have been resized down to 0 at some
4288 * point. Treat this the same as a flatten.
4289 */
4290 rbd_dev_parent_put(rbd_dev);
4291 pr_info("%s: clone image now standalone\n",
4292 rbd_dev->disk->disk_name);
4293 } else {
4294 /*
4295 * For the initial probe, if we find the
4296 * overlap is zero we just pretend there was
4297 * no parent image.
4298 */
9584d508 4299 rbd_warn(rbd_dev, "ignoring parent with overlap 0");
3b5cf2a2 4300 }
70cf49cf 4301 }
86b00e0d
AE
4302out:
4303 ret = 0;
4304out_err:
4305 kfree(reply_buf);
4306 rbd_spec_put(parent_spec);
4307
4308 return ret;
4309}
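
/*
 * get_parent reply layout decoded above, matching the buffer sizing
 * at the top of the function:
 *
 *	__le64          pool_id       CEPH_NOPOOL if no parent
 *	__le32 + bytes  image_id      length-prefixed string
 *	__le64          snap_id
 *	__le64          overlap       bytes backed by the parent
 */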
4310
cc070d59
AE
4311static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4312{
4313 struct {
4314 __le64 stripe_unit;
4315 __le64 stripe_count;
4316 } __attribute__ ((packed)) striping_info_buf = { 0 };
4317 size_t size = sizeof (striping_info_buf);
4318 void *p;
4319 u64 obj_size;
4320 u64 stripe_unit;
4321 u64 stripe_count;
4322 int ret;
4323
4324 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4325 "rbd", "get_stripe_unit_count", NULL, 0,
e2a58ee5 4326 (char *)&striping_info_buf, size);
cc070d59
AE
4327 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4328 if (ret < 0)
4329 return ret;
4330 if (ret < size)
4331 return -ERANGE;
4332
4333 /*
4334 * We don't actually support the "fancy striping" feature
4335 * (STRIPINGV2) yet, but if the striping sizes are the
4336 * defaults the behavior is the same as before. So find
4337 * out, and only fail if the image has non-default values.
4338 */
4339 ret = -EINVAL;
4340 obj_size = (u64)1 << rbd_dev->header.obj_order;
4341 p = &striping_info_buf;
4342 stripe_unit = ceph_decode_64(&p);
4343 if (stripe_unit != obj_size) {
4344 rbd_warn(rbd_dev, "unsupported stripe unit "
4345 "(got %llu want %llu)",
4346 stripe_unit, obj_size);
4347 return -EINVAL;
4348 }
4349 stripe_count = ceph_decode_64(&p);
4350 if (stripe_count != 1) {
4351 rbd_warn(rbd_dev, "unsupported stripe count "
4352 "(got %llu want 1)", stripe_count);
4353 return -EINVAL;
4354 }
500d0c0f
AE
4355 rbd_dev->header.stripe_unit = stripe_unit;
4356 rbd_dev->header.stripe_count = stripe_count;
cc070d59
AE
4357
4358 return 0;
4359}
4360
9e15b77d
AE
4361static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4362{
4363 size_t image_id_size;
4364 char *image_id;
4365 void *p;
4366 void *end;
4367 size_t size;
4368 void *reply_buf = NULL;
4369 size_t len = 0;
4370 char *image_name = NULL;
4371 int ret;
4372
4373 rbd_assert(!rbd_dev->spec->image_name);
4374
69e7a02f
AE
4375 len = strlen(rbd_dev->spec->image_id);
4376 image_id_size = sizeof (__le32) + len;
9e15b77d
AE
4377 image_id = kmalloc(image_id_size, GFP_KERNEL);
4378 if (!image_id)
4379 return NULL;
4380
4381 p = image_id;
4157976b 4382 end = image_id + image_id_size;
57385b51 4383 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
9e15b77d
AE
4384
4385 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4386 reply_buf = kmalloc(size, GFP_KERNEL);
4387 if (!reply_buf)
4388 goto out;
4389
36be9a76 4390 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
9e15b77d
AE
4391 "rbd", "dir_get_name",
4392 image_id, image_id_size,
e2a58ee5 4393 reply_buf, size);
9e15b77d
AE
4394 if (ret < 0)
4395 goto out;
4396 p = reply_buf;
f40eb349
AE
4397 end = reply_buf + ret;
4398
9e15b77d
AE
4399 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4400 if (IS_ERR(image_name))
4401 image_name = NULL;
4402 else
4403 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4404out:
4405 kfree(reply_buf);
4406 kfree(image_id);
4407
4408 return image_name;
4409}
4410
2ad3d716
AE
4411static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4412{
4413 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4414 const char *snap_name;
4415 u32 which = 0;
4416
4417 /* Skip over names until we find the one we are looking for */
4418
4419 snap_name = rbd_dev->header.snap_names;
4420 while (which < snapc->num_snaps) {
4421 if (!strcmp(name, snap_name))
4422 return snapc->snaps[which];
4423 snap_name += strlen(snap_name) + 1;
4424 which++;
4425 }
4426 return CEPH_NOSNAP;
4427}
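
/*
 * Illustrative v1 lookup walked above (hypothetical values): with
 * snapc->snaps[] = { 12, 7 } and header.snap_names = "mon\0tue\0",
 * a lookup of "tue" skips past "mon" (strlen + 1 bytes) and returns
 * snapshot id 7; an unknown name falls through to CEPH_NOSNAP.
 */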
4428
4429static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4430{
4431 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4432 u32 which;
4433 bool found = false;
4434 u64 snap_id;
4435
4436 for (which = 0; !found && which < snapc->num_snaps; which++) {
4437 const char *snap_name;
4438
4439 snap_id = snapc->snaps[which];
4440 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
efadc98a
JD
4441 if (IS_ERR(snap_name)) {
4442 /* ignore no-longer existing snapshots */
4443 if (PTR_ERR(snap_name) == -ENOENT)
4444 continue;
4445 else
4446 break;
4447 }
2ad3d716
AE
4448 found = !strcmp(name, snap_name);
4449 kfree(snap_name);
4450 }
4451 return found ? snap_id : CEPH_NOSNAP;
4452}
4453
4454/*
4455 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4456 * no snapshot by that name is found, or if an error occurs.
4457 */
4458static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4459{
4460 if (rbd_dev->image_format == 1)
4461 return rbd_v1_snap_id_by_name(rbd_dev, name);
4462
4463 return rbd_v2_snap_id_by_name(rbd_dev, name);
4464}
4465
9e15b77d 4466/*
04077599
ID
4467 * An image being mapped will have everything but the snap id.
4468 */
4469static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4470{
4471 struct rbd_spec *spec = rbd_dev->spec;
4472
4473 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4474 rbd_assert(spec->image_id && spec->image_name);
4475 rbd_assert(spec->snap_name);
4476
4477 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4478 u64 snap_id;
4479
4480 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4481 if (snap_id == CEPH_NOSNAP)
4482 return -ENOENT;
4483
4484 spec->snap_id = snap_id;
4485 } else {
4486 spec->snap_id = CEPH_NOSNAP;
4487 }
4488
4489 return 0;
4490}
4491
4492/*
4493 * A parent image will have all ids but none of the names.
e1d4213f 4494 *
04077599
ID
4495 * All names in an rbd spec are dynamically allocated. It's OK if we
4496 * can't figure out the name for an image id.
9e15b77d 4497 */
04077599 4498static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
9e15b77d 4499{
2e9f7f1c
AE
4500 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4501 struct rbd_spec *spec = rbd_dev->spec;
4502 const char *pool_name;
4503 const char *image_name;
4504 const char *snap_name;
9e15b77d
AE
4505 int ret;
4506
04077599
ID
4507 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4508 rbd_assert(spec->image_id);
4509 rbd_assert(spec->snap_id != CEPH_NOSNAP);
9e15b77d 4510
2e9f7f1c 4511 /* Get the pool name; we have to make our own copy of this */
9e15b77d 4512
2e9f7f1c
AE
4513 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4514 if (!pool_name) {
4515 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
935dc89f
AE
4516 return -EIO;
4517 }
2e9f7f1c
AE
4518 pool_name = kstrdup(pool_name, GFP_KERNEL);
4519 if (!pool_name)
9e15b77d
AE
4520 return -ENOMEM;
4521
4522 /* Fetch the image name; tolerate failure here */
4523
2e9f7f1c
AE
4524 image_name = rbd_dev_image_name(rbd_dev);
4525 if (!image_name)
06ecc6cb 4526 rbd_warn(rbd_dev, "unable to get image name");
9e15b77d 4527
04077599 4528 /* Fetch the snapshot name */
9e15b77d 4529
2e9f7f1c 4530 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
da6a6b63
JD
4531 if (IS_ERR(snap_name)) {
4532 ret = PTR_ERR(snap_name);
9e15b77d 4533 goto out_err;
2e9f7f1c
AE
4534 }
4535
4536 spec->pool_name = pool_name;
4537 spec->image_name = image_name;
4538 spec->snap_name = snap_name;
9e15b77d
AE
4539
4540 return 0;
04077599 4541
9e15b77d 4542out_err:
2e9f7f1c
AE
4543 kfree(image_name);
4544 kfree(pool_name);
9e15b77d
AE
4545 return ret;
4546}
4547
cc4a38bd 4548static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
35d489f9
AE
4549{
4550 size_t size;
4551 int ret;
4552 void *reply_buf;
4553 void *p;
4554 void *end;
4555 u64 seq;
4556 u32 snap_count;
4557 struct ceph_snap_context *snapc;
4558 u32 i;
4559
4560 /*
4561 * We'll need room for the seq value (maximum snapshot id),
4562 * snapshot count, and an array of that many snapshot ids.
4563 * For now we have a fixed upper limit on the number we're
4564 * prepared to receive.
4565 */
4566 size = sizeof (__le64) + sizeof (__le32) +
4567 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4568 reply_buf = kzalloc(size, GFP_KERNEL);
4569 if (!reply_buf)
4570 return -ENOMEM;
4571
36be9a76 4572 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4157976b 4573 "rbd", "get_snapcontext", NULL, 0,
e2a58ee5 4574 reply_buf, size);
36be9a76 4575 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
35d489f9
AE
4576 if (ret < 0)
4577 goto out;
4578
35d489f9 4579 p = reply_buf;
57385b51
AE
4580 end = reply_buf + ret;
4581 ret = -ERANGE;
35d489f9
AE
4582 ceph_decode_64_safe(&p, end, seq, out);
4583 ceph_decode_32_safe(&p, end, snap_count, out);
4584
4585 /*
4586 * Make sure the reported number of snapshot ids wouldn't go
4587 * beyond the end of our buffer. But before checking that,
4588 * make sure the computed size of the snapshot context we
4589 * allocate is representable in a size_t.
4590 */
4591 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4592 / sizeof (u64)) {
4593 ret = -EINVAL;
4594 goto out;
4595 }
4596 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4597 goto out;
468521c1 4598 ret = 0;
35d489f9 4599
812164f8 4600 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
35d489f9
AE
4601 if (!snapc) {
4602 ret = -ENOMEM;
4603 goto out;
4604 }
35d489f9 4605 snapc->seq = seq;
35d489f9
AE
4606 for (i = 0; i < snap_count; i++)
4607 snapc->snaps[i] = ceph_decode_64(&p);
4608
49ece554 4609 ceph_put_snap_context(rbd_dev->header.snapc);
35d489f9
AE
4610 rbd_dev->header.snapc = snapc;
4611
4612 dout(" snap context seq = %llu, snap_count = %u\n",
57385b51 4613 (unsigned long long)seq, (unsigned int)snap_count);
35d489f9
AE
4614out:
4615 kfree(reply_buf);
4616
57385b51 4617 return ret;
35d489f9
AE
4618}
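
/*
 * get_snapcontext reply layout decoded above, matching the buffer
 * sizing at the top of the function:
 *
 *	__le64  seq                  highest snapshot id
 *	__le32  snap_count
 *	__le64  snaps[snap_count]
 *
 * RBD_MAX_SNAP_COUNT bounds snap_count so the reply always fits.
 */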
4619
54cac61f
AE
4620static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4621 u64 snap_id)
b8b1e2db
AE
4622{
4623 size_t size;
4624 void *reply_buf;
54cac61f 4625 __le64 snapid;
b8b1e2db
AE
4626 int ret;
4627 void *p;
4628 void *end;
b8b1e2db
AE
4629 char *snap_name;
4630
4631 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4632 reply_buf = kmalloc(size, GFP_KERNEL);
4633 if (!reply_buf)
4634 return ERR_PTR(-ENOMEM);
4635
54cac61f 4636 snapid = cpu_to_le64(snap_id);
36be9a76 4637 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
b8b1e2db 4638 "rbd", "get_snapshot_name",
54cac61f 4639 &snapid, sizeof (snapid),
e2a58ee5 4640 reply_buf, size);
36be9a76 4641 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
f40eb349
AE
4642 if (ret < 0) {
4643 snap_name = ERR_PTR(ret);
b8b1e2db 4644 goto out;
f40eb349 4645 }
b8b1e2db
AE
4646
4647 p = reply_buf;
f40eb349 4648 end = reply_buf + ret;
e5c35534 4649 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
f40eb349 4650 if (IS_ERR(snap_name))
b8b1e2db 4651 goto out;
b8b1e2db 4652
f40eb349 4653 dout(" snap_id 0x%016llx snap_name = %s\n",
54cac61f 4654 (unsigned long long)snap_id, snap_name);
b8b1e2db
AE
4655out:
4656 kfree(reply_buf);
4657
f40eb349 4658 return snap_name;
b8b1e2db
AE
4659}
4660
2df3fac7 4661static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
117973fb 4662{
2df3fac7 4663 bool first_time = rbd_dev->header.object_prefix == NULL;
117973fb 4664 int ret;
117973fb 4665
1617e40c
JD
4666 ret = rbd_dev_v2_image_size(rbd_dev);
4667 if (ret)
cfbf6377 4668 return ret;
1617e40c 4669
2df3fac7
AE
4670 if (first_time) {
4671 ret = rbd_dev_v2_header_onetime(rbd_dev);
4672 if (ret)
cfbf6377 4673 return ret;
2df3fac7
AE
4674 }
4675
cc4a38bd 4676 ret = rbd_dev_v2_snap_context(rbd_dev);
117973fb 4677 dout("rbd_dev_v2_snap_context returned %d\n", ret);
117973fb
AE
4678
4679 return ret;
4680}
4681
a720ae09
ID
4682static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4683{
4684 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4685
4686 if (rbd_dev->image_format == 1)
4687 return rbd_dev_v1_header_info(rbd_dev);
4688
4689 return rbd_dev_v2_header_info(rbd_dev);
4690}
4691
dfc5606d
YS
4692static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4693{
dfc5606d 4694 struct device *dev;
cd789ab9 4695 int ret;
dfc5606d 4696
cd789ab9 4697 dev = &rbd_dev->dev;
dfc5606d
YS
4698 dev->bus = &rbd_bus_type;
4699 dev->type = &rbd_device_type;
4700 dev->parent = &rbd_root_dev;
200a6a8b 4701 dev->release = rbd_dev_device_release;
de71a297 4702 dev_set_name(dev, "%d", rbd_dev->dev_id);
dfc5606d 4703 ret = device_register(dev);
dfc5606d 4704
dfc5606d 4705 return ret;
602adf40
YS
4706}
4707
dfc5606d
YS
4708static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4709{
4710 device_unregister(&rbd_dev->dev);
4711}
4712
1ddbe94e 4713/*
499afd5b 4714 * Get a unique rbd identifier for the given new rbd_dev, and add
f8a22fc2 4715 * the rbd_dev to the global list.
1ddbe94e 4716 */
f8a22fc2 4717static int rbd_dev_id_get(struct rbd_device *rbd_dev)
b7f23c36 4718{
f8a22fc2
ID
4719 int new_dev_id;
4720
9b60e70b
ID
4721 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4722 0, minor_to_rbd_dev_id(1 << MINORBITS),
4723 GFP_KERNEL);
f8a22fc2
ID
4724 if (new_dev_id < 0)
4725 return new_dev_id;
4726
4727 rbd_dev->dev_id = new_dev_id;
499afd5b
AE
4728
4729 spin_lock(&rbd_dev_list_lock);
4730 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4731 spin_unlock(&rbd_dev_list_lock);
f8a22fc2 4732
70eebd20 4733 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
f8a22fc2
ID
4734
4735 return 0;
1ddbe94e 4736}
b7f23c36 4737
1ddbe94e 4738/*
499afd5b
AE
4739 * Remove an rbd_dev from the global list, and record that its
4740 * identifier is no longer in use.
1ddbe94e 4741 */
e2839308 4742static void rbd_dev_id_put(struct rbd_device *rbd_dev)
1ddbe94e 4743{
499afd5b
AE
4744 spin_lock(&rbd_dev_list_lock);
4745 list_del_init(&rbd_dev->node);
4746 spin_unlock(&rbd_dev_list_lock);
b7f23c36 4747
f8a22fc2
ID
4748 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4749
4750 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
b7f23c36
AE
4751}
4752
e28fff26
AE
4753/*
4754 * Skips over white space at *buf, and updates *buf to point to the
4755 * first found non-space character (if any). Returns the length of
593a9e7b
AE
4756 * the token (string of non-white space characters) found. Note
4757 * that *buf must be terminated with '\0'.
e28fff26
AE
4758 */
4759static inline size_t next_token(const char **buf)
4760{
4761 /*
4762 * These are the characters that produce nonzero for
4763 * isspace() in the "C" and "POSIX" locales.
4764 */
4765 const char *spaces = " \f\n\r\t\v";
4766
4767 *buf += strspn(*buf, spaces); /* Find start of token */
4768
4769 return strcspn(*buf, spaces); /* Return token length */
4770}
4771
ea3352f4
AE
4772/*
4773 * Finds the next token in *buf, dynamically allocates a buffer big
4774 * enough to hold a copy of it, and copies the token into the new
4775 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4776 * that a duplicate buffer is created even for a zero-length token.
4777 *
4778 * Returns a pointer to the newly-allocated duplicate, or a null
4779 * pointer if memory for the duplicate was not available. If
4780 * the lenp argument is a non-null pointer, the length of the token
4781 * (not including the '\0') is returned in *lenp.
4782 *
4783 * If successful, the *buf pointer will be updated to point beyond
4784 * the end of the found token.
4785 *
4786 * Note: uses GFP_KERNEL for allocation.
4787 */
4788static inline char *dup_token(const char **buf, size_t *lenp)
4789{
4790 char *dup;
4791 size_t len;
4792
4793 len = next_token(buf);
4caf35f9 4794 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
ea3352f4
AE
4795 if (!dup)
4796 return NULL;
ea3352f4
AE
4797 *(dup + len) = '\0';
4798 *buf += len;
4799
4800 if (lenp)
4801 *lenp = len;
4802
4803 return dup;
4804}
4805
a725f65e 4806/*
859c31df
AE
4807 * Parse the options provided for an "rbd add" (i.e., rbd image
4808 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4809 * and the data written is passed here via a NUL-terminated buffer.
4810 * Returns 0 if successful or an error code otherwise.
d22f76e7 4811 *
859c31df
AE
4812 * The information extracted from these options is recorded in
4813 * the other parameters which return dynamically-allocated
4814 * structures:
4815 * ceph_opts
4816 * The address of a pointer that will refer to a ceph options
4817 * structure. Caller must release the returned pointer using
4818 * ceph_destroy_options() when it is no longer needed.
4819 * rbd_opts
4820 * Address of an rbd options pointer. Fully initialized by
4821 * this function; caller must release with kfree().
4822 * spec
4823 * Address of an rbd image specification pointer. Fully
4824 * initialized by this function based on parsed options.
4825 * Caller must release with rbd_spec_put().
4826 *
4827 * The options passed take this form:
4828 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4829 * where:
4830 * <mon_addrs>
4831 * A comma-separated list of one or more monitor addresses.
4832 * A monitor address is an ip address, optionally followed
4833 * by a port number (separated by a colon).
4834 * I.e.: ip1[:port1][,ip2[:port2]...]
4835 * <options>
4836 * A comma-separated list of ceph and/or rbd options.
4837 * <pool_name>
4838 * The name of the rados pool containing the rbd image.
4839 * <image_name>
4840 * The name of the image in that pool to map.
4841 * <snap_name>
4842 * An optional snapshot name. If provided, the mapping will
4843 * present data from the image at the time that snapshot was
4844 * created. The image head is used if no snapshot name is
4845 * provided. Snapshot mappings are always read-only.
a725f65e 4846 */
859c31df 4847static int rbd_add_parse_args(const char *buf,
dc79b113 4848 struct ceph_options **ceph_opts,
859c31df
AE
4849 struct rbd_options **opts,
4850 struct rbd_spec **rbd_spec)
e28fff26 4851{
d22f76e7 4852 size_t len;
859c31df 4853 char *options;
0ddebc0c 4854 const char *mon_addrs;
ecb4dc22 4855 char *snap_name;
0ddebc0c 4856 size_t mon_addrs_size;
859c31df 4857 struct rbd_spec *spec = NULL;
4e9afeba 4858 struct rbd_options *rbd_opts = NULL;
859c31df 4859 struct ceph_options *copts;
dc79b113 4860 int ret;
e28fff26
AE
4861
4862 /* The first four tokens are required */
4863
7ef3214a 4864 len = next_token(&buf);
4fb5d671
AE
4865 if (!len) {
4866 rbd_warn(NULL, "no monitor address(es) provided");
4867 return -EINVAL;
4868 }
0ddebc0c 4869 mon_addrs = buf;
f28e565a 4870 mon_addrs_size = len + 1;
7ef3214a 4871 buf += len;
a725f65e 4872
dc79b113 4873 ret = -EINVAL;
f28e565a
AE
4874 options = dup_token(&buf, NULL);
4875 if (!options)
dc79b113 4876 return -ENOMEM;
4fb5d671
AE
4877 if (!*options) {
4878 rbd_warn(NULL, "no options provided");
4879 goto out_err;
4880 }
e28fff26 4881
859c31df
AE
4882 spec = rbd_spec_alloc();
4883 if (!spec)
f28e565a 4884 goto out_mem;
859c31df
AE
4885
4886 spec->pool_name = dup_token(&buf, NULL);
4887 if (!spec->pool_name)
4888 goto out_mem;
4fb5d671
AE
4889 if (!*spec->pool_name) {
4890 rbd_warn(NULL, "no pool name provided");
4891 goto out_err;
4892 }
e28fff26 4893
69e7a02f 4894 spec->image_name = dup_token(&buf, NULL);
859c31df 4895 if (!spec->image_name)
f28e565a 4896 goto out_mem;
4fb5d671
AE
4897 if (!*spec->image_name) {
4898 rbd_warn(NULL, "no image name provided");
4899 goto out_err;
4900 }
d4b125e9 4901
f28e565a
AE
4902 /*
4903 * Snapshot name is optional; default is to use "-"
4904 * (indicating the head/no snapshot).
4905 */
3feeb894 4906 len = next_token(&buf);
820a5f3e 4907 if (!len) {
3feeb894
AE
4908 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4909 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
f28e565a 4910 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
dc79b113 4911 ret = -ENAMETOOLONG;
f28e565a 4912 goto out_err;
849b4260 4913 }
ecb4dc22
AE
4914 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4915 if (!snap_name)
f28e565a 4916 goto out_mem;
ecb4dc22
AE
4917 *(snap_name + len) = '\0';
4918 spec->snap_name = snap_name;
e5c35534 4919
0ddebc0c 4920 /* Initialize all rbd options to the defaults */
e28fff26 4921
4e9afeba
AE
4922 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4923 if (!rbd_opts)
4924 goto out_mem;
4925
4926 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
d22f76e7 4927
859c31df 4928 copts = ceph_parse_options(options, mon_addrs,
0ddebc0c 4929 mon_addrs + mon_addrs_size - 1,
4e9afeba 4930 parse_rbd_opts_token, rbd_opts);
859c31df
AE
4931 if (IS_ERR(copts)) {
4932 ret = PTR_ERR(copts);
dc79b113
AE
4933 goto out_err;
4934 }
859c31df
AE
4935 kfree(options);
4936
4937 *ceph_opts = copts;
4e9afeba 4938 *opts = rbd_opts;
859c31df 4939 *rbd_spec = spec;
0ddebc0c 4940
dc79b113 4941 return 0;
f28e565a 4942out_mem:
dc79b113 4943 ret = -ENOMEM;
d22f76e7 4944out_err:
859c31df
AE
4945 kfree(rbd_opts);
4946 rbd_spec_put(spec);
f28e565a 4947 kfree(options);
d22f76e7 4948
dc79b113 4949 return ret;
a725f65e
AE
4950}
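
/*
 * Illustrative example of the buffer parsed above, as written to
 * /sys/bus/rbd/add (all names and the key are hypothetical):
 *
 *	1.2.3.4:6789,1.2.3.5:6789 name=admin,secret=AQD... rbd myimage
 *
 * maps the head of image "myimage" in pool "rbd"; appending a fifth
 * token such as "snap1" would instead map that snapshot, read-only.
 */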
4951
30ba1f02
ID
4952/*
4953 * Return pool id (>= 0) or a negative error code.
4954 */
4955static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4956{
4957 u64 newest_epoch;
4958 unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
4959 int tries = 0;
4960 int ret;
4961
4962again:
4963 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4964 if (ret == -ENOENT && tries++ < 1) {
4965 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4966 &newest_epoch);
4967 if (ret < 0)
4968 return ret;
4969
4970 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4971 ceph_monc_request_next_osdmap(&rbdc->client->monc);
4972 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
4973 newest_epoch, timeout);
4974 goto again;
4975 } else {
4976 /* the osdmap we have is new enough */
4977 return -ENOENT;
4978 }
4979 }
4980
4981 return ret;
4982}
4983
589d30e0
AE
4984/*
4985 * An rbd format 2 image has a unique identifier, distinct from the
4986 * name given to it by the user. Internally, that identifier is
4987 * what's used to specify the names of objects related to the image.
4988 *
4989 * A special "rbd id" object is used to map an rbd image name to its
4990 * id. If that object doesn't exist, then there is no v2 rbd image
4991 * with the supplied name.
4992 *
4993 * This function will record the given rbd_dev's image_id field if
4994 * it can be determined, and in that case will return 0. If any
4995 * errors occur a negative errno will be returned and the rbd_dev's
4996 * image_id field will be unchanged (and should be NULL).
4997 */
4998static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4999{
5000 int ret;
5001 size_t size;
5002 char *object_name;
5003 void *response;
c0fba368 5004 char *image_id;
2f82ee54 5005
2c0d0a10
AE
5006 /*
5007 * When probing a parent image, the image id is already
5008 * known (and the image name likely is not). There's no
c0fba368
AE
5009 * need to fetch the image id again in this case. We
5010 * do still need to set the image format though.
2c0d0a10 5011 */
c0fba368
AE
5012 if (rbd_dev->spec->image_id) {
5013 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5014
2c0d0a10 5015 return 0;
c0fba368 5016 }
2c0d0a10 5017
589d30e0
AE
5018 /*
5019 * First, see if the format 2 image id file exists, and if
5020 * so, get the image's persistent id from it.
5021 */
69e7a02f 5022 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
589d30e0
AE
5023 object_name = kmalloc(size, GFP_NOIO);
5024 if (!object_name)
5025 return -ENOMEM;
0d7dbfce 5026 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
589d30e0
AE
5027 dout("rbd id object name is %s\n", object_name);
5028
5029 /* Response will be an encoded string, which includes a length */
5030
5031 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5032 response = kzalloc(size, GFP_NOIO);
5033 if (!response) {
5034 ret = -ENOMEM;
5035 goto out;
5036 }
5037
c0fba368
AE
5038 /* If it doesn't exist we'll assume it's a format 1 image */
5039
36be9a76 5040 ret = rbd_obj_method_sync(rbd_dev, object_name,
4157976b 5041 "rbd", "get_id", NULL, 0,
e2a58ee5 5042 response, RBD_IMAGE_ID_LEN_MAX);
36be9a76 5043 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
c0fba368
AE
5044 if (ret == -ENOENT) {
5045 image_id = kstrdup("", GFP_KERNEL);
5046 ret = image_id ? 0 : -ENOMEM;
5047 if (!ret)
5048 rbd_dev->image_format = 1;
7dd440c9 5049 } else if (ret >= 0) {
c0fba368
AE
5050 void *p = response;
5051
5052 image_id = ceph_extract_encoded_string(&p, p + ret,
979ed480 5053 NULL, GFP_NOIO);
461f758a 5054 ret = PTR_ERR_OR_ZERO(image_id);
c0fba368
AE
5055 if (!ret)
5056 rbd_dev->image_format = 2;
c0fba368
AE
5057 }
5058
5059 if (!ret) {
5060 rbd_dev->spec->image_id = image_id;
5061 dout("image_id is %s\n", image_id);
589d30e0
AE
5062 }
5063out:
5064 kfree(response);
5065 kfree(object_name);
5066
5067 return ret;
5068}
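
/*
 * Naming example for the lookup above (image name hypothetical): a
 * format 2 image "myimage" has id object "rbd_id.myimage" (built from
 * RBD_ID_PREFIX), whose "get_id" class method returns the image id as
 * a length-prefixed string; -ENOENT means a format 1 image instead.
 */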
5069
3abef3b3
AE
5070/*
5071 * Undo whatever state changes are made by v1 or v2 header info
5072 * call.
5073 */
6fd48b3b
AE
5074static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5075{
5076 struct rbd_image_header *header;
5077
e69b8d41 5078 rbd_dev_parent_put(rbd_dev);
6fd48b3b
AE
5079
5080 /* Free dynamic fields from the header, then zero it out */
5081
5082 header = &rbd_dev->header;
812164f8 5083 ceph_put_snap_context(header->snapc);
6fd48b3b
AE
5084 kfree(header->snap_sizes);
5085 kfree(header->snap_names);
5086 kfree(header->object_prefix);
5087 memset(header, 0, sizeof (*header));
5088}
5089
2df3fac7 5090static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
a30b71b9
AE
5091{
5092 int ret;
a30b71b9 5093
1e130199 5094 ret = rbd_dev_v2_object_prefix(rbd_dev);
57385b51 5095 if (ret)
b1b5402a
AE
5096 goto out_err;
5097
2df3fac7
AE
5098 /*
5099 * Get and check the features for the image. Currently the
5100 * features are assumed to never change.
5101 */
b1b5402a 5102 ret = rbd_dev_v2_features(rbd_dev);
57385b51 5103 if (ret)
9d475de5 5104 goto out_err;
35d489f9 5105
cc070d59
AE
5106 /* If the image supports fancy striping, get its parameters */
5107
5108 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5109 ret = rbd_dev_v2_striping_info(rbd_dev);
5110 if (ret < 0)
5111 goto out_err;
5112 }
2df3fac7 5113 /* No support for crypto and compression type format 2 images */
a30b71b9 5114
35152979 5115 return 0;
9d475de5 5116out_err:
642a2537 5117 rbd_dev->header.features = 0;
1e130199
AE
5118 kfree(rbd_dev->header.object_prefix);
5119 rbd_dev->header.object_prefix = NULL;
9d475de5
AE
5120
5121 return ret;
a30b71b9
AE
5122}
5123
124afba2 5124static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
83a06263 5125{
2f82ee54 5126 struct rbd_device *parent = NULL;
124afba2
AE
5127 struct rbd_spec *parent_spec;
5128 struct rbd_client *rbdc;
5129 int ret;
5130
5131 if (!rbd_dev->parent_spec)
5132 return 0;
5133 /*
5134 * We need to pass a reference to the client and the parent
5135 * spec when creating the parent rbd_dev. Images related by
5136 * parent/child relationships always share both.
5137 */
5138 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5139 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5140
5141 ret = -ENOMEM;
5142 parent = rbd_dev_create(rbdc, parent_spec);
5143 if (!parent)
5144 goto out_err;
5145
1f3ef788 5146 ret = rbd_dev_image_probe(parent, false);
124afba2
AE
5147 if (ret < 0)
5148 goto out_err;
5149 rbd_dev->parent = parent;
a2acd00e 5150 atomic_set(&rbd_dev->parent_ref, 1);
124afba2
AE
5151
5152 return 0;
5153out_err:
5154 if (parent) {
fb65d228 5155 rbd_dev_unparent(rbd_dev);
124afba2
AE
5156 kfree(rbd_dev->header_name);
5157 rbd_dev_destroy(parent);
5158 } else {
5159 rbd_put_client(rbdc);
5160 rbd_spec_put(parent_spec);
5161 }
5162
5163 return ret;
5164}
5165
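/*
 * Illustrative note: clones are probed recursively through the
 * rbd_dev_image_probe(parent, false) call above, so a chain such as
 *
 *	mapped clone -> parent snapshot -> base image
 *
 * ends up as a list of rbd_device structures linked via ->parent, each
 * holding its own reference to the shared client and to its parent spec.
 * The "false" argument means parent images never get a header watch of
 * their own.
 */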
200a6a8b 5166static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
124afba2 5167{
83a06263 5168 int ret;
d1cf5788 5169
f8a22fc2
ID
5170 /* Get an id and fill in device name. */
5171
5172 ret = rbd_dev_id_get(rbd_dev);
5173 if (ret)
5174 return ret;
83a06263 5175
83a06263
AE
5176 BUILD_BUG_ON(DEV_NAME_LEN
5177 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5178 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5179
9b60e70b 5180 /* Record our major and minor device numbers. */
83a06263 5181
9b60e70b
ID
5182 if (!single_major) {
5183 ret = register_blkdev(0, rbd_dev->name);
5184 if (ret < 0)
5185 goto err_out_id;
5186
5187 rbd_dev->major = ret;
5188 rbd_dev->minor = 0;
5189 } else {
5190 rbd_dev->major = rbd_major;
5191 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5192 }
83a06263
AE
5193
5194 /* Set up the blkdev mapping. */
5195
5196 ret = rbd_init_disk(rbd_dev);
5197 if (ret)
5198 goto err_out_blkdev;
5199
f35a4dee 5200 ret = rbd_dev_mapping_set(rbd_dev);
83a06263
AE
5201 if (ret)
5202 goto err_out_disk;
bc1ecc65 5203
f35a4dee 5204 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
22001f61 5205 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
f35a4dee
AE
5206
5207 ret = rbd_bus_add_dev(rbd_dev);
5208 if (ret)
f5ee37bd 5209 goto err_out_mapping;
83a06263 5210
83a06263
AE
5211 /* Everything's ready. Announce the disk to the world. */
5212
129b79d4 5213 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
83a06263
AE
5214 add_disk(rbd_dev->disk);
5215
5216 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5217 (unsigned long long) rbd_dev->mapping.size);
5218
5219 return ret;
2f82ee54 5220
f35a4dee
AE
5221err_out_mapping:
5222 rbd_dev_mapping_clear(rbd_dev);
83a06263
AE
5223err_out_disk:
5224 rbd_free_disk(rbd_dev);
5225err_out_blkdev:
9b60e70b
ID
5226 if (!single_major)
5227 unregister_blkdev(rbd_dev->major, rbd_dev->name);
83a06263
AE
5228err_out_id:
5229 rbd_dev_id_put(rbd_dev);
d1cf5788 5230 rbd_dev_mapping_clear(rbd_dev);
83a06263
AE
5231
5232 return ret;
5233}
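/*
 * Illustrative note: the device name built above is always "rbd<dev_id>"
 * (e.g. dev_id 3 maps to /dev/rbd3).  Without the single_major option
 * each device registers its own block major; with it, all devices share
 * rbd_major and the minor is derived from the device id via
 * rbd_dev_id_to_minor(), leaving RBD_SINGLE_MAJOR_PART_SHIFT bits of
 * minor space per device for partitions.
 */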
5234
332bb12d
AE
5235static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5236{
5237 struct rbd_spec *spec = rbd_dev->spec;
5238 size_t size;
5239
5240 /* Record the header object name for this rbd image. */
5241
5242 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5243
5244 if (rbd_dev->image_format == 1)
5245 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5246 else
5247 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5248
5249 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5250 if (!rbd_dev->header_name)
5251 return -ENOMEM;
5252
5253 if (rbd_dev->image_format == 1)
5254 sprintf(rbd_dev->header_name, "%s%s",
5255 spec->image_name, RBD_SUFFIX);
5256 else
5257 sprintf(rbd_dev->header_name, "%s%s",
5258 RBD_HEADER_PREFIX, spec->image_id);
5259 return 0;
5260}
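/*
 * Illustrative note: assuming the RBD_SUFFIX and RBD_HEADER_PREFIX
 * definitions from rbd_types.h, a format 1 image named "foo" gets the
 * header object "foo.rbd", while a format 2 image with (hypothetical)
 * id "1234abcd" gets "rbd_header.1234abcd".
 */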
5261
200a6a8b
AE
5262static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5263{
6fd48b3b 5264 rbd_dev_unprobe(rbd_dev);
200a6a8b 5265 kfree(rbd_dev->header_name);
6fd48b3b
AE
5266 rbd_dev->header_name = NULL;
5267 rbd_dev->image_format = 0;
5268 kfree(rbd_dev->spec->image_id);
5269 rbd_dev->spec->image_id = NULL;
5270
200a6a8b
AE
5271 rbd_dev_destroy(rbd_dev);
5272}
5273
a30b71b9
AE
5274/*
5275 * Probe for the existence of the header object for the given rbd
1f3ef788
AE
5276 * device. If this image is the one being mapped (i.e., not a
5277 * parent), initiate a watch on its header object before using that
5278 * object to get detailed information about the rbd image.
a30b71b9 5279 */
1f3ef788 5280static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
a30b71b9
AE
5281{
5282 int ret;
5283
5284 /*
3abef3b3
AE
5285 * Get the id from the image id object. Unless there's an
5286 * error, rbd_dev->spec->image_id will be filled in with
5287 * a dynamically-allocated string, and rbd_dev->image_format
5288 * will be set to either 1 or 2.
a30b71b9
AE
5289 */
5290 ret = rbd_dev_image_id(rbd_dev);
5291 if (ret)
c0fba368 5292 return ret;
c0fba368 5293
332bb12d
AE
5294 ret = rbd_dev_header_name(rbd_dev);
5295 if (ret)
5296 goto err_out_format;
5297
1f3ef788 5298 if (mapping) {
fca27065 5299 ret = rbd_dev_header_watch_sync(rbd_dev);
1f3ef788
AE
5300 if (ret)
5301 goto out_header_name;
5302 }
b644de2b 5303
a720ae09 5304 ret = rbd_dev_header_info(rbd_dev);
5655c4d9 5305 if (ret)
b644de2b 5306 goto err_out_watch;
83a06263 5307
04077599
ID
5308 /*
5309 * If this image is the one being mapped, we have pool name and
5310 * id, image name and id, and snap name - need to fill snap id.
5311 * Otherwise this is a parent image, identified by pool, image
5312 * and snap ids - need to fill in names for those ids.
5313 */
5314 if (mapping)
5315 ret = rbd_spec_fill_snap_id(rbd_dev);
5316 else
5317 ret = rbd_spec_fill_names(rbd_dev);
9bb81c9b 5318 if (ret)
33dca39f 5319 goto err_out_probe;
9bb81c9b 5320
e8f59b59
ID
5321 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5322 ret = rbd_dev_v2_parent_info(rbd_dev);
5323 if (ret)
5324 goto err_out_probe;
5325
5326 /*
5327 * Need to warn users if this image is the one being
5328 * mapped and has a parent.
5329 */
5330 if (mapping && rbd_dev->parent_spec)
5331 rbd_warn(rbd_dev,
5332 "WARNING: kernel layering is EXPERIMENTAL!");
5333 }
5334
9bb81c9b 5335 ret = rbd_dev_probe_parent(rbd_dev);
30d60ba2
AE
5336 if (ret)
5337 goto err_out_probe;
5338
5339 dout("discovered format %u image, header name is %s\n",
5340 rbd_dev->image_format, rbd_dev->header_name);
30d60ba2 5341 return 0;
e8f59b59 5342
6fd48b3b
AE
5343err_out_probe:
5344 rbd_dev_unprobe(rbd_dev);
b644de2b 5345err_out_watch:
fca27065
ID
5346 if (mapping)
5347 rbd_dev_header_unwatch_sync(rbd_dev);
332bb12d
AE
5348out_header_name:
5349 kfree(rbd_dev->header_name);
5350 rbd_dev->header_name = NULL;
5351err_out_format:
5352 rbd_dev->image_format = 0;
5655c4d9
AE
5353 kfree(rbd_dev->spec->image_id);
5354 rbd_dev->spec->image_id = NULL;
a30b71b9
AE
5355 return ret;
5356}
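/*
 * Note: the probe sequence above is strictly ordered -- image id, header
 * object name, (optional) header watch, header info, spec/name fill-in,
 * parent info, then parent probe -- and the error labels unwind it in
 * exactly the reverse order, which is why each step jumps to a different
 * label on failure.
 */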
5357
9b60e70b
ID
5358static ssize_t do_rbd_add(struct bus_type *bus,
5359 const char *buf,
5360 size_t count)
602adf40 5361{
cb8627c7 5362 struct rbd_device *rbd_dev = NULL;
dc79b113 5363 struct ceph_options *ceph_opts = NULL;
4e9afeba 5364 struct rbd_options *rbd_opts = NULL;
859c31df 5365 struct rbd_spec *spec = NULL;
9d3997fd 5366 struct rbd_client *rbdc;
51344a38 5367 bool read_only;
27cc2594 5368 int rc = -ENOMEM;
602adf40
YS
5369
5370 if (!try_module_get(THIS_MODULE))
5371 return -ENODEV;
5372
602adf40 5373 /* parse add command */
859c31df 5374 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
dc79b113 5375 if (rc < 0)
bd4ba655 5376 goto err_out_module;
51344a38
AE
5377 read_only = rbd_opts->read_only;
5378 kfree(rbd_opts);
5379 rbd_opts = NULL; /* done with this */
78cea76e 5380
9d3997fd
AE
5381 rbdc = rbd_get_client(ceph_opts);
5382 if (IS_ERR(rbdc)) {
5383 rc = PTR_ERR(rbdc);
0ddebc0c 5384 goto err_out_args;
9d3997fd 5385 }
602adf40 5386
602adf40 5387 /* pick the pool */
30ba1f02 5388 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
602adf40
YS
5389 if (rc < 0)
5390 goto err_out_client;
c0cd10db 5391 spec->pool_id = (u64)rc;
859c31df 5392
0903e875
AE
5393 /* The ceph file layout needs the pool id to fit in 32 bits */
5394
c0cd10db 5395 if (spec->pool_id > (u64)U32_MAX) {
9584d508 5396 rbd_warn(NULL, "pool id too large (%llu > %u)",
c0cd10db 5397 (unsigned long long)spec->pool_id, U32_MAX);
0903e875
AE
5398 rc = -EIO;
5399 goto err_out_client;
5400 }
5401
c53d5893 5402 rbd_dev = rbd_dev_create(rbdc, spec);
bd4ba655
AE
5403 if (!rbd_dev)
5404 goto err_out_client;
c53d5893
AE
5405 rbdc = NULL; /* rbd_dev now owns this */
5406 spec = NULL; /* rbd_dev now owns this */
602adf40 5407
1f3ef788 5408 rc = rbd_dev_image_probe(rbd_dev, true);
a30b71b9 5409 if (rc < 0)
c53d5893 5410 goto err_out_rbd_dev;
05fd6f6f 5411
7ce4eef7
AE
5412 /* If we are mapping a snapshot it must be marked read-only */
5413
5414 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5415 read_only = true;
5416 rbd_dev->mapping.read_only = read_only;
5417
b536f69a 5418 rc = rbd_dev_device_setup(rbd_dev);
3abef3b3 5419 if (rc) {
e37180c0
ID
5420 /*
5421 * rbd_dev_header_unwatch_sync() can't be moved into
5422 * rbd_dev_image_release() without refactoring, see
5423 * commit 1f3ef78861ac.
5424 */
5425 rbd_dev_header_unwatch_sync(rbd_dev);
3abef3b3
AE
5426 rbd_dev_image_release(rbd_dev);
5427 goto err_out_module;
5428 }
5429
5430 return count;
b536f69a 5431
c53d5893
AE
5432err_out_rbd_dev:
5433 rbd_dev_destroy(rbd_dev);
bd4ba655 5434err_out_client:
9d3997fd 5435 rbd_put_client(rbdc);
0ddebc0c 5436err_out_args:
859c31df 5437 rbd_spec_put(spec);
bd4ba655
AE
5438err_out_module:
5439 module_put(THIS_MODULE);
27cc2594 5440
602adf40 5441 dout("Error adding device %s\n", buf);
27cc2594 5442
c0cd10db 5443 return (ssize_t)rc;
602adf40
YS
5444}
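/*
 * Illustrative usage (see Documentation/ABI/testing/sysfs-bus-rbd for the
 * authoritative format): an image is mapped by writing the monitor
 * addresses, options, pool, image and optional snapshot name to the add
 * file, roughly
 *
 *	echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *		> /sys/bus/rbd/add
 *
 * On success the write returns the full count and the new device appears
 * as /dev/rbd<dev_id>.  When the module is loaded with single_major, the
 * add_single_major attribute is used instead.
 */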
5445
9b60e70b
ID
5446static ssize_t rbd_add(struct bus_type *bus,
5447 const char *buf,
5448 size_t count)
5449{
5450 if (single_major)
5451 return -EINVAL;
5452
5453 return do_rbd_add(bus, buf, count);
5454}
5455
5456static ssize_t rbd_add_single_major(struct bus_type *bus,
5457 const char *buf,
5458 size_t count)
5459{
5460 return do_rbd_add(bus, buf, count);
5461}
5462
200a6a8b 5463static void rbd_dev_device_release(struct device *dev)
602adf40 5464{
593a9e7b 5465 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 5466
602adf40 5467 rbd_free_disk(rbd_dev);
200a6a8b 5468 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6d80b130 5469 rbd_dev_mapping_clear(rbd_dev);
9b60e70b
ID
5470 if (!single_major)
5471 unregister_blkdev(rbd_dev->major, rbd_dev->name);
e2839308 5472 rbd_dev_id_put(rbd_dev);
d1cf5788 5473 rbd_dev_mapping_clear(rbd_dev);
602adf40
YS
5474}
5475
05a46afd
AE
5476static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5477{
ad945fc1 5478 while (rbd_dev->parent) {
05a46afd
AE
5479 struct rbd_device *first = rbd_dev;
5480 struct rbd_device *second = first->parent;
5481 struct rbd_device *third;
5482
5483 /*
5484 * Follow to the parent with no grandparent and
5485 * remove it.
5486 */
5487 while (second && (third = second->parent)) {
5488 first = second;
5489 second = third;
5490 }
ad945fc1 5491 rbd_assert(second);
8ad42cd0 5492 rbd_dev_image_release(second);
ad945fc1
AE
5493 first->parent = NULL;
5494 first->parent_overlap = 0;
5495
5496 rbd_assert(first->parent_spec);
05a46afd
AE
5497 rbd_spec_put(first->parent_spec);
5498 first->parent_spec = NULL;
05a46afd
AE
5499 }
5500}
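/*
 * Illustrative note: for a chain a (mapped) -> b -> c, the loop above
 * releases c first, then b; it always tears down the deepest ancestor
 * that has no parent of its own before moving back toward the mapped
 * image.
 */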
5501
9b60e70b
ID
5502static ssize_t do_rbd_remove(struct bus_type *bus,
5503 const char *buf,
5504 size_t count)
602adf40
YS
5505{
5506 struct rbd_device *rbd_dev = NULL;
751cc0e3
AE
5507 struct list_head *tmp;
5508 int dev_id;
602adf40 5509 unsigned long ul;
82a442d2 5510 bool already = false;
0d8189e1 5511 int ret;
602adf40 5512
bb8e0e84 5513 ret = kstrtoul(buf, 10, &ul);
0d8189e1
AE
5514 if (ret)
5515 return ret;
602adf40
YS
5516
5517 /* convert to int; abort if we lost anything in the conversion */
751cc0e3
AE
5518 dev_id = (int)ul;
5519 if (dev_id != ul)
602adf40
YS
5520 return -EINVAL;
5521
751cc0e3
AE
5522 ret = -ENOENT;
5523 spin_lock(&rbd_dev_list_lock);
5524 list_for_each(tmp, &rbd_dev_list) {
5525 rbd_dev = list_entry(tmp, struct rbd_device, node);
5526 if (rbd_dev->dev_id == dev_id) {
5527 ret = 0;
5528 break;
5529 }
42382b70 5530 }
751cc0e3
AE
5531 if (!ret) {
5532 spin_lock_irq(&rbd_dev->lock);
5533 if (rbd_dev->open_count)
5534 ret = -EBUSY;
5535 else
82a442d2
AE
5536 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5537 &rbd_dev->flags);
751cc0e3
AE
5538 spin_unlock_irq(&rbd_dev->lock);
5539 }
5540 spin_unlock(&rbd_dev_list_lock);
82a442d2 5541 if (ret < 0 || already)
1ba0f1e7 5542 return ret;
751cc0e3 5543
fca27065 5544 rbd_dev_header_unwatch_sync(rbd_dev);
9abc5990
JD
5545 /*
5546 * Flush remaining watch callbacks - these must be complete
5547 * before the osd_client is shut down.
5548 */
5549 dout("%s: flushing notifies", __func__);
5550 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
fca27065 5551
9875201e
JD
5552 /*
5553 * Don't free anything from rbd_dev->disk until after all
5554 * notifies are completely processed. Otherwise
5555 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5556 * in a potential use after free of rbd_dev->disk or rbd_dev.
5557 */
5558 rbd_bus_del_dev(rbd_dev);
8ad42cd0 5559 rbd_dev_image_release(rbd_dev);
79ab7558 5560 module_put(THIS_MODULE);
aafb230e 5561
1ba0f1e7 5562 return count;
602adf40
YS
5563}
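/*
 * Illustrative usage: removal is requested by writing the numeric device
 * id to the remove file (remove_single_major when the module uses a
 * single major), e.g.
 *
 *	echo 3 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the block device is still open, and
 * with -ENOENT if no mapped device has that id.
 */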
5564
9b60e70b
ID
5565static ssize_t rbd_remove(struct bus_type *bus,
5566 const char *buf,
5567 size_t count)
5568{
5569 if (single_major)
5570 return -EINVAL;
5571
5572 return do_rbd_remove(bus, buf, count);
5573}
5574
5575static ssize_t rbd_remove_single_major(struct bus_type *bus,
5576 const char *buf,
5577 size_t count)
5578{
5579 return do_rbd_remove(bus, buf, count);
5580}
5581
602adf40
YS
5582/*
5583 * create control files in sysfs
dfc5606d 5584 * /sys/bus/rbd/...
602adf40
YS
5585 */
5586static int rbd_sysfs_init(void)
5587{
dfc5606d 5588 int ret;
602adf40 5589
fed4c143 5590 ret = device_register(&rbd_root_dev);
21079786 5591 if (ret < 0)
dfc5606d 5592 return ret;
602adf40 5593
fed4c143
AE
5594 ret = bus_register(&rbd_bus_type);
5595 if (ret < 0)
5596 device_unregister(&rbd_root_dev);
602adf40 5597
602adf40
YS
5598 return ret;
5599}
5600
5601static void rbd_sysfs_cleanup(void)
5602{
dfc5606d 5603 bus_unregister(&rbd_bus_type);
fed4c143 5604 device_unregister(&rbd_root_dev);
602adf40
YS
5605}
5606
1c2a9dfe
AE
5607static int rbd_slab_init(void)
5608{
5609 rbd_assert(!rbd_img_request_cache);
5610 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5611 sizeof (struct rbd_img_request),
5612 __alignof__(struct rbd_img_request),
5613 0, NULL);
868311b1
AE
5614 if (!rbd_img_request_cache)
5615 return -ENOMEM;
5616
5617 rbd_assert(!rbd_obj_request_cache);
5618 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5619 sizeof (struct rbd_obj_request),
5620 __alignof__(struct rbd_obj_request),
5621 0, NULL);
78c2a44a
AE
5622 if (!rbd_obj_request_cache)
5623 goto out_err;
5624
5625 rbd_assert(!rbd_segment_name_cache);
5626 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
2d0ebc5d 5627 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
78c2a44a 5628 if (rbd_segment_name_cache)
1c2a9dfe 5629 return 0;
78c2a44a
AE
5630out_err:
5631 if (rbd_obj_request_cache) {
5632 kmem_cache_destroy(rbd_obj_request_cache);
5633 rbd_obj_request_cache = NULL;
5634 }
1c2a9dfe 5635
868311b1
AE
5636 kmem_cache_destroy(rbd_img_request_cache);
5637 rbd_img_request_cache = NULL;
5638
1c2a9dfe
AE
5639 return -ENOMEM;
5640}
5641
5642static void rbd_slab_exit(void)
5643{
78c2a44a
AE
5644 rbd_assert(rbd_segment_name_cache);
5645 kmem_cache_destroy(rbd_segment_name_cache);
5646 rbd_segment_name_cache = NULL;
5647
868311b1
AE
5648 rbd_assert(rbd_obj_request_cache);
5649 kmem_cache_destroy(rbd_obj_request_cache);
5650 rbd_obj_request_cache = NULL;
5651
1c2a9dfe
AE
5652 rbd_assert(rbd_img_request_cache);
5653 kmem_cache_destroy(rbd_img_request_cache);
5654 rbd_img_request_cache = NULL;
5655}
5656
cc344fa1 5657static int __init rbd_init(void)
602adf40
YS
5658{
5659 int rc;
5660
1e32d34c
AE
5661 if (!libceph_compatible(NULL)) {
5662 rbd_warn(NULL, "libceph incompatibility (quitting)");
1e32d34c
AE
5663 return -EINVAL;
5664 }
e1b4d96d 5665
1c2a9dfe 5666 rc = rbd_slab_init();
602adf40
YS
5667 if (rc)
5668 return rc;
e1b4d96d 5669
f5ee37bd
ID
5670 /*
5671 * The number of active work items is limited by the number of
5672 * rbd devices, so leave @max_active at default.
5673 */
5674 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5675 if (!rbd_wq) {
5676 rc = -ENOMEM;
5677 goto err_out_slab;
5678 }
5679
9b60e70b
ID
5680 if (single_major) {
5681 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5682 if (rbd_major < 0) {
5683 rc = rbd_major;
f5ee37bd 5684 goto err_out_wq;
9b60e70b
ID
5685 }
5686 }
5687
1c2a9dfe
AE
5688 rc = rbd_sysfs_init();
5689 if (rc)
9b60e70b
ID
5690 goto err_out_blkdev;
5691
5692 if (single_major)
5693 pr_info("loaded (major %d)\n", rbd_major);
5694 else
5695 pr_info("loaded\n");
1c2a9dfe 5696
e1b4d96d
ID
5697 return 0;
5698
9b60e70b
ID
5699err_out_blkdev:
5700 if (single_major)
5701 unregister_blkdev(rbd_major, RBD_DRV_NAME);
f5ee37bd
ID
5702err_out_wq:
5703 destroy_workqueue(rbd_wq);
e1b4d96d
ID
5704err_out_slab:
5705 rbd_slab_exit();
1c2a9dfe 5706 return rc;
602adf40
YS
5707}
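/*
 * Note: module init sets things up in the order slab caches, workqueue,
 * (optional) shared block major, sysfs files, and rbd_exit() below tears
 * them down in essentially the reverse order.  The shared major is only
 * registered when the module is loaded with the single_major parameter
 * set.
 */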
5708
cc344fa1 5709static void __exit rbd_exit(void)
602adf40 5710{
ffe312cf 5711 ida_destroy(&rbd_dev_id_ida);
602adf40 5712 rbd_sysfs_cleanup();
9b60e70b
ID
5713 if (single_major)
5714 unregister_blkdev(rbd_major, RBD_DRV_NAME);
f5ee37bd 5715 destroy_workqueue(rbd_wq);
1c2a9dfe 5716 rbd_slab_exit();
602adf40
YS
5717}
5718
5719module_init(rbd_init);
5720module_exit(rbd_exit);
5721
d552c619 5722MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
602adf40
YS
5723MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5724MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
602adf40
YS
5725/* following authorship retained from original osdblk.c */
5726MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5727
90da258b 5728MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
602adf40 5729MODULE_LICENSE("GPL");