/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
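/*
 * 510 snapshot ids at 8 bytes each is 4080 bytes, leaving room for
 * the snapshot context's fixed-size header within one 4KB page.
 */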

#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
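/*
 * The arithmetic: each byte of an int contributes at most 2.5 decimal
 * digits (2^8 = 256, and log10(256) is about 2.4), and the "+ 1"
 * leaves room for a sign.  For a 4-byte int that gives
 * (5 * 4) / 2 + 1 = 11 characters, enough to format "-2147483648".
 */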

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 stripe_unit;
	u64 stripe_count;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
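
/*
 * The three cases described above, at a glance:
 *
 *	standalone:	 which == BAD_WHICH, obj_request is NULL
 *	existence check: which == BAD_WHICH, obj_request is non-null
 *	image data:	 which < img_request->obj_request_count,
 *			 img_request is non-null
 */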

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_snap {
	const char		*name;
	u64			size;
	struct list_head	node;
	u64			id;
	u64			features;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);

static void rbd_dev_device_release(struct device *dev);
static void rbd_snap_destroy(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
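
/*
 * These options arrive via the sysfs "add" interface; per
 * Documentation/ABI/testing/sysfs-bus-rbd, a map request looks like:
 *
 *	echo "<mon addrs> <options> <pool> <image> [<snap>]" \
 *		> /sys/bus/rbd/add
 *
 * so e.g. "ro" would appear in the <options> field.  (The echo line
 * here is illustrative; the ABI document is authoritative.)
 */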

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock itself to remove the client from the
 * client list, so callers must not already hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
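
/*
 * Taken together, the order checks above bound the object order to
 * the range [SECTOR_SHIFT, 31]: each object must span at least one
 * 512-byte sector, and (1 << order) must still fit in a signed int.
 */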

/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			return -EIO;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);

	header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev,
					const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (!strcmp(snap_name, snap->name))
			return snap;

	return NULL;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
	} else {
		struct rbd_snap *snap;

		snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (!snap)
			return -ENOENT;
		rbd_dev->mapping.size = snap->size;
		rbd_dev->mapping.features = snap->features;
		rbd_dev->mapping.read_only = true;
	}

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
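
/*
 * A worked example of the segment math, assuming the default object
 * order of 22 (4MB objects): image offset 19088743 (0x01234567)
 * yields segment 0x01234567 >> 22 == 4, hence an object named
 * "<object_prefix>.000000000004", and an in-object offset of
 * 0x01234567 & 0x003fffff == 0x00234567.
 */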

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

1076
1077/*
1078 * Clone a portion of a bio chain, starting at the given byte offset
1079 * into the first bio in the source chain and continuing for the
1080 * number of bytes indicated. The result is another bio chain of
1081 * exactly the given length, or a null pointer on error.
1082 *
1083 * The bio_src and offset parameters are both in-out. On entry they
1084 * refer to the first source bio and the offset into that bio where
1085 * the start of data to be cloned is located.
1086 *
1087 * On return, bio_src is updated to refer to the bio in the source
1088 * chain that contains first un-cloned byte, and *offset will
1089 * contain the offset of that byte within that bio.
1090 */
1091static struct bio *bio_chain_clone_range(struct bio **bio_src,
1092 unsigned int *offset,
1093 unsigned int len,
1094 gfp_t gfpmask)
1095{
1096 struct bio *bi = *bio_src;
1097 unsigned int off = *offset;
1098 struct bio *chain = NULL;
1099 struct bio **end;
1100
1101 /* Build up a chain of clone bios up to the limit */
1102
1103 if (!bi || off >= bi->bi_size || !len)
1104 return NULL; /* Nothing to clone */
602adf40 1105
f7760dad
AE
1106 end = &chain;
1107 while (len) {
1108 unsigned int bi_size;
1109 struct bio *bio;
1110
f5400b7a
AE
1111 if (!bi) {
1112 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
f7760dad 1113 goto out_err; /* EINVAL; ran out of bio's */
f5400b7a 1114 }
f7760dad
AE
1115 bi_size = min_t(unsigned int, bi->bi_size - off, len);
1116 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1117 if (!bio)
1118 goto out_err; /* ENOMEM */
1119
1120 *end = bio;
1121 end = &bio->bi_next;
602adf40 1122
f7760dad
AE
1123 off += bi_size;
1124 if (off == bi->bi_size) {
1125 bi = bi->bi_next;
1126 off = 0;
1127 }
1128 len -= bi_size;
1129 }
1130 *bio_src = bi;
1131 *offset = off;
1132
1133 return chain;
1134out_err:
1135 bio_chain_put(chain);
602adf40 1136
602adf40
YS
1137 return NULL;
1138}
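
/*
 * A sketch of intended use (names here are illustrative): to split one
 * incoming chain into per-object pieces, call this repeatedly and let
 * it advance the cursor through the source chain:
 *
 *	struct bio *bio = rq_bio;	(cursor into the source chain)
 *	unsigned int off = 0;
 *	struct bio *clone;
 *
 *	clone = bio_chain_clone_range(&bio, &off, piece_len, GFP_ATOMIC);
 *
 * On return "bio" and "off" already identify the first byte of the
 * next piece, so the next call picks up where this one left off.
 */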

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, so the
 * first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the later one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
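
/*
 * Note the ordering in obj_request_existence_set(): EXISTS is set
 * before KNOWN, and both bits live in the same flags word, so a
 * caller that observes KNOWN set via obj_request_known_test() can
 * trust the value it then reads from obj_request_exists_test().
 */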

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
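
/*
 * In each set/test pair above, the barrier after set_bit() pairs
 * with the barrier before test_bit(), so flags set while the image
 * request is being initialized are visible to whichever thread
 * later examines them.
 */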

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops:
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}

1705/*
1706 * Caller is responsible for filling in the list of object requests
1707 * that comprises the image request, and the Linux request pointer
1708 * (if there is one).
1709 */
cc344fa1
AE
1710static struct rbd_img_request *rbd_img_request_create(
1711 struct rbd_device *rbd_dev,
bf0d5f50 1712 u64 offset, u64 length,
9849e986
AE
1713 bool write_request,
1714 bool child_request)
bf0d5f50
AE
1715{
1716 struct rbd_img_request *img_request;
bf0d5f50
AE
1717
1718 img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1719 if (!img_request)
1720 return NULL;
1721
1722 if (write_request) {
1723 down_read(&rbd_dev->header_rwsem);
1724 ceph_get_snap_context(rbd_dev->header.snapc);
1725 up_read(&rbd_dev->header_rwsem);
1726 }
1727
1728 img_request->rq = NULL;
1729 img_request->rbd_dev = rbd_dev;
1730 img_request->offset = offset;
1731 img_request->length = length;
1732 img_request->flags = 0;
1733 if (write_request) {
1734 img_request_write_set(img_request);
1735 img_request->snapc = rbd_dev->header.snapc;
1736 } else {
1737 img_request->snap_id = rbd_dev->spec->snap_id;
1738 }
1739 if (child_request)
1740 img_request_child_set(img_request);
1741 if (rbd_dev->parent_spec)
1742 img_request_layered_set(img_request);
1743 spin_lock_init(&img_request->completion_lock);
1744 img_request->next_completion = 0;
1745 img_request->callback = NULL;
1746 img_request->result = 0;
1747 img_request->obj_request_count = 0;
1748 INIT_LIST_HEAD(&img_request->obj_requests);
1749 kref_init(&img_request->kref);
1750
1751 rbd_img_request_get(img_request); /* Avoid a warning */
1752 rbd_img_request_put(img_request); /* TEMPORARY */
1753
1754 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1755 write_request ? "write" : "read", offset, length,
1756 img_request);
1757
1758 return img_request;
1759}
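/*
 * Note on the write path above: the snapshot context reference taken
 * under header_rwsem pins the snapc for the life of the image request;
 * rbd_img_request_destroy() drops it with ceph_put_snap_context(), and
 * only for write requests.
 */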
1760
1761static void rbd_img_request_destroy(struct kref *kref)
1762{
1763 struct rbd_img_request *img_request;
1764 struct rbd_obj_request *obj_request;
1765 struct rbd_obj_request *next_obj_request;
1766
1767 img_request = container_of(kref, struct rbd_img_request, kref);
1768
1769 dout("%s: img %p\n", __func__, img_request);
1770
1771 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1772 rbd_img_obj_request_del(img_request, obj_request);
1773 rbd_assert(img_request->obj_request_count == 0);
1774
1775 if (img_request_write_test(img_request))
1776 ceph_put_snap_context(img_request->snapc);
1777
1778 if (img_request_child_test(img_request))
1779 rbd_obj_request_put(img_request->obj_request);
1780
1781 kfree(img_request);
1782}
1783
1784static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1785{
1786 struct rbd_img_request *img_request;
1787 unsigned int xferred;
1788 int result;
1789 bool more;
1790
1791 rbd_assert(obj_request_img_data_test(obj_request));
1792 img_request = obj_request->img_request;
1793
1794 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
1795 xferred = (unsigned int)obj_request->xferred;
1796 result = obj_request->result;
1797 if (result) {
1798 struct rbd_device *rbd_dev = img_request->rbd_dev;
1799
1800 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
1801 img_request_write_test(img_request) ? "write" : "read",
1802 obj_request->length, obj_request->img_offset,
1803 obj_request->offset);
1804 rbd_warn(rbd_dev, " result %d xferred %x\n",
1805 result, xferred);
1806 if (!img_request->result)
1807 img_request->result = result;
1808 }
1809
1810 /* Image object requests don't own their page array */
1811
1812 if (obj_request->type == OBJ_REQUEST_PAGES) {
1813 obj_request->pages = NULL;
1814 obj_request->page_count = 0;
1815 }
1816
1817 if (img_request_child_test(img_request)) {
1818 rbd_assert(img_request->obj_request != NULL);
1819 more = obj_request->which < img_request->obj_request_count - 1;
1820 } else {
1821 rbd_assert(img_request->rq != NULL);
1822 more = blk_end_request(img_request->rq, result, xferred);
1823 }
1824
1825 return more;
1826}
1827
1828static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1829{
1830 struct rbd_img_request *img_request;
1831 u32 which = obj_request->which;
1832 bool more = true;
1833
1834 rbd_assert(obj_request_img_data_test(obj_request));
1835 img_request = obj_request->img_request;
1836
1837 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1838 rbd_assert(img_request != NULL);
1839 rbd_assert(img_request->obj_request_count > 0);
1840 rbd_assert(which != BAD_WHICH);
1841 rbd_assert(which < img_request->obj_request_count);
1842 rbd_assert(which >= img_request->next_completion);
1843
1844 spin_lock_irq(&img_request->completion_lock);
1845 if (which != img_request->next_completion)
1846 goto out;
1847
1848 for_each_obj_request_from(img_request, obj_request) {
1849 rbd_assert(more);
1850 rbd_assert(which < img_request->obj_request_count);
1851
1852 if (!obj_request_done_test(obj_request))
1853 break;
1854 more = rbd_img_obj_end_request(obj_request);
1855 which++;
1856 }
1857
1858 rbd_assert(more ^ (which == img_request->obj_request_count));
1859 img_request->next_completion = which;
1860out:
1861 spin_unlock_irq(&img_request->completion_lock);
1862
1863 if (!more)
1864 rbd_img_request_complete(img_request);
1865}
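/*
 * To illustrate the completion-window logic above: object requests can
 * finish out of order, but blk_end_request() must see transferred
 * bytes in order.  If requests 0..3 are outstanding and 2 completes
 * first, the callback only records it as done; when 0 later completes,
 * the loop sweeps forward through every contiguously-completed request
 * and advances next_completion past them all.
 */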
1866
1867/*
1868 * Split up an image request into one or more object requests, each
1869 * to a different object. The "type" parameter indicates whether
1870 * "data_desc" is the pointer to the head of a list of bio
1871 * structures, or the base of a page array. In either case this
1872 * function assumes data_desc describes memory sufficient to hold
1873 * all data described by the image request.
1874 */
1875static int rbd_img_request_fill(struct rbd_img_request *img_request,
1876 enum obj_request_type type,
1877 void *data_desc)
1878{
1879 struct rbd_device *rbd_dev = img_request->rbd_dev;
1880 struct rbd_obj_request *obj_request = NULL;
1881 struct rbd_obj_request *next_obj_request;
1882 bool write_request = img_request_write_test(img_request);
1883 struct bio *bio_list;
1884 unsigned int bio_offset = 0;
1885 struct page **pages;
1886 u64 img_offset;
1887 u64 resid;
1888 u16 opcode;
1889
1890 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
1891 (int)type, data_desc);
1892
1893 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
1894 img_offset = img_request->offset;
1895 resid = img_request->length;
1896 rbd_assert(resid > 0);
1897
1898 if (type == OBJ_REQUEST_BIO) {
1899 bio_list = data_desc;
1900 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
1901 } else {
1902 rbd_assert(type == OBJ_REQUEST_PAGES);
1903 pages = data_desc;
1904 }
1905
1906 while (resid) {
1907 struct ceph_osd_request *osd_req;
1908 const char *object_name;
1909 u64 offset;
1910 u64 length;
1911
1912 object_name = rbd_segment_name(rbd_dev, img_offset);
1913 if (!object_name)
1914 goto out_unwind;
1915 offset = rbd_segment_offset(rbd_dev, img_offset);
1916 length = rbd_segment_length(rbd_dev, img_offset, resid);
1917 obj_request = rbd_obj_request_create(object_name,
1918 offset, length, type);
1919 kfree(object_name); /* object request has its own copy */
1920 if (!obj_request)
1921 goto out_unwind;
1922
1923 if (type == OBJ_REQUEST_BIO) {
1924 unsigned int clone_size;
1925
1926 rbd_assert(length <= (u64)UINT_MAX);
1927 clone_size = (unsigned int)length;
1928 obj_request->bio_list =
1929 bio_chain_clone_range(&bio_list,
1930 &bio_offset,
1931 clone_size,
1932 GFP_ATOMIC);
1933 if (!obj_request->bio_list)
1934 goto out_partial;
1935 } else {
1936 unsigned int page_count;
1937
1938 obj_request->pages = pages;
1939 page_count = (u32)calc_pages_for(offset, length);
1940 obj_request->page_count = page_count;
1941 if ((offset + length) & ~PAGE_MASK)
1942 page_count--; /* more on last page */
1943 pages += page_count;
1944 }
1945
1946 osd_req = rbd_osd_req_create(rbd_dev, write_request,
1947 obj_request);
1948 if (!osd_req)
1949 goto out_partial;
1950 obj_request->osd_req = osd_req;
1951 obj_request->callback = rbd_img_obj_callback;
1952
1953 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
1954 0, 0);
1955 if (type == OBJ_REQUEST_BIO)
1956 osd_req_op_extent_osd_data_bio(osd_req, 0,
1957 obj_request->bio_list, length);
1958 else
1959 osd_req_op_extent_osd_data_pages(osd_req, 0,
1960 obj_request->pages, length,
1961 offset & ~PAGE_MASK, false, false);
1962
1963 if (write_request)
1964 rbd_osd_req_format_write(obj_request);
1965 else
1966 rbd_osd_req_format_read(obj_request);
1967
1968 obj_request->img_offset = img_offset;
1969 rbd_img_obj_request_add(img_request, obj_request);
1970
1971 img_offset += length;
1972 resid -= length;
1973 }
1974
1975 return 0;
1976
1977out_partial:
1978 rbd_obj_request_put(obj_request);
1979out_unwind:
1980 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1981 rbd_obj_request_put(obj_request);
1982
1983 return -ENOMEM;
1984}
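/*
 * A worked example with illustrative numbers: for the default object
 * order of 22 (4 MiB objects), an 8 MiB image request starting at
 * img_offset 6 MiB is split by the loop above into three object
 * requests:
 *
 *	object 1: offset 2 MiB, length 2 MiB	(up to the boundary)
 *	object 2: offset 0,     length 4 MiB
 *	object 3: offset 0,     length 2 MiB
 */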
1985
1986static void
1987rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
1988{
1989 struct rbd_img_request *img_request;
1990 struct rbd_device *rbd_dev;
1991 u64 length;
1992 u32 page_count;
1993
1994 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
1995 rbd_assert(obj_request_img_data_test(obj_request));
1996 img_request = obj_request->img_request;
1997 rbd_assert(img_request);
1998
1999 rbd_dev = img_request->rbd_dev;
2000 rbd_assert(rbd_dev);
2001 length = (u64)1 << rbd_dev->header.obj_order;
2002 page_count = (u32)calc_pages_for(0, length);
2003
2004 rbd_assert(obj_request->copyup_pages);
2005 ceph_release_page_vector(obj_request->copyup_pages, page_count);
2006 obj_request->copyup_pages = NULL;
2007
2008 /*
2009 * We want the transfer count to reflect the size of the
2010 * original write request. There is no such thing as a
2011 * successful short write, so if the request was successful
2012 * we can just set it to the originally-requested length.
2013 */
2014 if (!obj_request->result)
2015 obj_request->xferred = obj_request->length;
2016
2017 /* Finish up with the normal image object callback */
2018
2019 rbd_img_obj_callback(obj_request);
2020}
2021
2022static void
2023rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2024{
2025 struct rbd_obj_request *orig_request;
2026 struct ceph_osd_request *osd_req;
2027 struct ceph_osd_client *osdc;
2028 struct rbd_device *rbd_dev;
2029 struct page **pages;
2030 int result;
2031 u64 obj_size;
2032 u64 xferred;
2033
2034 rbd_assert(img_request_child_test(img_request));
2035
2036 /* First get what we need from the image request */
2037
2038 pages = img_request->copyup_pages;
2039 rbd_assert(pages != NULL);
2040 img_request->copyup_pages = NULL;
2041
2042 orig_request = img_request->obj_request;
2043 rbd_assert(orig_request != NULL);
2044 rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
2045 result = img_request->result;
2046 obj_size = img_request->length;
2047 xferred = img_request->xferred;
2048
2049 rbd_dev = img_request->rbd_dev;
2050 rbd_assert(rbd_dev);
2051 rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2052
2053 rbd_img_request_put(img_request);
2054
2055 if (result)
2056 goto out_err;
2057
2058 /* Allocate the new copyup osd request for the original request */
2059
2060 result = -ENOMEM;
2061 rbd_assert(!orig_request->osd_req);
2062 osd_req = rbd_osd_req_create_copyup(orig_request);
2063 if (!osd_req)
2064 goto out_err;
2065 orig_request->osd_req = osd_req;
2066 orig_request->copyup_pages = pages;
2067
2068 /* Initialize the copyup op */
2069
2070 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2071 osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
2072 false, false);
2073
2074 /* Then the original write request op */
2075
2076 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2077 orig_request->offset,
2078 orig_request->length, 0, 0);
2079 osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
2080 orig_request->length);
2081
2082 rbd_osd_req_format_write(orig_request);
2083
2084 /* All set, send it off. */
2085
2086 orig_request->callback = rbd_img_obj_copyup_callback;
2087 osdc = &rbd_dev->rbd_client->client->osdc;
2088 result = rbd_obj_request_submit(osdc, orig_request);
2089 if (!result)
2090 return;
2091out_err:
2092 /* Record the error code and complete the request */
2093
2094 orig_request->result = result;
2095 orig_request->xferred = 0;
2096 obj_request_done_set(orig_request);
2097 rbd_obj_request_complete(orig_request);
2098}
2099
2100/*
2101 * Read from the parent image the range of data that covers the
2102 * entire target of the given object request. This is used for
2103 * satisfying a layered image write request when the target of an
2104 * object request from the image request does not exist.
2105 *
2106 * A page array big enough to hold the returned data is allocated
2107 * and supplied to rbd_img_request_fill() as the "data descriptor."
2108 * When the read completes, this page array will be transferred to
2109 * the original object request for the copyup operation.
2110 *
2111 * If an error occurs, record it as the result of the original
2112 * object request and mark it done so it gets completed.
2113 */
2114static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2115{
2116 struct rbd_img_request *img_request = NULL;
2117 struct rbd_img_request *parent_request = NULL;
2118 struct rbd_device *rbd_dev;
2119 u64 img_offset;
2120 u64 length;
2121 struct page **pages = NULL;
2122 u32 page_count;
2123 int result;
2124
2125 rbd_assert(obj_request_img_data_test(obj_request));
2126 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2127
2128 img_request = obj_request->img_request;
2129 rbd_assert(img_request != NULL);
2130 rbd_dev = img_request->rbd_dev;
2131 rbd_assert(rbd_dev->parent != NULL);
2132
2133 /*
2134 * First things first. The original osd request is of no
2135 * use to us any more; we'll need a new one that can hold
2136 * the two ops in a copyup request. We'll get that later,
2137 * but for now we can release the old one.
2138 */
2139 rbd_osd_req_destroy(obj_request->osd_req);
2140 obj_request->osd_req = NULL;
2141
2142 /*
2143 * Determine the byte range covered by the object in the
2144 * child image to which the original request was to be sent.
2145 */
2146 img_offset = obj_request->img_offset - obj_request->offset;
2147 length = (u64)1 << rbd_dev->header.obj_order;
2148
2149 /*
2150 * There is no defined parent data beyond the parent
2151 * overlap, so limit what we read at that boundary if
2152 * necessary.
2153 */
2154 if (img_offset + length > rbd_dev->parent_overlap) {
2155 rbd_assert(img_offset < rbd_dev->parent_overlap);
2156 length = rbd_dev->parent_overlap - img_offset;
2157 }
2158
2159 /*
2160 * Allocate a page array big enough to receive the data read
2161 * from the parent.
2162 */
2163 page_count = (u32)calc_pages_for(0, length);
2164 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2165 if (IS_ERR(pages)) {
2166 result = PTR_ERR(pages);
2167 pages = NULL;
2168 goto out_err;
2169 }
2170
2171 result = -ENOMEM;
2172 parent_request = rbd_img_request_create(rbd_dev->parent,
2173 img_offset, length,
2174 false, true);
2175 if (!parent_request)
2176 goto out_err;
2177 rbd_obj_request_get(obj_request);
2178 parent_request->obj_request = obj_request;
2179
2180 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2181 if (result)
2182 goto out_err;
2183 parent_request->copyup_pages = pages;
2184
2185 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2186 result = rbd_img_request_submit(parent_request);
2187 if (!result)
2188 return 0;
2189
2190 parent_request->copyup_pages = NULL;
2191 parent_request->obj_request = NULL;
2192 rbd_obj_request_put(obj_request);
2193out_err:
2194 if (pages)
2195 ceph_release_page_vector(pages, page_count);
2196 if (parent_request)
2197 rbd_img_request_put(parent_request);
2198 obj_request->result = result;
2199 obj_request->xferred = 0;
2200 obj_request_done_set(obj_request);
2201
2202 return result;
2203}
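/*
 * Putting the pieces together, a layered write to an object that may
 * not exist yet proceeds roughly as follows:
 *
 *	write -> STAT the target object     (rbd_img_obj_exists_submit)
 *	  -ENOENT -> read full object range (rbd_img_obj_parent_read_full)
 *	    done -> copyup + original write (the callback above)
 *
 * The copyup osd request carries two ops: the "rbd" class "copyup"
 * method call with the parent data, then the original write extent.
 */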
2204
2205static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2206{
2207 struct rbd_obj_request *orig_request;
2208 int result;
2209
2210 rbd_assert(!obj_request_img_data_test(obj_request));
2211
2212 /*
2213 * All we need from the object request is the original
2214 * request and the result of the STAT op. Grab those, then
2215 * we're done with the request.
2216 */
2217 orig_request = obj_request->obj_request;
2218 obj_request->obj_request = NULL;
2219 rbd_assert(orig_request);
2220 rbd_assert(orig_request->img_request);
2221
2222 result = obj_request->result;
2223 obj_request->result = 0;
2224
2225 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2226 obj_request, orig_request, result,
2227 obj_request->xferred, obj_request->length);
2228 rbd_obj_request_put(obj_request);
2229
2230 rbd_assert(orig_request);
2231 rbd_assert(orig_request->img_request);
2232
2233 /*
2234 * Our only purpose here is to determine whether the object
2235 * exists, and we don't want to treat the non-existence as
2236 * an error. If something else comes back, transfer the
2237 * error to the original request and complete it now.
2238 */
2239 if (!result) {
2240 obj_request_existence_set(orig_request, true);
2241 } else if (result == -ENOENT) {
2242 obj_request_existence_set(orig_request, false);
2243 } else if (result) {
2244 orig_request->result = result;
2245 goto out;
2246 }
2247
2248 /*
2249 * Resubmit the original request now that we have recorded
2250 * whether the target object exists.
2251 */
2252 orig_request->result = rbd_img_obj_request_submit(orig_request);
2253out:
2254 if (orig_request->result)
2255 rbd_obj_request_complete(orig_request);
2256 rbd_obj_request_put(orig_request);
2257}
2258
2259static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2260{
2261 struct rbd_obj_request *stat_request;
2262 struct rbd_device *rbd_dev;
2263 struct ceph_osd_client *osdc;
2264 struct page **pages = NULL;
2265 u32 page_count;
2266 size_t size;
2267 int ret;
2268
2269 /*
2270 * The response data for a STAT call consists of:
2271 * le64 length;
2272 * struct {
2273 * le32 tv_sec;
2274 * le32 tv_nsec;
2275 * } mtime;
2276 */
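	/*
	 * That is 8 + 4 + 4 = 16 bytes in all, so the calc_pages_for()
	 * call below always yields a single page.
	 */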
2277 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2278 page_count = (u32)calc_pages_for(0, size);
2279 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2280 if (IS_ERR(pages))
2281 return PTR_ERR(pages);
2282
2283 ret = -ENOMEM;
2284 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2285 OBJ_REQUEST_PAGES);
2286 if (!stat_request)
2287 goto out;
2288
2289 rbd_obj_request_get(obj_request);
2290 stat_request->obj_request = obj_request;
2291 stat_request->pages = pages;
2292 stat_request->page_count = page_count;
2293
2294 rbd_assert(obj_request->img_request);
2295 rbd_dev = obj_request->img_request->rbd_dev;
2296 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2297 stat_request);
2298 if (!stat_request->osd_req)
2299 goto out;
2300 stat_request->callback = rbd_img_obj_exists_callback;
2301
2302 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2303 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2304 false, false);
2305 rbd_osd_req_format_read(stat_request);
2306
2307 osdc = &rbd_dev->rbd_client->client->osdc;
2308 ret = rbd_obj_request_submit(osdc, stat_request);
2309out:
2310 if (ret)
2311 rbd_obj_request_put(obj_request);
2312
2313 return ret;
2314}
2315
2316static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2317{
2318 struct rbd_img_request *img_request;
2319 struct rbd_device *rbd_dev;
2320 bool known;
2321
2322 rbd_assert(obj_request_img_data_test(obj_request));
2323
2324 img_request = obj_request->img_request;
2325 rbd_assert(img_request);
2326 rbd_dev = img_request->rbd_dev;
2327
2328 /*
2329 * Only writes to layered images need special handling.
2330 * Reads and non-layered writes are simple object requests.
2331 * Layered writes that start beyond the end of the overlap
2332 * with the parent have no parent data, so they too are
2333 * simple object requests. Finally, if the target object is
2334 * known to already exist, its parent data has already been
2335 * copied, so a write to the object can also be handled as a
2336 * simple object request.
2337 */
2338 if (!img_request_write_test(img_request) ||
2339 !img_request_layered_test(img_request) ||
2340 rbd_dev->parent_overlap <= obj_request->img_offset ||
2341 ((known = obj_request_known_test(obj_request)) &&
2342 obj_request_exists_test(obj_request))) {
2343
2344 struct rbd_device *rbd_dev;
2345 struct ceph_osd_client *osdc;
2346
2347 rbd_dev = obj_request->img_request->rbd_dev;
2348 osdc = &rbd_dev->rbd_client->client->osdc;
2349
2350 return rbd_obj_request_submit(osdc, obj_request);
2351 }
2352
2353 /*
2354 * It's a layered write. The target object might exist but
2355 * we may not know that yet. If we know it doesn't exist,
2356 * start by reading the data for the full target object from
2357 * the parent so we can use it for a copyup to the target.
2358 */
2359 if (known)
2360 return rbd_img_obj_parent_read_full(obj_request);
2361
2362 /* We don't know whether the target exists. Go find out. */
2363
2364 return rbd_img_obj_exists_submit(obj_request);
2365}
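/*
 * The routing decision above, in tabular form:
 *
 *	read			  -> submit directly
 *	non-layered write	  -> submit directly
 *	write beyond the overlap  -> submit directly (no parent data)
 *	write, known to exist	  -> submit directly (already copied up)
 *	write, known missing	  -> rbd_img_obj_parent_read_full()
 *	write, existence unknown  -> rbd_img_obj_exists_submit()
 */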
2366
2367static int rbd_img_request_submit(struct rbd_img_request *img_request)
2368{
2369 struct rbd_obj_request *obj_request;
2370 struct rbd_obj_request *next_obj_request;
2371
2372 dout("%s: img %p\n", __func__, img_request);
2373 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2374 int ret;
2375
2376 ret = rbd_img_obj_request_submit(obj_request);
2377 if (ret)
2378 return ret;
2379 }
2380
2381 return 0;
2382}
2383
2384static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2385{
2386 struct rbd_obj_request *obj_request;
2387 struct rbd_device *rbd_dev;
2388 u64 obj_end;
2389
2390 rbd_assert(img_request_child_test(img_request));
2391
2392 obj_request = img_request->obj_request;
2393 rbd_assert(obj_request);
2394 rbd_assert(obj_request->img_request);
2395
2396 obj_request->result = img_request->result;
2397 if (obj_request->result)
2398 goto out;
2399
2400 /*
2401 * We need to zero anything beyond the parent overlap
2402 * boundary. Since rbd_img_obj_request_read_callback()
2403 * will zero anything beyond the end of a short read, an
2404 * easy way to do this is to pretend the data from the
2405 * parent came up short--ending at the overlap boundary.
2406 */
2407 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2408 obj_end = obj_request->img_offset + obj_request->length;
2409 rbd_dev = obj_request->img_request->rbd_dev;
2410 if (obj_end > rbd_dev->parent_overlap) {
2411 u64 xferred = 0;
2412
2413 if (obj_request->img_offset < rbd_dev->parent_overlap)
2414 xferred = rbd_dev->parent_overlap -
2415 obj_request->img_offset;
2416
2417 obj_request->xferred = min(img_request->xferred, xferred);
2418 } else {
2419 obj_request->xferred = img_request->xferred;
2420 }
2421out:
2422 rbd_img_obj_request_read_callback(obj_request);
2423 rbd_obj_request_complete(obj_request);
2424}
2425
2426static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2427{
2428 struct rbd_device *rbd_dev;
2429 struct rbd_img_request *img_request;
2430 int result;
2431
2432 rbd_assert(obj_request_img_data_test(obj_request));
2433 rbd_assert(obj_request->img_request != NULL);
2434 rbd_assert(obj_request->result == (s32) -ENOENT);
2435 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2436
2437 rbd_dev = obj_request->img_request->rbd_dev;
2438 rbd_assert(rbd_dev->parent != NULL);
2439 /* rbd_read_finish(obj_request, obj_request->length); */
2440 img_request = rbd_img_request_create(rbd_dev->parent,
2441 obj_request->img_offset,
2442 obj_request->length,
2443 false, true);
2444 result = -ENOMEM;
2445 if (!img_request)
2446 goto out_err;
2447
2448 rbd_obj_request_get(obj_request);
2449 img_request->obj_request = obj_request;
2450
2451 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2452 obj_request->bio_list);
2453 if (result)
2454 goto out_err;
2455
2456 img_request->callback = rbd_img_parent_read_callback;
2457 result = rbd_img_request_submit(img_request);
2458 if (result)
2459 goto out_err;
2460
2461 return;
2462out_err:
2463 if (img_request)
2464 rbd_img_request_put(img_request);
2465 obj_request->result = result;
2466 obj_request->xferred = 0;
2467 obj_request_done_set(obj_request);
2468}
2469
2470static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2471{
2472 struct rbd_obj_request *obj_request;
2473 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2474 int ret;
2475
2476 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2477 OBJ_REQUEST_NODATA);
2478 if (!obj_request)
2479 return -ENOMEM;
2480
2481 ret = -ENOMEM;
2482 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2483 if (!obj_request->osd_req)
2484 goto out;
2485 obj_request->callback = rbd_obj_request_put;
2486
2487 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2488 notify_id, 0, 0);
2489 rbd_osd_req_format_read(obj_request);
2490
2491 ret = rbd_obj_request_submit(osdc, obj_request);
2492out:
2493 if (ret)
2494 rbd_obj_request_put(obj_request);
2495
2496 return ret;
2497}
2498
2499static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2500{
2501 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2502
2503 if (!rbd_dev)
2504 return;
2505
2506 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2507 rbd_dev->header_name, (unsigned long long)notify_id,
2508 (unsigned int)opcode);
2509 (void)rbd_dev_refresh(rbd_dev);
2510
2511 rbd_obj_notify_ack(rbd_dev, notify_id);
2512}
2513
2514/*
2515 * Request sync osd watch/unwatch. The value of "start" determines
2516 * whether a watch request is being initiated or torn down.
2517 */
2518static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2519{
2520 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2521 struct rbd_obj_request *obj_request;
2522 int ret;
2523
2524 rbd_assert(start ^ !!rbd_dev->watch_event);
2525 rbd_assert(start ^ !!rbd_dev->watch_request);
2526
2527 if (start) {
2528 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2529 &rbd_dev->watch_event);
2530 if (ret < 0)
2531 return ret;
2532 rbd_assert(rbd_dev->watch_event != NULL);
2533 }
2534
2535 ret = -ENOMEM;
2536 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2537 OBJ_REQUEST_NODATA);
2538 if (!obj_request)
2539 goto out_cancel;
2540
2541 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2542 if (!obj_request->osd_req)
2543 goto out_cancel;
2544
2545 if (start)
2546 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2547 else
2548 ceph_osdc_unregister_linger_request(osdc,
2549 rbd_dev->watch_request->osd_req);
2550
2551 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2552 rbd_dev->watch_event->cookie, 0, start);
2553 rbd_osd_req_format_write(obj_request);
2554
2555 ret = rbd_obj_request_submit(osdc, obj_request);
2556 if (ret)
2557 goto out_cancel;
2558 ret = rbd_obj_request_wait(obj_request);
2559 if (ret)
2560 goto out_cancel;
2561 ret = obj_request->result;
2562 if (ret)
2563 goto out_cancel;
2564
2565 /*
2566 * A watch request is set to linger, so the underlying osd
2567 * request won't go away until we unregister it. We retain
2568 * a pointer to the object request during that time (in
2569 * rbd_dev->watch_request), so we'll keep a reference to
2570 * it. We'll drop that reference (below) after we've
2571 * unregistered it.
2572 */
2573 if (start) {
2574 rbd_dev->watch_request = obj_request;
2575
2576 return 0;
2577 }
2578
2579 /* We have successfully torn down the watch request */
2580
2581 rbd_obj_request_put(rbd_dev->watch_request);
2582 rbd_dev->watch_request = NULL;
2583out_cancel:
2584 /* Cancel the event if we're tearing down, or on error */
2585 ceph_osdc_cancel_event(rbd_dev->watch_event);
2586 rbd_dev->watch_event = NULL;
2587 if (obj_request)
2588 rbd_obj_request_put(obj_request);
2589
2590 return ret;
2591}
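/*
 * Illustrative usage (the call sites live elsewhere in this file):
 * a watch is established with start == 1 when the device is mapped
 * and torn down with start == 0 on unmap, e.g.
 *
 *	ret = rbd_dev_header_watch_sync(rbd_dev, 1);	/* register *\/
 *	...
 *	ret = rbd_dev_header_watch_sync(rbd_dev, 0);	/* unregister *\/
 */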
2592
2593/*
2594 * Synchronous osd object method call. Returns the number of bytes
2595 * returned in the inbound buffer, or a negative error code.
2596 */
2597static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2598 const char *object_name,
2599 const char *class_name,
2600 const char *method_name,
2601 const void *outbound,
2602 size_t outbound_size,
2603 void *inbound,
2604 size_t inbound_size)
2605{
2606 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2607 struct rbd_obj_request *obj_request;
2608 struct page **pages;
2609 u32 page_count;
2610 int ret;
2611
2612 /*
2613 * Method calls are ultimately read operations. The result
2614 * should be placed into the inbound buffer provided. They
2615 * also supply outbound data--parameters for the object
2616 * method. Currently if this is present it will be a
2617 * snapshot id.
2618 */
2619 page_count = (u32)calc_pages_for(0, inbound_size);
2620 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2621 if (IS_ERR(pages))
2622 return PTR_ERR(pages);
2623
2624 ret = -ENOMEM;
2625 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2626 OBJ_REQUEST_PAGES);
2627 if (!obj_request)
2628 goto out;
2629
2630 obj_request->pages = pages;
2631 obj_request->page_count = page_count;
2632
2633 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2634 if (!obj_request->osd_req)
2635 goto out;
2636
2637 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2638 class_name, method_name);
2639 if (outbound_size) {
2640 struct ceph_pagelist *pagelist;
2641
2642 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2643 if (!pagelist)
2644 goto out;
2645
2646 ceph_pagelist_init(pagelist);
2647 ceph_pagelist_append(pagelist, outbound, outbound_size);
2648 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2649 pagelist);
2650 }
2651 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2652 obj_request->pages, inbound_size,
2653 0, false, false);
2654 rbd_osd_req_format_read(obj_request);
2655
2656 ret = rbd_obj_request_submit(osdc, obj_request);
2657 if (ret)
2658 goto out;
2659 ret = rbd_obj_request_wait(obj_request);
2660 if (ret)
2661 goto out;
2662
2663 ret = obj_request->result;
2664 if (ret < 0)
2665 goto out;
2666
2667 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2668 ret = (int)obj_request->xferred;
2669 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2670out:
2671 if (obj_request)
2672 rbd_obj_request_put(obj_request);
2673 else
2674 ceph_release_page_vector(pages, page_count);
2675
2676 return ret;
2677}
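/*
 * A typical caller (compare rbd_dev_v2_image_size() below) looks like:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_size",
 *				&snapid, sizeof (snapid),
 *				&size_buf, sizeof (size_buf));
 *
 * A non-negative return is the number of reply bytes copied back.
 */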
2678
2679static void rbd_request_fn(struct request_queue *q)
2680 __releases(q->queue_lock) __acquires(q->queue_lock)
2681{
2682 struct rbd_device *rbd_dev = q->queuedata;
2683 bool read_only = rbd_dev->mapping.read_only;
2684 struct request *rq;
2685 int result;
2686
2687 while ((rq = blk_fetch_request(q))) {
2688 bool write_request = rq_data_dir(rq) == WRITE;
2689 struct rbd_img_request *img_request;
2690 u64 offset;
2691 u64 length;
2692
2693 /* Ignore any non-FS requests that filter through. */
2694
2695 if (rq->cmd_type != REQ_TYPE_FS) {
2696 dout("%s: non-fs request type %d\n", __func__,
2697 (int) rq->cmd_type);
2698 __blk_end_request_all(rq, 0);
2699 continue;
2700 }
2701
2702 /* Ignore/skip any zero-length requests */
2703
2704 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2705 length = (u64) blk_rq_bytes(rq);
2706
2707 if (!length) {
2708 dout("%s: zero-length request\n", __func__);
2709 __blk_end_request_all(rq, 0);
2710 continue;
2711 }
2712
2713 spin_unlock_irq(q->queue_lock);
2714
2715 /* Disallow writes to a read-only device */
2716
2717 if (write_request) {
2718 result = -EROFS;
2719 if (read_only)
2720 goto end_request;
2721 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2722 }
2723
2724 /*
2725 * Quit early if the mapped snapshot no longer
2726 * exists. It's still possible the snapshot will
2727 * have disappeared by the time our request arrives
2728 * at the osd, but there's no sense in sending it if
2729 * we already know.
2730 */
2731 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2732 dout("request for non-existent snapshot");
2733 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2734 result = -ENXIO;
2735 goto end_request;
2736 }
2737
2738 result = -EINVAL;
2739 if (offset && length > U64_MAX - offset + 1) {
2740 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2741 offset, length);
2742 goto end_request; /* Shouldn't happen */
2743 }
2744
2745 result = -ENOMEM;
2746 img_request = rbd_img_request_create(rbd_dev, offset, length,
2747 write_request, false);
2748 if (!img_request)
2749 goto end_request;
2750
2751 img_request->rq = rq;
2752
2753 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2754 rq->bio);
2755 if (!result)
2756 result = rbd_img_request_submit(img_request);
2757 if (result)
2758 rbd_img_request_put(img_request);
2759end_request:
2760 spin_lock_irq(q->queue_lock);
2761 if (result < 0) {
2762 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2763 write_request ? "write" : "read",
2764 length, offset, result);
2765
2766 __blk_end_request_all(rq, result);
2767 }
2768 }
2769}
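/*
 * Note the locking pattern above: blk_fetch_request() is called with
 * q->queue_lock held, the lock is dropped while the image request is
 * built and submitted (both may sleep or allocate), and it is re-taken
 * before ending the request or fetching the next one -- hence the
 * __releases()/__acquires() annotations on the function.
 */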
2770
2771/*
2772 * a queue callback. Makes sure that we don't create a bio that spans across
2773 * multiple osd objects. One exception would be single-page bios,
2774 * which we handle later at bio_chain_clone_range()
2775 */
2776static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2777 struct bio_vec *bvec)
2778{
2779 struct rbd_device *rbd_dev = q->queuedata;
2780 sector_t sector_offset;
2781 sector_t sectors_per_obj;
2782 sector_t obj_sector_offset;
2783 int ret;
2784
2785 /*
2786 * Find how far into its rbd object the partition-relative
2787 * bio start sector is to offset relative to the enclosing
2788 * device.
2789 */
2790 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2791 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2792 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2793
2794 /*
2795 * Compute the number of bytes from that offset to the end
2796 * of the object. Account for what's already used by the bio.
2797 */
2798 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2799 if (ret > bmd->bi_size)
2800 ret -= bmd->bi_size;
2801 else
2802 ret = 0;
2803
2804 /*
2805 * Don't send back more than was asked for. And if the bio
2806 * was empty, let the whole thing through because: "Note
2807 * that a block device *must* allow a single page to be
2808 * added to an empty bio."
2809 */
2810 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2811 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2812 ret = (int) bvec->bv_len;
2813
2814 return ret;
2815}
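/*
 * A worked example with illustrative numbers: for obj_order 22,
 * sectors_per_obj is 1 << (22 - 9) = 8192.  A bio whose mapped start
 * sector is 8190 sits 2 sectors (1024 bytes) short of the object
 * boundary, so at most 1024 bytes, minus whatever the bio already
 * holds, may be added before it must be split.
 */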
2816
2817static void rbd_free_disk(struct rbd_device *rbd_dev)
2818{
2819 struct gendisk *disk = rbd_dev->disk;
2820
2821 if (!disk)
2822 return;
2823
2824 rbd_dev->disk = NULL;
2825 if (disk->flags & GENHD_FL_UP) {
2826 del_gendisk(disk);
2827 if (disk->queue)
2828 blk_cleanup_queue(disk->queue);
2829 }
2830 put_disk(disk);
2831}
2832
2833static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2834 const char *object_name,
2835 u64 offset, u64 length, void *buf)
2836
2837{
2838 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2839 struct rbd_obj_request *obj_request;
2840 struct page **pages = NULL;
2841 u32 page_count;
2842 size_t size;
2843 int ret;
2844
2845 page_count = (u32) calc_pages_for(offset, length);
2846 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2847 if (IS_ERR(pages))
2848 return PTR_ERR(pages);
2849
2850 ret = -ENOMEM;
2851 obj_request = rbd_obj_request_create(object_name, offset, length,
2852 OBJ_REQUEST_PAGES);
2853 if (!obj_request)
2854 goto out;
2855
2856 obj_request->pages = pages;
2857 obj_request->page_count = page_count;
2858
2859 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2860 if (!obj_request->osd_req)
2861 goto out;
2862
2863 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2864 offset, length, 0, 0);
2865 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
2866 obj_request->pages,
2867 obj_request->length,
2868 obj_request->offset & ~PAGE_MASK,
2869 false, false);
2870 rbd_osd_req_format_read(obj_request);
2871
2872 ret = rbd_obj_request_submit(osdc, obj_request);
2873 if (ret)
2874 goto out;
2875 ret = rbd_obj_request_wait(obj_request);
2876 if (ret)
2877 goto out;
2878
2879 ret = obj_request->result;
2880 if (ret < 0)
2881 goto out;
2882
2883 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2884 size = (size_t) obj_request->xferred;
2885 ceph_copy_from_page_vector(pages, buf, 0, size);
2886 rbd_assert(size <= (size_t)INT_MAX);
2887 ret = (int)size;
2888out:
2889 if (obj_request)
2890 rbd_obj_request_put(obj_request);
2891 else
2892 ceph_release_page_vector(pages, page_count);
2893
2894 return ret;
2895}
2896
2897/*
2898 * Read the complete header for the given rbd device.
2899 *
2900 * Returns a pointer to a dynamically-allocated buffer containing
2901 * the complete and validated header.
2902 *
2905 * Returns a pointer-coded errno if a failure occurs.
2906 */
2907static struct rbd_image_header_ondisk *
2908rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
2909{
2910 struct rbd_image_header_ondisk *ondisk = NULL;
2911 u32 snap_count = 0;
2912 u64 names_size = 0;
2913 u32 want_count;
2914 int ret;
2915
2916 /*
2917 * The complete header will include an array of its 64-bit
2918 * snapshot ids, followed by the names of those snapshots as
2919 * a contiguous block of NUL-terminated strings. Note that
2920 * the number of snapshots could change by the time we read
2921 * it in, in which case we re-read it.
2922 */
2923 do {
2924 size_t size;
2925
2926 kfree(ondisk);
2927
2928 size = sizeof (*ondisk);
2929 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2930 size += names_size;
2931 ondisk = kmalloc(size, GFP_KERNEL);
2932 if (!ondisk)
2933 return ERR_PTR(-ENOMEM);
2934
2935 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2936 0, size, ondisk);
2937 if (ret < 0)
2938 goto out_err;
2939 if ((size_t)ret < size) {
2940 ret = -ENXIO;
2941 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
2942 size, ret);
2943 goto out_err;
2944 }
2945 if (!rbd_dev_ondisk_valid(ondisk)) {
2946 ret = -ENXIO;
2947 rbd_warn(rbd_dev, "invalid header");
2948 goto out_err;
2949 }
2950
2951 names_size = le64_to_cpu(ondisk->snap_names_len);
2952 want_count = snap_count;
2953 snap_count = le32_to_cpu(ondisk->snap_count);
2954 } while (snap_count != want_count);
2955
2956 return ondisk;
2957
2958out_err:
2959 kfree(ondisk);
2960
2961 return ERR_PTR(ret);
2962}
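/*
 * To make the sizing above concrete (illustrative numbers): with 3
 * snapshots whose names total 24 bytes including NULs, the buffer is
 * sizeof (*ondisk) + 3 * sizeof (struct rbd_image_snap_ondisk) + 24
 * bytes.  If a snapshot is created between two iterations, snap_count
 * in the reply no longer matches want_count and the loop re-reads the
 * header with the larger size.
 */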
2963
2964/*
2965 * Reload the on-disk header.
2966 */
2967static int rbd_read_header(struct rbd_device *rbd_dev,
2968 struct rbd_image_header *header)
2969{
2970 struct rbd_image_header_ondisk *ondisk;
2971 int ret;
2972
2973 ondisk = rbd_dev_v1_header_read(rbd_dev);
2974 if (IS_ERR(ondisk))
2975 return PTR_ERR(ondisk);
2976 ret = rbd_header_from_disk(header, ondisk);
2977 kfree(ondisk);
2978
2979 return ret;
2980}
2981
2982static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
2983{
2984 struct rbd_snap *snap;
2985 struct rbd_snap *next;
2986
2987 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) {
2988 list_del(&snap->node);
2989 rbd_snap_destroy(snap);
2990 }
2991}
2992
2993static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2994{
2995 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
2996 return;
2997
2998 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
2999 sector_t size;
3000
3001 rbd_dev->mapping.size = rbd_dev->header.image_size;
3002 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3003 dout("setting size to %llu sectors", (unsigned long long)size);
3004 set_capacity(rbd_dev->disk, size);
3005 }
3006}
3007
3008/*
3009 * only read the first part of the ondisk header, without the snaps info
3010 */
3011static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3012{
3013 int ret;
3014 struct rbd_image_header h;
3015
3016 ret = rbd_read_header(rbd_dev, &h);
3017 if (ret < 0)
3018 return ret;
3019
3020 down_write(&rbd_dev->header_rwsem);
3021
3022 /* Update image size, and check for resize of mapped image */
3023 rbd_dev->header.image_size = h.image_size;
3024 rbd_update_mapping_size(rbd_dev);
3025
3026 /* rbd_dev->header.object_prefix shouldn't change */
3027 kfree(rbd_dev->header.snap_sizes);
3028 kfree(rbd_dev->header.snap_names);
3029 /* osd requests may still refer to snapc */
3030 ceph_put_snap_context(rbd_dev->header.snapc);
3031
3032 rbd_dev->header.image_size = h.image_size;
3033 rbd_dev->header.snapc = h.snapc;
3034 rbd_dev->header.snap_names = h.snap_names;
3035 rbd_dev->header.snap_sizes = h.snap_sizes;
3036 /* Free the extra copy of the object prefix */
3037 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3038 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3039 kfree(h.object_prefix);
3040
3041 ret = rbd_dev_snaps_update(rbd_dev);
3042
3043 up_write(&rbd_dev->header_rwsem);
3044
3045 return ret;
3046}
3047
3048static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3049{
3050 u64 image_size;
3051 int ret;
3052
3053 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3054 image_size = rbd_dev->header.image_size;
3055 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3056 if (rbd_dev->image_format == 1)
3057 ret = rbd_dev_v1_refresh(rbd_dev);
3058 else
3059 ret = rbd_dev_v2_refresh(rbd_dev);
3060 mutex_unlock(&ctl_mutex);
3061 if (ret)
3062 rbd_warn(rbd_dev, "got notification but failed to "
3063 "update snaps: %d\n", ret);
3064 if (image_size != rbd_dev->header.image_size)
3065 revalidate_disk(rbd_dev->disk);
3066
3067 return ret;
3068}
3069
602adf40
YS
3070static int rbd_init_disk(struct rbd_device *rbd_dev)
3071{
3072 struct gendisk *disk;
3073 struct request_queue *q;
3074 u64 segment_size;
3075
3076 /* create gendisk info */
3077 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3078 if (!disk)
3079 return -ENOMEM;
3080
3081 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3082 rbd_dev->dev_id);
3083 disk->major = rbd_dev->major;
3084 disk->first_minor = 0;
3085 disk->fops = &rbd_bd_ops;
3086 disk->private_data = rbd_dev;
3087
3088 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3089 if (!q)
3090 goto out_disk;
3091
3092 /* We use the default size, but let's be explicit about it. */
3093 blk_queue_physical_block_size(q, SECTOR_SIZE);
3094
3095 /* set io sizes to object size */
3096 segment_size = rbd_obj_bytes(&rbd_dev->header);
3097 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3098 blk_queue_max_segment_size(q, segment_size);
3099 blk_queue_io_min(q, segment_size);
3100 blk_queue_io_opt(q, segment_size);
3101
3102 blk_queue_merge_bvec(q, rbd_merge_bvec);
3103 disk->queue = q;
3104
3105 q->queuedata = rbd_dev;
3106
3107 rbd_dev->disk = disk;
3108
3109 return 0;
3110out_disk:
3111 put_disk(disk);
3112
3113 return -ENOMEM;
3114}
3115
3116/*
3117 sysfs
3118*/
3119
3120static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3121{
3122 return container_of(dev, struct rbd_device, dev);
3123}
3124
3125static ssize_t rbd_size_show(struct device *dev,
3126 struct device_attribute *attr, char *buf)
3127{
3128 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3129
3130 return sprintf(buf, "%llu\n",
3131 (unsigned long long)rbd_dev->mapping.size);
3132}
3133
3134/*
3135 * Note this shows the features for whatever's mapped, which is not
3136 * necessarily the base image.
3137 */
3138static ssize_t rbd_features_show(struct device *dev,
3139 struct device_attribute *attr, char *buf)
3140{
3141 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3142
3143 return sprintf(buf, "0x%016llx\n",
3144 (unsigned long long)rbd_dev->mapping.features);
3145}
3146
3147static ssize_t rbd_major_show(struct device *dev,
3148 struct device_attribute *attr, char *buf)
3149{
3150 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3151
3152 if (rbd_dev->major)
3153 return sprintf(buf, "%d\n", rbd_dev->major);
3154
3155 return sprintf(buf, "(none)\n");
3156
3157}
3158
3159static ssize_t rbd_client_id_show(struct device *dev,
3160 struct device_attribute *attr, char *buf)
602adf40 3161{
3162 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3163
3164 return sprintf(buf, "client%lld\n",
3165 ceph_client_id(rbd_dev->rbd_client->client));
3166}
3167
3168static ssize_t rbd_pool_show(struct device *dev,
3169 struct device_attribute *attr, char *buf)
602adf40 3170{
3171 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3172
3173 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3174}
3175
3176static ssize_t rbd_pool_id_show(struct device *dev,
3177 struct device_attribute *attr, char *buf)
3178{
3179 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3180
3181 return sprintf(buf, "%llu\n",
3182 (unsigned long long) rbd_dev->spec->pool_id);
3183}
3184
3185static ssize_t rbd_name_show(struct device *dev,
3186 struct device_attribute *attr, char *buf)
3187{
3188 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3189
3190 if (rbd_dev->spec->image_name)
3191 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3192
3193 return sprintf(buf, "(unknown)\n");
3194}
3195
3196static ssize_t rbd_image_id_show(struct device *dev,
3197 struct device_attribute *attr, char *buf)
3198{
3199 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3200
3201 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3202}
3203
3204/*
3205 * Shows the name of the currently-mapped snapshot (or
3206 * RBD_SNAP_HEAD_NAME for the base image).
3207 */
3208static ssize_t rbd_snap_show(struct device *dev,
3209 struct device_attribute *attr,
3210 char *buf)
3211{
3212 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3213
3214 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3215}
3216
3217/*
3218 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3219 * for the parent image. If there is no parent, simply shows
3220 * "(no parent image)".
3221 */
3222static ssize_t rbd_parent_show(struct device *dev,
3223 struct device_attribute *attr,
3224 char *buf)
3225{
3226 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3227 struct rbd_spec *spec = rbd_dev->parent_spec;
3228 int count;
3229 char *bufp = buf;
3230
3231 if (!spec)
3232 return sprintf(buf, "(no parent image)\n");
3233
3234 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3235 (unsigned long long) spec->pool_id, spec->pool_name);
3236 if (count < 0)
3237 return count;
3238 bufp += count;
3239
3240 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3241 spec->image_name ? spec->image_name : "(unknown)");
3242 if (count < 0)
3243 return count;
3244 bufp += count;
3245
3246 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3247 (unsigned long long) spec->snap_id, spec->snap_name);
3248 if (count < 0)
3249 return count;
3250 bufp += count;
3251
3252 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3253 if (count < 0)
3254 return count;
3255 bufp += count;
3256
3257 return (ssize_t) (bufp - buf);
3258}
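/*
 * For a mapped clone, reading this attribute produces output of the
 * form (values illustrative):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1f22f8d7c6a1
 *	image_name parent-image
 *	snap_id 4
 *	snap_name base
 *	overlap 10737418240
 */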
3259
3260static ssize_t rbd_image_refresh(struct device *dev,
3261 struct device_attribute *attr,
3262 const char *buf,
3263 size_t size)
3264{
3265 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3266 int ret;
3267
3268 ret = rbd_dev_refresh(rbd_dev);
3269
3270 return ret < 0 ? ret : size;
dfc5606d 3271}
602adf40 3272
dfc5606d 3273static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
34b13184 3274static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
dfc5606d
YS
3275static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3276static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3277static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
9bb2f334 3278static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
dfc5606d 3279static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
589d30e0 3280static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
dfc5606d
YS
3281static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3282static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
86b00e0d 3283static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
dfc5606d
YS
3284
3285static struct attribute *rbd_attrs[] = {
3286 &dev_attr_size.attr,
34b13184 3287 &dev_attr_features.attr,
dfc5606d
YS
3288 &dev_attr_major.attr,
3289 &dev_attr_client_id.attr,
3290 &dev_attr_pool.attr,
9bb2f334 3291 &dev_attr_pool_id.attr,
dfc5606d 3292 &dev_attr_name.attr,
589d30e0 3293 &dev_attr_image_id.attr,
dfc5606d 3294 &dev_attr_current_snap.attr,
86b00e0d 3295 &dev_attr_parent.attr,
dfc5606d 3296 &dev_attr_refresh.attr,
dfc5606d
YS
3297 NULL
3298};
3299
3300static struct attribute_group rbd_attr_group = {
3301 .attrs = rbd_attrs,
3302};
3303
3304static const struct attribute_group *rbd_attr_groups[] = {
3305 &rbd_attr_group,
3306 NULL
3307};
3308
3309static void rbd_sysfs_dev_release(struct device *dev)
3310{
3311}
3312
3313static struct device_type rbd_device_type = {
3314 .name = "rbd",
3315 .groups = rbd_attr_groups,
3316 .release = rbd_sysfs_dev_release,
3317};
3318
3319static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3320{
3321 kref_get(&spec->kref);
3322
3323 return spec;
3324}
3325
3326static void rbd_spec_free(struct kref *kref);
3327static void rbd_spec_put(struct rbd_spec *spec)
3328{
3329 if (spec)
3330 kref_put(&spec->kref, rbd_spec_free);
3331}
3332
3333static struct rbd_spec *rbd_spec_alloc(void)
3334{
3335 struct rbd_spec *spec;
3336
3337 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3338 if (!spec)
3339 return NULL;
3340 kref_init(&spec->kref);
3341
8b8fb99c
AE
3342 return spec;
3343}
3344
3345static void rbd_spec_free(struct kref *kref)
3346{
3347 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3348
3349 kfree(spec->pool_name);
3350 kfree(spec->image_id);
3351 kfree(spec->image_name);
3352 kfree(spec->snap_name);
3353 kfree(spec);
3354}
3355
cc344fa1 3356static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
c53d5893
AE
3357 struct rbd_spec *spec)
3358{
3359 struct rbd_device *rbd_dev;
3360
3361 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3362 if (!rbd_dev)
3363 return NULL;
3364
3365 spin_lock_init(&rbd_dev->lock);
6d292906 3366 rbd_dev->flags = 0;
c53d5893
AE
3367 INIT_LIST_HEAD(&rbd_dev->node);
3368 INIT_LIST_HEAD(&rbd_dev->snaps);
3369 init_rwsem(&rbd_dev->header_rwsem);
3370
3371 rbd_dev->spec = spec;
3372 rbd_dev->rbd_client = rbdc;
3373
0903e875
AE
3374 /* Initialize the layout used for all rbd requests */
3375
3376 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3377 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3378 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3379 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3380
c53d5893
AE
3381 return rbd_dev;
3382}
3383
3384static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3385{
3386 rbd_put_client(rbd_dev->rbd_client);
3387 rbd_spec_put(rbd_dev->spec);
3388 kfree(rbd_dev);
3389}
3390
6087b51b 3391static void rbd_snap_destroy(struct rbd_snap *snap)
dfc5606d 3392{
3e83b65b
AE
3393 kfree(snap->name);
3394 kfree(snap);
dfc5606d
YS
3395}
3396
6087b51b 3397static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev,
c8d18425 3398 const char *snap_name,
34b13184
AE
3399 u64 snap_id, u64 snap_size,
3400 u64 snap_features)
dfc5606d 3401{
4e891e0a 3402 struct rbd_snap *snap;
4e891e0a
AE
3403
3404 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
dfc5606d 3405 if (!snap)
4e891e0a
AE
3406 return ERR_PTR(-ENOMEM);
3407
6e584f52 3408 snap->name = snap_name;
c8d18425
AE
3409 snap->id = snap_id;
3410 snap->size = snap_size;
34b13184 3411 snap->features = snap_features;
4e891e0a
AE
3412
3413 return snap;
dfc5606d
YS
3414}
3415
6e584f52
AE
3416/*
3417 * Returns a dynamically-allocated snapshot name if successful, or a
3418 * pointer-coded error otherwise.
3419 */
cb75223d 3420static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
cd892126
AE
3421 u64 *snap_size, u64 *snap_features)
3422{
cb75223d 3423 const char *snap_name;
6e584f52 3424 int i;
cd892126
AE
3425
3426 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3427
3428 /* Skip over names until we find the one we are looking for */
3429
3430 snap_name = rbd_dev->header.snap_names;
3431 for (i = 0; i < which; i++)
3432 snap_name += strlen(snap_name) + 1;
3433
3434 snap_name = kstrdup(snap_name, GFP_KERNEL);
3435 if (!snap_name)
3436 return ERR_PTR(-ENOMEM);
3437
3438 *snap_size = rbd_dev->header.snap_sizes[which];
3439 *snap_features = 0; /* No features for v1 */
3440
3441 return snap_name;
3442}
3443
3444/*
3445 * Get the size and object order for an image snapshot, or if
3446 * snap_id is CEPH_NOSNAP, gets this information for the base
3447 * image.
3448 */
3449static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3450 u8 *order, u64 *snap_size)
3451{
3452 __le64 snapid = cpu_to_le64(snap_id);
3453 int ret;
3454 struct {
3455 u8 order;
3456 __le64 size;
3457 } __attribute__ ((packed)) size_buf = { 0 };
3458
36be9a76 3459 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
9d475de5 3460 "rbd", "get_size",
4157976b 3461 &snapid, sizeof (snapid),
e2a58ee5 3462 &size_buf, sizeof (size_buf));
36be9a76 3463 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
9d475de5
AE
3464 if (ret < 0)
3465 return ret;
57385b51
AE
3466 if (ret < sizeof (size_buf))
3467 return -ERANGE;
9d475de5 3468
c86f86e9
AE
3469 if (order)
3470 *order = size_buf.order;
9d475de5
AE
3471 *snap_size = le64_to_cpu(size_buf.size);
3472
3473 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
57385b51
AE
3474 (unsigned long long)snap_id, (unsigned int)*order,
3475 (unsigned long long)*snap_size);
9d475de5
AE
3476
3477 return 0;
3478}
3479
3480static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3481{
3482 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3483 &rbd_dev->header.obj_order,
3484 &rbd_dev->header.image_size);
3485}
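
/*
 * Illustrative sketch only, kept out of the build: one way a caller
 * might use _rbd_dev_v2_snap_size() above to query a single
 * snapshot.  The snapshot id here is a made-up example value.
 */
#if 0
static void rbd_example_print_snap_size(struct rbd_device *rbd_dev)
{
	u64 snap_size;
	int ret;

	/* Pass NULL for the order; only the size is of interest */
	ret = _rbd_dev_v2_snap_size(rbd_dev, 5, NULL, &snap_size);
	if (ret)
		rbd_warn(rbd_dev, "snap size query failed: %d\n", ret);
	else
		dout("snap 5 size is %llu\n", (unsigned long long)snap_size);
}
#endif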

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
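
/*
 * For example: with RBD_FEATURES_SUPPORTED covering layering (1<<0)
 * and striping v2 (1<<1), an image whose incompat mask reports an
 * unknown bit such as 1<<2 is refused with -ENXIO above, while a
 * plain layered image passes the check.
 */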

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
	if (parent_spec->pool_id == CEPH_NOPOOL)
		goto out;	/* No parent?  No problem. */

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (parent_spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
			(unsigned long long)parent_spec->pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	rbd_dev->parent_overlap = overlap;
	rbd_dev->parent_spec = parent_spec;
	parent_spec = NULL;	/* rbd_dev now owns this */
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
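
/*
 * Worked example (typical values, not guaranteed): rbd images
 * commonly use object order 22, i.e. 4 MiB objects, so only
 * stripe_unit == 4 MiB together with stripe_count == 1 -- the
 * pre-STRIPINGV2 layout -- is accepted above.
 */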

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

/*
 * When an rbd image has a parent image, it is identified by the
 * pool, image, and snapshot ids (not names).  This function fills
 * in the names for those ids.  (It's OK if we can't figure out the
 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
 *
 * When an image being mapped (not a parent) is probed, we have the
 * pool name and pool id, image name and image id, and the snapshot
 * name.  The only thing we're missing is the snapshot id.
 *
 * The set of snapshots for an image is not known until they have
 * been read by rbd_dev_snaps_update(), so we can't completely fill
 * in this information until after that has been called.
 */
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	/*
	 * An image being mapped will have the pool name (etc.), but
	 * we need to look up the snapshot id.
	 */
	if (spec->pool_name) {
		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
			struct rbd_snap *snap;

			snap = snap_by_name(rbd_dev, spec->snap_name);
			if (!snap)
				return -ENOENT;
			spec->snap_id = snap->id;
		} else {
			spec->snap_id = CEPH_NOSNAP;
		}

		return 0;
	}

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name, and make a copy */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (!snap_name) {
		rbd_warn(rbd_dev, "no snapshot with id %llu", spec->snap_id);
		ret = -EIO;
		goto out_err;
	}
	snap_name = kstrdup(snap_name, GFP_KERNEL);
	if (!snap_name) {
		ret = -ENOMEM;
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;
out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
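	/*
	 * With RBD_MAX_SNAP_COUNT at 510 this works out to
	 * 8 + 4 + 510 * 8 = 4092 bytes, which is what keeps the
	 * largest snapshot context we accept within 4KB.
	 */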
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				/ sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	size_t size;
	void *reply_buf;
	__le64 snap_id;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
	snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snap_id, sizeof (snap_id),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)le64_to_cpu(snap_id), snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
				u64 *snap_size, u64 *snap_features)
{
	u64 snap_id;
	u64 size;
	u64 features;
	const char *snap_name;
	int ret;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
	snap_id = rbd_dev->header.snapc->snaps[which];
	ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
	if (ret)
		goto out_err;

	ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		goto out_err;

	snap_name = rbd_dev_v2_snap_name(rbd_dev, which);
	if (!IS_ERR(snap_name)) {
		*snap_size = size;
		*snap_features = features;
	}

	return snap_name;
out_err:
	return ERR_PTR(ret);
}

static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
				u64 *snap_size, u64 *snap_features)
{
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_info(rbd_dev, which,
					snap_size, snap_features);
	if (rbd_dev->image_format == 2)
		return rbd_dev_v2_snap_info(rbd_dev, which,
					snap_size, snap_features);
	return ERR_PTR(-EINVAL);
}

static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
{
	int ret;

	down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
	if (ret)
		goto out;
	ret = rbd_dev_snaps_update(rbd_dev);
	dout("rbd_dev_snaps_update returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}

/*
 * Scan the rbd device's current snapshot list and compare it to the
 * newly-received snapshot context.  Remove any existing snapshots
 * not present in the new snapshot context.  Add a new snapshot for
 * any snapshots in the snapshot context not in the current list.
 * And verify there are no changes to snapshots we already know
 * about.
 *
 * Assumes the snapshots in the snapshot context are sorted by
 * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
 * are also maintained in that order.)  For example, if the device
 * holds snapshots {12, 7, 3} and the new context holds {12, 9, 3},
 * snapshot 7 is destroyed and snapshot 9 is created.
 *
 * Note that any error that occurs while updating the snapshot list
 * aborts the update, and the entire list is cleared.  The snapshot
 * list becomes inconsistent at that point anyway, so it might as
 * well be empty.
 */
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const u32 snap_count = snapc->num_snaps;
	struct list_head *head = &rbd_dev->snaps;
	struct list_head *links = head->next;
	u32 index = 0;
	int ret = 0;

	dout("%s: snap count is %u\n", __func__, (unsigned int)snap_count);
	while (index < snap_count || links != head) {
		u64 snap_id;
		struct rbd_snap *snap;
		const char *snap_name;
		u64 snap_size = 0;
		u64 snap_features = 0;

		snap_id = index < snap_count ? snapc->snaps[index]
					     : CEPH_NOSNAP;
		snap = links != head ? list_entry(links, struct rbd_snap, node)
				     : NULL;
		rbd_assert(!snap || snap->id != CEPH_NOSNAP);

		if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
			struct list_head *next = links->next;

			/*
			 * A previously-existing snapshot is not in
			 * the new snap context.
			 *
			 * If the now-missing snapshot is the one
			 * the image represents, clear its existence
			 * flag so we can avoid sending any more
			 * requests to it.
			 */
			if (rbd_dev->spec->snap_id == snap->id)
				clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
			dout("removing %ssnap id %llu\n",
				rbd_dev->spec->snap_id == snap->id ?
							"mapped " : "",
				(unsigned long long)snap->id);

			list_del(&snap->node);
			rbd_snap_destroy(snap);

			/* Done with this list entry; advance */

			links = next;
			continue;
		}

		snap_name = rbd_dev_snap_info(rbd_dev, index,
					&snap_size, &snap_features);
		if (IS_ERR(snap_name)) {
			ret = PTR_ERR(snap_name);
			dout("failed to get snap info, error %d\n", ret);
			goto out_err;
		}

		/* Print the entry index (not the total count) here */
		dout("entry %u: snap_id = %llu\n", (unsigned int)index,
			(unsigned long long)snap_id);
		if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
			struct rbd_snap *new_snap;

			/* We haven't seen this snapshot before */

			new_snap = rbd_snap_create(rbd_dev, snap_name,
					snap_id, snap_size, snap_features);
			if (IS_ERR(new_snap)) {
				ret = PTR_ERR(new_snap);
				dout(" failed to add dev, error %d\n", ret);
				goto out_err;
			}

			/* New goes before existing, or at end of list */

			dout(" added dev%s\n", snap ? "" : " at end");
			if (snap)
				list_add_tail(&new_snap->node, &snap->node);
			else
				list_add_tail(&new_snap->node, head);
		} else {
			/* Already have this one */

			dout(" already present\n");

			rbd_assert(snap->size == snap_size);
			rbd_assert(!strcmp(snap->name, snap_name));
			rbd_assert(snap->features == snap_features);

			/* Done with this list entry; advance */

			links = links->next;
		}

		/* Advance to the next entry in the snapshot context */

		index++;
	}
	dout("%s: done\n", __func__);

	return 0;
out_err:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout(" max dev id has been reset\n");
}
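
/*
 * For example: with devices 1, 2 and 3 mapped, putting id 2 merely
 * drops it from the list, while putting id 3 also resets
 * rbd_dev_id_max back to 2, so the next rbd_dev_id_get() hands out
 * id 3 again.
 */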

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
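
/*
 * Illustrative sketch only, kept out of the build: how the token
 * helpers above consume a whitespace-separated buffer.  The input
 * string is made up for the example, and error handling is elided.
 */
#if 0
static void rbd_example_tokenize(void)
{
	const char *buf = "rbdpool  myimage snap1";
	char *pool;
	char *image;

	pool = dup_token(&buf, NULL);	/* "rbdpool"; kfree() when done */
	image = dup_token(&buf, NULL);	/* "myimage" */
	dout("pool %s image %s, %zu chars left\n",
		pool, image, next_token(&buf));	/* "snap1" -> 5 */
	kfree(image);
	kfree(pool);
}
#endif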

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
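
/*
 * Example with hypothetical names: writing
 *
 *	1.2.3.4:6789 name=admin rbdpool myimage snap1
 *
 * to /sys/bus/rbd/add yields mon_addrs "1.2.3.4:6789", the option
 * string "name=admin" for ceph_parse_options(), pool "rbdpool",
 * image "myimage" and snapshot "snap1".  Omitting the final token
 * would map the image head ("-") instead.
 */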

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
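
/*
 * Concretely, for a (hypothetical) image named "myimage": the id
 * object probed above is "rbd_id.myimage" (RBD_ID_PREFIX comes from
 * rbd_types.h), and its "get_id" method returns the persistent id
 * from which all other per-image object names are derived.
 */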

/* Undo whatever state changes are made by v1 or v2 image probe */

static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out_err;

		/*
		 * Don't print a warning for parent images.  We can
		 * tell at this point because we won't know the pool
		 * name yet (just the pool id).
		 */
		if (rbd_dev->spec->pool_name)
			rbd_warn(rbd_dev, "WARNING: kernel layering "
					"is EXPERIMENTAL!");
	}

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context */

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret)
		goto out_err;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;
out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;

	return 0;
out_err:
	if (parent) {
		rbd_spec_put(rbd_dev->parent_spec);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
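
/*
 * Since rbd_dev_image_probe() calls back into rbd_dev_probe_parent()
 * above, a clone of a clone is probed recursively: child, then
 * parent, then grandparent, with every level sharing the same
 * rbd_client and holding a reference to its parent's spec.
 */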

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		return ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/* Everything's ready.  Announce the disk to the world. */

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
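
/*
 * Concretely (hypothetical names): a format 1 image "myimage" gets
 * the header object "myimage.rbd" (RBD_SUFFIX), while a format 2
 * image with id "abc123" gets "rbd_header.abc123"
 * (RBD_HEADER_PREFIX); both are defined in rbd_types.h.
 */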

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	int ret;

	rbd_remove_all_snaps(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto out_header_name;

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (!ret)
		return 0;

err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (tmp)
		rbd_warn(rbd_dev, "unable to tear down watch request\n");
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
			const char *buf,
			size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_image_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	rc = rbd_dev_device_setup(rbd_dev);
	if (!rc)
		return count;

	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_clear_mapping(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
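
/*
 * Worked example: for a chain dev -> p1 -> p2, the first pass of
 * the outer loop above walks down to p2 (the parent with no
 * grandparent) and releases it; the second pass releases p1; then
 * dev->parent is NULL and the loop ends.
 */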

static ssize_t rbd_remove(struct bus_type *bus,
			const char *buf,
			size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	ret = count;
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");