602adf40
YS
1/*
2 rbd.c -- Export ceph rados objects as a Linux block device
3
4
5 based on drivers/block/osdblk.c:
6
7 Copyright 2009 Red Hat, Inc.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21
22
23
dfc5606d 24 For usage instructions, please refer to:
602adf40 25
dfc5606d 26 Documentation/ABI/testing/sysfs-bus-rbd
602adf40
YS
27
28 */
29
30#include <linux/ceph/libceph.h>
31#include <linux/ceph/osd_client.h>
32#include <linux/ceph/mon_client.h>
33#include <linux/ceph/decode.h>
59c2be1e 34#include <linux/parser.h>
602adf40
YS
35
36#include <linux/kernel.h>
37#include <linux/device.h>
38#include <linux/module.h>
39#include <linux/fs.h>
40#include <linux/blkdev.h>
41
42#include "rbd_types.h"
43
aafb230e
AE
44#define RBD_DEBUG /* Activate rbd_assert() calls */
45
593a9e7b
AE
46/*
47 * The basic unit of block I/O is a sector. It is interpreted in a
48 * number of contexts in Linux (blk, bio, genhd), but the default is
49 * universally 512 bytes. These symbols are just slightly more
50 * meaningful than the bare numbers they represent.
51 */
52#define SECTOR_SHIFT 9
53#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
54
f0f8cef5
AE
55#define RBD_DRV_NAME "rbd"
56#define RBD_DRV_NAME_LONG "rbd (rados block device)"
602adf40
YS
57
58#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
59
d4b125e9
AE
60#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
61#define RBD_MAX_SNAP_NAME_LEN \
62 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
63
35d489f9 64#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
602adf40
YS
65
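/*
 * A rough check of the 510 limit above (a sketch: assume the fixed
 * part of struct ceph_snap_context, i.e. nref, seq and num_snaps
 * plus padding, occupies 16 bytes):
 *
 *	16 + 510 * sizeof (u64) = 16 + 4080 = 4096
 *
 * so the largest allowed snapshot context exactly fills a 4KB page.
 */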
66#define RBD_SNAP_HEAD_NAME "-"
67
9e15b77d
AE
68/* This allows a single page to hold an image name sent by OSD */
69#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
1e130199 70#define RBD_IMAGE_ID_LEN_MAX 64
9e15b77d 71
1e130199 72#define RBD_OBJ_PREFIX_LEN_MAX 64
589d30e0 73
d889140c
AE
74/* Feature bits */
75
76#define RBD_FEATURE_LAYERING 1
77
78/* Features supported by this (client software) implementation. */
79
80#define RBD_FEATURES_ALL (0)
81
81a89793
AE
82/*
83 * An RBD device name will be "rbd#", where the "rbd" comes from
84 * RBD_DRV_NAME above, and # is a unique integer identifier.
85 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
86 * enough to hold all possible device names.
87 */
602adf40 88#define DEV_NAME_LEN 32
81a89793 89#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
602adf40
YS
90
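/*
 * A worked check of the width formula above, assuming a 4-byte int:
 * each byte contributes at most log10(256), about 2.41, decimal
 * digits, which (5 * sizeof (int)) / 2 = 10 over-approximates, and
 * the trailing + 1 leaves room for a sign.  INT_MIN (-2147483648)
 * needs exactly 11 characters, so "rbd" plus any integer id fits
 * easily within DEV_NAME_LEN.
 */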
91/*
92 * block device image metadata (in-memory version)
93 */
94struct rbd_image_header {
f84344f3 95 /* These four fields never change for a given rbd image */
849b4260 96 char *object_prefix;
34b13184 97 u64 features;
602adf40
YS
98 __u8 obj_order;
99 __u8 crypt_type;
100 __u8 comp_type;
602adf40 101
f84344f3
AE
102 /* The remaining fields need to be updated occasionally */
103 u64 image_size;
104 struct ceph_snap_context *snapc;
602adf40
YS
105 char *snap_names;
106 u64 *snap_sizes;
59c2be1e
YS
107
108 u64 obj_version;
109};
110
0d7dbfce
AE
111/*
112 * An rbd image specification.
113 *
114 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
c66c6e0c
AE
115 * identify an image. Each rbd_dev structure includes a pointer to
116 * an rbd_spec structure that encapsulates this identity.
117 *
118 * Each of the ids in an rbd_spec has an associated name. For a
119 * user-mapped image, the names are supplied and the ids associated
120 * with them are looked up. For a layered image, a parent image is
121 * defined by the tuple, and the names are looked up.
122 *
123 * An rbd_dev structure contains a parent_spec pointer which is
124 * non-null if the image it represents is a child in a layered
125 * image. This pointer will refer to the rbd_spec structure used
126 * by the parent rbd_dev for its own identity (i.e., the structure
127 * is shared between the parent and child).
128 *
129 * Since these structures are populated once, during the discovery
130 * phase of image construction, they are effectively immutable so
131 * we make no effort to synchronize access to them.
132 *
133 * Note that code herein does not assume the image name is known (it
134 * could be a null pointer).
0d7dbfce
AE
135 */
136struct rbd_spec {
137 u64 pool_id;
138 char *pool_name;
139
140 char *image_id;
0d7dbfce 141 char *image_name;
0d7dbfce
AE
142
143 u64 snap_id;
144 char *snap_name;
145
146 struct kref kref;
147};
148
602adf40 149/*
f0f8cef5 150 * An instance of the client. Multiple devices may share an rbd client.
602adf40
YS
151 */
152struct rbd_client {
153 struct ceph_client *client;
154 struct kref kref;
155 struct list_head node;
156};
157
bf0d5f50
AE
158struct rbd_img_request;
159typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
160
161#define BAD_WHICH U32_MAX /* Good which or bad which, which? */
162
163struct rbd_obj_request;
164typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
165
9969ebc5
AE
166enum obj_request_type {
167 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
168};
bf0d5f50
AE
169
170struct rbd_obj_request {
171 const char *object_name;
172 u64 offset; /* object start byte */
173 u64 length; /* bytes from offset */
174
175 struct rbd_img_request *img_request;
176 struct list_head links; /* img_request->obj_requests */
177 u32 which; /* posn in image request list */
178
179 enum obj_request_type type;
788e2df3
AE
180 union {
181 struct bio *bio_list;
182 struct {
183 struct page **pages;
184 u32 page_count;
185 };
186 };
bf0d5f50
AE
187
188 struct ceph_osd_request *osd_req;
189
190 u64 xferred; /* bytes transferred */
191 u64 version;
1b83bef2 192 int result;
bf0d5f50
AE
193 atomic_t done;
194
195 rbd_obj_callback_t callback;
788e2df3 196 struct completion completion;
bf0d5f50
AE
197
198 struct kref kref;
199};
200
201struct rbd_img_request {
202 struct request *rq;
203 struct rbd_device *rbd_dev;
204 u64 offset; /* starting image byte offset */
205 u64 length; /* byte count from offset */
206 bool write_request; /* false for read */
207 union {
208 struct ceph_snap_context *snapc; /* for writes */
209 u64 snap_id; /* for reads */
210 };
211 spinlock_t completion_lock;/* protects next_completion */
212 u32 next_completion;
213 rbd_img_callback_t callback;
214
215 u32 obj_request_count;
216 struct list_head obj_requests; /* rbd_obj_request structs */
217
218 struct kref kref;
219};
220
221#define for_each_obj_request(ireq, oreq) \
ef06f4d3 222 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
bf0d5f50 223#define for_each_obj_request_from(ireq, oreq) \
ef06f4d3 224 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
bf0d5f50 225#define for_each_obj_request_safe(ireq, oreq, n) \
ef06f4d3 226 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
bf0d5f50 227
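/*
 * A usage sketch for the macros above (the helper is hypothetical,
 * not part of this driver).  Note that for_each_obj_request_safe()
 * deliberately walks the list in reverse, so teardown paths unlink
 * object requests from the tail and "which" stays consistent with
 * obj_request_count as entries are removed.
 */
static inline u32 rbd_img_count_obj_requests(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u32 count = 0;

	for_each_obj_request(img_request, obj_request)
		count++;

	return count;	/* expected to equal obj_request_count */
}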
dfc5606d
YS
228struct rbd_snap {
229 struct device dev;
230 const char *name;
3591538f 231 u64 size;
dfc5606d
YS
232 struct list_head node;
233 u64 id;
34b13184 234 u64 features;
dfc5606d
YS
235};
236
f84344f3 237struct rbd_mapping {
99c1f08f 238 u64 size;
34b13184 239 u64 features;
f84344f3
AE
240 bool read_only;
241};
242
602adf40
YS
243/*
244 * a single device
245 */
246struct rbd_device {
de71a297 247 int dev_id; /* blkdev unique id */
602adf40
YS
248
249 int major; /* blkdev assigned major */
250 struct gendisk *disk; /* blkdev's gendisk and rq */
602adf40 251
a30b71b9 252 u32 image_format; /* Either 1 or 2 */
602adf40
YS
253 struct rbd_client *rbd_client;
254
255 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
256
b82d167b 257 spinlock_t lock; /* queue, flags, open_count */
602adf40
YS
258
259 struct rbd_image_header header;
b82d167b 260 unsigned long flags; /* possibly lock protected */
0d7dbfce 261 struct rbd_spec *spec;
602adf40 262
0d7dbfce 263 char *header_name;
971f839a 264
0903e875
AE
265 struct ceph_file_layout layout;
266
59c2be1e 267 struct ceph_osd_event *watch_event;
975241af 268 struct rbd_obj_request *watch_request;
59c2be1e 269
86b00e0d
AE
270 struct rbd_spec *parent_spec;
271 u64 parent_overlap;
272
c666601a
JD
273 /* protects updating the header */
274 struct rw_semaphore header_rwsem;
f84344f3
AE
275
276 struct rbd_mapping mapping;
602adf40
YS
277
278 struct list_head node;
dfc5606d
YS
279
280 /* list of snapshots */
281 struct list_head snaps;
282
283 /* sysfs related */
284 struct device dev;
b82d167b 285 unsigned long open_count; /* protected by lock */
dfc5606d
YS
286};
287
b82d167b
AE
288/*
289 * Flag bits for rbd_dev->flags. If atomicity is required,
290 * rbd_dev->lock is used to protect access.
291 *
292 * Currently, only the "removing" flag (which is coupled with the
293 * "open_count" field) requires atomic access.
294 */
6d292906
AE
295enum rbd_dev_flags {
296 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
b82d167b 297 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
6d292906
AE
298};
299
602adf40 300static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
e124a82f 301
602adf40 302static LIST_HEAD(rbd_dev_list); /* devices */
e124a82f
AE
303static DEFINE_SPINLOCK(rbd_dev_list_lock);
304
432b8587
AE
305static LIST_HEAD(rbd_client_list); /* clients */
306static DEFINE_SPINLOCK(rbd_client_list_lock);
602adf40 307
304f6808
AE
308static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
309static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
310
dfc5606d 311static void rbd_dev_release(struct device *dev);
41f38c2b 312static void rbd_remove_snap_dev(struct rbd_snap *snap);
dfc5606d 313
f0f8cef5
AE
314static ssize_t rbd_add(struct bus_type *bus, const char *buf,
315 size_t count);
316static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
317 size_t count);
318
319static struct bus_attribute rbd_bus_attrs[] = {
320 __ATTR(add, S_IWUSR, NULL, rbd_add),
321 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
322 __ATTR_NULL
323};
324
325static struct bus_type rbd_bus_type = {
326 .name = "rbd",
327 .bus_attrs = rbd_bus_attrs,
328};
329
330static void rbd_root_dev_release(struct device *dev)
331{
332}
333
334static struct device rbd_root_dev = {
335 .init_name = "rbd",
336 .release = rbd_root_dev_release,
337};
338
06ecc6cb
AE
339static __printf(2, 3)
340void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
341{
342 struct va_format vaf;
343 va_list args;
344
345 va_start(args, fmt);
346 vaf.fmt = fmt;
347 vaf.va = &args;
348
349 if (!rbd_dev)
350 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
351 else if (rbd_dev->disk)
352 printk(KERN_WARNING "%s: %s: %pV\n",
353 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
354 else if (rbd_dev->spec && rbd_dev->spec->image_name)
355 printk(KERN_WARNING "%s: image %s: %pV\n",
356 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
357 else if (rbd_dev->spec && rbd_dev->spec->image_id)
358 printk(KERN_WARNING "%s: id %s: %pV\n",
359 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
360 else /* punt */
361 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
362 RBD_DRV_NAME, rbd_dev, &vaf);
363 va_end(args);
364}
365
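/*
 * Example call (hypothetical message): %pV above lets callers pass
 * ordinary printf-style arguments, and the identity prefix degrades
 * gracefully from disk name to image name to image id to a raw
 * pointer:
 *
 *	rbd_warn(rbd_dev, "short read: %llu of %llu bytes", got, want);
 *
 * prints e.g. "rbd: rbd0: short read: 512 of 4096 bytes".
 */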
aafb230e
AE
366#ifdef RBD_DEBUG
367#define rbd_assert(expr) \
368 if (unlikely(!(expr))) { \
369 printk(KERN_ERR "\nAssertion failure in %s() " \
370 "at line %d:\n\n" \
371 "\trbd_assert(%s);\n\n", \
372 __func__, __LINE__, #expr); \
373 BUG(); \
374 }
375#else /* !RBD_DEBUG */
376# define rbd_assert(expr) ((void) 0)
377#endif /* !RBD_DEBUG */
dfc5606d 378
117973fb
AE
379static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
380static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
59c2be1e 381
602adf40
YS
382static int rbd_open(struct block_device *bdev, fmode_t mode)
383{
f0f8cef5 384 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
b82d167b 385 bool removing = false;
602adf40 386
f84344f3 387 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
602adf40
YS
388 return -EROFS;
389
a14ea269 390 spin_lock_irq(&rbd_dev->lock);
b82d167b
AE
391 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
392 removing = true;
393 else
394 rbd_dev->open_count++;
a14ea269 395 spin_unlock_irq(&rbd_dev->lock);
b82d167b
AE
396 if (removing)
397 return -ENOENT;
398
42382b70 399 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
c3e946ce 400 (void) get_device(&rbd_dev->dev);
f84344f3 401 set_device_ro(bdev, rbd_dev->mapping.read_only);
42382b70 402 mutex_unlock(&ctl_mutex);
340c7a2b 403
602adf40
YS
404 return 0;
405}
406
dfc5606d
YS
407static int rbd_release(struct gendisk *disk, fmode_t mode)
408{
409 struct rbd_device *rbd_dev = disk->private_data;
b82d167b
AE
410 unsigned long open_count_before;
411
a14ea269 412 spin_lock_irq(&rbd_dev->lock);
b82d167b 413 open_count_before = rbd_dev->open_count--;
a14ea269 414 spin_unlock_irq(&rbd_dev->lock);
b82d167b 415 rbd_assert(open_count_before > 0);
dfc5606d 416
42382b70 417 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
c3e946ce 418 put_device(&rbd_dev->dev);
42382b70 419 mutex_unlock(&ctl_mutex);
dfc5606d
YS
420
421 return 0;
422}
423
602adf40
YS
424static const struct block_device_operations rbd_bd_ops = {
425 .owner = THIS_MODULE,
426 .open = rbd_open,
dfc5606d 427 .release = rbd_release,
602adf40
YS
428};
429
430/*
431 * Initialize an rbd client instance.
43ae4701 432 * We own *ceph_opts.
602adf40 433 */
f8c38929 434static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
602adf40
YS
435{
436 struct rbd_client *rbdc;
437 int ret = -ENOMEM;
438
37206ee5 439 dout("%s:\n", __func__);
602adf40
YS
440 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
441 if (!rbdc)
442 goto out_opt;
443
444 kref_init(&rbdc->kref);
445 INIT_LIST_HEAD(&rbdc->node);
446
bc534d86
AE
447 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
448
43ae4701 449 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
602adf40 450 if (IS_ERR(rbdc->client))
bc534d86 451 goto out_mutex;
43ae4701 452 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
602adf40
YS
453
454 ret = ceph_open_session(rbdc->client);
455 if (ret < 0)
456 goto out_err;
457
432b8587 458 spin_lock(&rbd_client_list_lock);
602adf40 459 list_add_tail(&rbdc->node, &rbd_client_list);
432b8587 460 spin_unlock(&rbd_client_list_lock);
602adf40 461
bc534d86 462 mutex_unlock(&ctl_mutex);
37206ee5 463 dout("%s: rbdc %p\n", __func__, rbdc);
bc534d86 464
602adf40
YS
465 return rbdc;
466
467out_err:
468 ceph_destroy_client(rbdc->client);
bc534d86
AE
469out_mutex:
470 mutex_unlock(&ctl_mutex);
602adf40
YS
471 kfree(rbdc);
472out_opt:
43ae4701
AE
473 if (ceph_opts)
474 ceph_destroy_options(ceph_opts);
37206ee5
AE
475 dout("%s: error %d\n", __func__, ret);
476
28f259b7 477 return ERR_PTR(ret);
602adf40
YS
478}
479
480/*
1f7ba331
AE
481 * Find a ceph client with specific addr and configuration. If
482 * found, bump its reference count.
602adf40 483 */
1f7ba331 484static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
602adf40
YS
485{
486 struct rbd_client *client_node;
1f7ba331 487 bool found = false;
602adf40 488
43ae4701 489 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
602adf40
YS
490 return NULL;
491
1f7ba331
AE
492 spin_lock(&rbd_client_list_lock);
493 list_for_each_entry(client_node, &rbd_client_list, node) {
494 if (!ceph_compare_options(ceph_opts, client_node->client)) {
495 kref_get(&client_node->kref);
496 found = true;
497 break;
498 }
499 }
500 spin_unlock(&rbd_client_list_lock);
501
502 return found ? client_node : NULL;
602adf40
YS
503}
504
59c2be1e
YS
505/*
506 * mount options
507 */
508enum {
59c2be1e
YS
509 Opt_last_int,
510 /* int args above */
511 Opt_last_string,
512 /* string args above */
cc0538b6
AE
513 Opt_read_only,
514 Opt_read_write,
515 /* Boolean args above */
516 Opt_last_bool,
59c2be1e
YS
517};
518
43ae4701 519static match_table_t rbd_opts_tokens = {
59c2be1e
YS
520 /* int args above */
521 /* string args above */
be466c1c 522 {Opt_read_only, "read_only"},
cc0538b6
AE
523 {Opt_read_only, "ro"}, /* Alternate spelling */
524 {Opt_read_write, "read_write"},
525 {Opt_read_write, "rw"}, /* Alternate spelling */
526 /* Boolean args above */
59c2be1e
YS
527 {-1, NULL}
528};
529
98571b5a
AE
530struct rbd_options {
531 bool read_only;
532};
533
534#define RBD_READ_ONLY_DEFAULT false
535
59c2be1e
YS
536static int parse_rbd_opts_token(char *c, void *private)
537{
43ae4701 538 struct rbd_options *rbd_opts = private;
59c2be1e
YS
539 substring_t argstr[MAX_OPT_ARGS];
540 int token, intval, ret;
541
43ae4701 542 token = match_token(c, rbd_opts_tokens, argstr);
59c2be1e
YS
543 if (token < 0)
544 return -EINVAL;
545
546 if (token < Opt_last_int) {
547 ret = match_int(&argstr[0], &intval);
548 if (ret < 0) {
549 pr_err("bad mount option arg (not int) "
550 "at '%s'\n", c);
551 return ret;
552 }
553 dout("got int token %d val %d\n", token, intval);
554 } else if (token > Opt_last_int && token < Opt_last_string) {
555 dout("got string token %d val %s\n", token,
556 argstr[0].from);
cc0538b6
AE
557 } else if (token > Opt_last_string && token < Opt_last_bool) {
558 dout("got Boolean token %d\n", token);
59c2be1e
YS
559 } else {
560 dout("got token %d\n", token);
561 }
562
563 switch (token) {
cc0538b6
AE
564 case Opt_read_only:
565 rbd_opts->read_only = true;
566 break;
567 case Opt_read_write:
568 rbd_opts->read_only = false;
569 break;
59c2be1e 570 default:
aafb230e
AE
571 rbd_assert(false);
572 break;
59c2be1e
YS
573 }
574 return 0;
575}
576
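/*
 * A sketch of how the token table drives parsing (the call below is
 * hypothetical): for the option string "ro", match_token() returns
 * Opt_read_only, which lies in the Boolean range, so no argument is
 * consumed and the switch simply sets the flag:
 *
 *	struct rbd_options opts = { .read_only = RBD_READ_ONLY_DEFAULT };
 *	char arg[] = "ro";
 *	int ret = parse_rbd_opts_token(arg, &opts);
 *
 * afterward ret is 0 and opts.read_only is true.
 */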
602adf40
YS
577/*
578 * Get a ceph client with specific addr and configuration; if one does
579 * not exist, create it.
580 */
9d3997fd 581static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
602adf40 582{
f8c38929 583 struct rbd_client *rbdc;
59c2be1e 584
1f7ba331 585 rbdc = rbd_client_find(ceph_opts);
9d3997fd 586 if (rbdc) /* using an existing client */
43ae4701 587 ceph_destroy_options(ceph_opts);
9d3997fd 588 else
f8c38929 589 rbdc = rbd_client_create(ceph_opts);
602adf40 590
9d3997fd 591 return rbdc;
602adf40
YS
592}
593
594/*
595 * Destroy ceph client
d23a4b3f 596 *
432b8587 597 * Caller must hold rbd_client_list_lock.
602adf40
YS
598 */
599static void rbd_client_release(struct kref *kref)
600{
601 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
602
37206ee5 603 dout("%s: rbdc %p\n", __func__, rbdc);
cd9d9f5d 604 spin_lock(&rbd_client_list_lock);
602adf40 605 list_del(&rbdc->node);
cd9d9f5d 606 spin_unlock(&rbd_client_list_lock);
602adf40
YS
607
608 ceph_destroy_client(rbdc->client);
609 kfree(rbdc);
610}
611
612/*
613 * Drop reference to ceph client node. If it's not referenced anymore, release
614 * it.
615 */
9d3997fd 616static void rbd_put_client(struct rbd_client *rbdc)
602adf40 617{
c53d5893
AE
618 if (rbdc)
619 kref_put(&rbdc->kref, rbd_client_release);
602adf40
YS
620}
621
a30b71b9
AE
622static bool rbd_image_format_valid(u32 image_format)
623{
624 return image_format == 1 || image_format == 2;
625}
626
8e94af8e
AE
627static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
628{
103a150f
AE
629 size_t size;
630 u32 snap_count;
631
632 /* The header has to start with the magic rbd header text */
633 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
634 return false;
635
db2388b6
AE
636 /* The bio layer requires at least sector-sized I/O */
637
638 if (ondisk->options.order < SECTOR_SHIFT)
639 return false;
640
641 /* If we use u64 in a few spots we may be able to loosen this */
642
643 if (ondisk->options.order > 8 * sizeof (int) - 1)
644 return false;
645
103a150f
AE
646 /*
647 * The size of a snapshot header has to fit in a size_t, and
648 * that limits the number of snapshots.
649 */
650 snap_count = le32_to_cpu(ondisk->snap_count);
651 size = SIZE_MAX - sizeof (struct ceph_snap_context);
652 if (snap_count > size / sizeof (__le64))
653 return false;
654
655 /*
656 * Not only that, but the size of the entire snapshot
657 * header must also be representable in a size_t.
658 */
659 size -= snap_count * sizeof (__le64);
660 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
661 return false;
662
663 return true;
8e94af8e
AE
664}
665
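/*
 * Concretely (a sketch, assuming 64-bit size_t and a 16-byte fixed
 * part for struct ceph_snap_context): the first check above bounds
 * snap_count by (SIZE_MAX - 16) / 8, and the second requires
 *
 *	snap_names_len <= SIZE_MAX - 16 - snap_count * 8
 *
 * so the later "fixed part + snapshot ids + names" size computations
 * cannot wrap around in a size_t.
 */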
602adf40
YS
666/*
667 * Create a new header structure, translate header format from the on-disk
668 * header.
669 */
670static int rbd_header_from_disk(struct rbd_image_header *header,
4156d998 671 struct rbd_image_header_ondisk *ondisk)
602adf40 672{
ccece235 673 u32 snap_count;
58c17b0e 674 size_t len;
d2bb24e5 675 size_t size;
621901d6 676 u32 i;
602adf40 677
6a52325f
AE
678 memset(header, 0, sizeof (*header));
679
103a150f
AE
680 snap_count = le32_to_cpu(ondisk->snap_count);
681
58c17b0e
AE
682 len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
683 header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
6a52325f 684 if (!header->object_prefix)
602adf40 685 return -ENOMEM;
58c17b0e
AE
686 memcpy(header->object_prefix, ondisk->object_prefix, len);
687 header->object_prefix[len] = '\0';
00f1f36f 688
602adf40 689 if (snap_count) {
f785cc1d
AE
690 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
691
621901d6
AE
692 /* Save a copy of the snapshot names */
693
f785cc1d
AE
694 if (snap_names_len > (u64) SIZE_MAX)
695 return -EIO;
696 header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
602adf40 697 if (!header->snap_names)
6a52325f 698 goto out_err;
f785cc1d
AE
699 /*
700 * Note that rbd_dev_v1_header_read() guarantees
701 * the ondisk buffer we're working with has
702 * snap_names_len bytes beyond the end of the
703 * snapshot id array, so this memcpy() is safe.
704 */
705 memcpy(header->snap_names, &ondisk->snaps[snap_count],
706 snap_names_len);
6a52325f 707
621901d6
AE
708 /* Record each snapshot's size */
709
d2bb24e5
AE
710 size = snap_count * sizeof (*header->snap_sizes);
711 header->snap_sizes = kmalloc(size, GFP_KERNEL);
602adf40 712 if (!header->snap_sizes)
6a52325f 713 goto out_err;
621901d6
AE
714 for (i = 0; i < snap_count; i++)
715 header->snap_sizes[i] =
716 le64_to_cpu(ondisk->snaps[i].image_size);
602adf40 717 } else {
ccece235 718 WARN_ON(ondisk->snap_names_len);
602adf40
YS
719 header->snap_names = NULL;
720 header->snap_sizes = NULL;
721 }
849b4260 722
34b13184 723 header->features = 0; /* No features support in v1 images */
602adf40
YS
724 header->obj_order = ondisk->options.order;
725 header->crypt_type = ondisk->options.crypt_type;
726 header->comp_type = ondisk->options.comp_type;
6a52325f 727
621901d6
AE
728 /* Allocate and fill in the snapshot context */
729
f84344f3 730 header->image_size = le64_to_cpu(ondisk->image_size);
6a52325f
AE
731 size = sizeof (struct ceph_snap_context);
732 size += snap_count * sizeof (header->snapc->snaps[0]);
733 header->snapc = kzalloc(size, GFP_KERNEL);
734 if (!header->snapc)
735 goto out_err;
602adf40
YS
736
737 atomic_set(&header->snapc->nref, 1);
505cbb9b 738 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
602adf40 739 header->snapc->num_snaps = snap_count;
621901d6
AE
740 for (i = 0; i < snap_count; i++)
741 header->snapc->snaps[i] =
742 le64_to_cpu(ondisk->snaps[i].id);
602adf40
YS
743
744 return 0;
745
6a52325f 746out_err:
849b4260 747 kfree(header->snap_sizes);
ccece235 748 header->snap_sizes = NULL;
602adf40 749 kfree(header->snap_names);
ccece235 750 header->snap_names = NULL;
6a52325f
AE
751 kfree(header->object_prefix);
752 header->object_prefix = NULL;
ccece235 753
00f1f36f 754 return -ENOMEM;
602adf40
YS
755}
756
9e15b77d
AE
757static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
758{
759 struct rbd_snap *snap;
760
761 if (snap_id == CEPH_NOSNAP)
762 return RBD_SNAP_HEAD_NAME;
763
764 list_for_each_entry(snap, &rbd_dev->snaps, node)
765 if (snap_id == snap->id)
766 return snap->name;
767
768 return NULL;
769}
770
8836b995 771static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
602adf40 772{
602adf40 773
e86924a8 774 struct rbd_snap *snap;
602adf40 775
e86924a8
AE
776 list_for_each_entry(snap, &rbd_dev->snaps, node) {
777 if (!strcmp(snap_name, snap->name)) {
0d7dbfce 778 rbd_dev->spec->snap_id = snap->id;
e86924a8 779 rbd_dev->mapping.size = snap->size;
34b13184 780 rbd_dev->mapping.features = snap->features;
602adf40 781
e86924a8 782 return 0;
00f1f36f 783 }
00f1f36f 784 }
e86924a8 785
00f1f36f 786 return -ENOENT;
602adf40
YS
787}
788
819d52bf 789static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
602adf40 790{
78dc447d 791 int ret;
602adf40 792
0d7dbfce 793 if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
cc9d734c 794 sizeof (RBD_SNAP_HEAD_NAME))) {
0d7dbfce 795 rbd_dev->spec->snap_id = CEPH_NOSNAP;
99c1f08f 796 rbd_dev->mapping.size = rbd_dev->header.image_size;
34b13184 797 rbd_dev->mapping.features = rbd_dev->header.features;
e86924a8 798 ret = 0;
602adf40 799 } else {
0d7dbfce 800 ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
602adf40
YS
801 if (ret < 0)
802 goto done;
f84344f3 803 rbd_dev->mapping.read_only = true;
602adf40 804 }
6d292906
AE
805 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
806
602adf40 807done:
602adf40
YS
808 return ret;
809}
810
811static void rbd_header_free(struct rbd_image_header *header)
812{
849b4260 813 kfree(header->object_prefix);
d78fd7ae 814 header->object_prefix = NULL;
602adf40 815 kfree(header->snap_sizes);
d78fd7ae 816 header->snap_sizes = NULL;
849b4260 817 kfree(header->snap_names);
d78fd7ae 818 header->snap_names = NULL;
d1d25646 819 ceph_put_snap_context(header->snapc);
d78fd7ae 820 header->snapc = NULL;
602adf40
YS
821}
822
98571b5a 823static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
602adf40 824{
65ccfe21
AE
825 char *name;
826 u64 segment;
827 int ret;
602adf40 828
2fd82b9e 829 name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
65ccfe21
AE
830 if (!name)
831 return NULL;
832 segment = offset >> rbd_dev->header.obj_order;
2fd82b9e 833 ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
65ccfe21 834 rbd_dev->header.object_prefix, segment);
2fd82b9e 835 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
65ccfe21
AE
836 pr_err("error formatting segment name for #%llu (%d)\n",
837 segment, ret);
838 kfree(name);
839 name = NULL;
840 }
602adf40 841
65ccfe21
AE
842 return name;
843}
602adf40 844
65ccfe21
AE
845static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
846{
847 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
602adf40 848
65ccfe21
AE
849 return offset & (segment_size - 1);
850}
851
852static u64 rbd_segment_length(struct rbd_device *rbd_dev,
853 u64 offset, u64 length)
854{
855 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
856
857 offset &= segment_size - 1;
858
aafb230e 859 rbd_assert(length <= U64_MAX - offset);
65ccfe21
AE
860 if (offset + length > segment_size)
861 length = segment_size - offset;
862
863 return length;
602adf40
YS
864}
865
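/*
 * A worked example of the segment math above (all values
 * hypothetical): with obj_order = 22 the image is carved into 4MB
 * objects, so for object_prefix "rb.0.1234" and image offset
 * 0x00c01000:
 *
 *	segment			= 0x00c01000 >> 22 = 3
 *	rbd_segment_name()	= "rb.0.1234.000000000003"
 *	rbd_segment_offset()	= 0x00c01000 & 0x3fffff = 0x1000
 *
 * and a 16KB request starting there ends well inside the segment, so
 * rbd_segment_length() returns it unclamped.
 */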
029bcbd8
JD
866/*
867 * returns the size of an object in the image
868 */
869static u64 rbd_obj_bytes(struct rbd_image_header *header)
870{
871 return 1 << header->obj_order;
872}
873
602adf40
YS
874/*
875 * bio helpers
876 */
877
878static void bio_chain_put(struct bio *chain)
879{
880 struct bio *tmp;
881
882 while (chain) {
883 tmp = chain;
884 chain = chain->bi_next;
885 bio_put(tmp);
886 }
887}
888
889/*
890 * zeros a bio chain, starting at specific offset
891 */
892static void zero_bio_chain(struct bio *chain, int start_ofs)
893{
894 struct bio_vec *bv;
895 unsigned long flags;
896 void *buf;
897 int i;
898 int pos = 0;
899
900 while (chain) {
901 bio_for_each_segment(bv, chain, i) {
902 if (pos + bv->bv_len > start_ofs) {
903 int remainder = max(start_ofs - pos, 0);
904 buf = bvec_kmap_irq(bv, &flags);
905 memset(buf + remainder, 0,
906 bv->bv_len - remainder);
85b5aaa6 907 bvec_kunmap_irq(buf, &flags);
602adf40
YS
908 }
909 pos += bv->bv_len;
910 }
911
912 chain = chain->bi_next;
913 }
914}
915
916/*
f7760dad
AE
917 * Clone a portion of a bio, starting at the given byte offset
918 * and continuing for the number of bytes indicated.
602adf40 919 */
f7760dad
AE
920static struct bio *bio_clone_range(struct bio *bio_src,
921 unsigned int offset,
922 unsigned int len,
923 gfp_t gfpmask)
602adf40 924{
f7760dad
AE
925 struct bio_vec *bv;
926 unsigned int resid;
927 unsigned short idx;
928 unsigned int voff;
929 unsigned short end_idx;
930 unsigned short vcnt;
931 struct bio *bio;
932
933 /* Handle the easy case for the caller */
934
935 if (!offset && len == bio_src->bi_size)
936 return bio_clone(bio_src, gfpmask);
937
938 if (WARN_ON_ONCE(!len))
939 return NULL;
940 if (WARN_ON_ONCE(len > bio_src->bi_size))
941 return NULL;
942 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
943 return NULL;
944
945 /* Find first affected segment... */
946
947 resid = offset;
948 __bio_for_each_segment(bv, bio_src, idx, 0) {
949 if (resid < bv->bv_len)
950 break;
951 resid -= bv->bv_len;
602adf40 952 }
f7760dad 953 voff = resid;
602adf40 954
f7760dad 955 /* ...and the last affected segment */
602adf40 956
f7760dad
AE
957 resid += len;
958 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
959 if (resid <= bv->bv_len)
960 break;
961 resid -= bv->bv_len;
962 }
963 vcnt = end_idx - idx + 1;
964
965 /* Build the clone */
966
967 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
968 if (!bio)
969 return NULL; /* ENOMEM */
602adf40 970
f7760dad
AE
971 bio->bi_bdev = bio_src->bi_bdev;
972 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
973 bio->bi_rw = bio_src->bi_rw;
974 bio->bi_flags |= 1 << BIO_CLONED;
975
976 /*
977 * Copy over our part of the bio_vec, then update the first
978 * and last (or only) entries.
979 */
980 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
981 vcnt * sizeof (struct bio_vec));
982 bio->bi_io_vec[0].bv_offset += voff;
983 if (vcnt > 1) {
984 bio->bi_io_vec[0].bv_len -= voff;
985 bio->bi_io_vec[vcnt - 1].bv_len = resid;
986 } else {
987 bio->bi_io_vec[0].bv_len = len;
602adf40
YS
988 }
989
f7760dad
AE
990 bio->bi_vcnt = vcnt;
991 bio->bi_size = len;
992 bio->bi_idx = 0;
993
994 return bio;
995}
996
997/*
998 * Clone a portion of a bio chain, starting at the given byte offset
999 * into the first bio in the source chain and continuing for the
1000 * number of bytes indicated. The result is another bio chain of
1001 * exactly the given length, or a null pointer on error.
1002 *
1003 * The bio_src and offset parameters are both in-out. On entry they
1004 * refer to the first source bio and the offset into that bio where
1005 * the start of data to be cloned is located.
1006 *
1007 * On return, bio_src is updated to refer to the bio in the source
1008 * chain that contains the first un-cloned byte, and *offset will
1009 * contain the offset of that byte within that bio.
1010 */
1011static struct bio *bio_chain_clone_range(struct bio **bio_src,
1012 unsigned int *offset,
1013 unsigned int len,
1014 gfp_t gfpmask)
1015{
1016 struct bio *bi = *bio_src;
1017 unsigned int off = *offset;
1018 struct bio *chain = NULL;
1019 struct bio **end;
1020
1021 /* Build up a chain of clone bios up to the limit */
1022
1023 if (!bi || off >= bi->bi_size || !len)
1024 return NULL; /* Nothing to clone */
602adf40 1025
f7760dad
AE
1026 end = &chain;
1027 while (len) {
1028 unsigned int bi_size;
1029 struct bio *bio;
1030
f5400b7a
AE
1031 if (!bi) {
1032 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
f7760dad 1033 goto out_err; /* EINVAL; ran out of bio's */
f5400b7a 1034 }
f7760dad
AE
1035 bi_size = min_t(unsigned int, bi->bi_size - off, len);
1036 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1037 if (!bio)
1038 goto out_err; /* ENOMEM */
1039
1040 *end = bio;
1041 end = &bio->bi_next;
602adf40 1042
f7760dad
AE
1043 off += bi_size;
1044 if (off == bi->bi_size) {
1045 bi = bi->bi_next;
1046 off = 0;
1047 }
1048 len -= bi_size;
1049 }
1050 *bio_src = bi;
1051 *offset = off;
1052
1053 return chain;
1054out_err:
1055 bio_chain_put(chain);
602adf40 1056
602adf40
YS
1057 return NULL;
1058}
1059
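/*
 * Usage sketch (hypothetical, using the in-out cursor described
 * above): a request spanning two 2MB clones can be carved up with
 * two successive calls against the same cursor:
 *
 *	struct bio *bi = rq_bio;
 *	unsigned int off = 0;
 *	struct bio *first;
 *	struct bio *second;
 *
 *	first = bio_chain_clone_range(&bi, &off, SZ_2M, GFP_NOIO);
 *	second = bio_chain_clone_range(&bi, &off, SZ_2M, GFP_NOIO);
 *
 * After each call bi and off identify the first un-cloned byte, so
 * the second clone picks up exactly where the first left off;
 * rbd_img_request_fill_bio() below relies on this when it walks an
 * image request segment by segment.
 */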
bf0d5f50
AE
1060static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1061{
37206ee5
AE
1062 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1063 atomic_read(&obj_request->kref.refcount));
bf0d5f50
AE
1064 kref_get(&obj_request->kref);
1065}
1066
1067static void rbd_obj_request_destroy(struct kref *kref);
1068static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1069{
1070 rbd_assert(obj_request != NULL);
37206ee5
AE
1071 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1072 atomic_read(&obj_request->kref.refcount));
bf0d5f50
AE
1073 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1074}
1075
1076static void rbd_img_request_get(struct rbd_img_request *img_request)
1077{
37206ee5
AE
1078 dout("%s: img %p (was %d)\n", __func__, img_request,
1079 atomic_read(&img_request->kref.refcount));
bf0d5f50
AE
1080 kref_get(&img_request->kref);
1081}
1082
1083static void rbd_img_request_destroy(struct kref *kref);
1084static void rbd_img_request_put(struct rbd_img_request *img_request)
1085{
1086 rbd_assert(img_request != NULL);
37206ee5
AE
1087 dout("%s: img %p (was %d)\n", __func__, img_request,
1088 atomic_read(&img_request->kref.refcount));
bf0d5f50
AE
1089 kref_put(&img_request->kref, rbd_img_request_destroy);
1090}
1091
1092static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1093 struct rbd_obj_request *obj_request)
1094{
25dcf954
AE
1095 rbd_assert(obj_request->img_request == NULL);
1096
bf0d5f50
AE
1097 rbd_obj_request_get(obj_request);
1098 obj_request->img_request = img_request;
25dcf954 1099 obj_request->which = img_request->obj_request_count;
bf0d5f50 1100 rbd_assert(obj_request->which != BAD_WHICH);
25dcf954
AE
1101 img_request->obj_request_count++;
1102 list_add_tail(&obj_request->links, &img_request->obj_requests);
37206ee5
AE
1103 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1104 obj_request->which);
bf0d5f50
AE
1105}
1106
1107static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1108 struct rbd_obj_request *obj_request)
1109{
1110 rbd_assert(obj_request->which != BAD_WHICH);
25dcf954 1111
37206ee5
AE
1112 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1113 obj_request->which);
bf0d5f50 1114 list_del(&obj_request->links);
25dcf954
AE
1115 rbd_assert(img_request->obj_request_count > 0);
1116 img_request->obj_request_count--;
1117 rbd_assert(obj_request->which == img_request->obj_request_count);
1118 obj_request->which = BAD_WHICH;
bf0d5f50 1119 rbd_assert(obj_request->img_request == img_request);
bf0d5f50 1120 obj_request->img_request = NULL;
25dcf954 1121 obj_request->callback = NULL;
bf0d5f50
AE
1122 rbd_obj_request_put(obj_request);
1123}
1124
1125static bool obj_request_type_valid(enum obj_request_type type)
1126{
1127 switch (type) {
9969ebc5 1128 case OBJ_REQUEST_NODATA:
bf0d5f50 1129 case OBJ_REQUEST_BIO:
788e2df3 1130 case OBJ_REQUEST_PAGES:
bf0d5f50
AE
1131 return true;
1132 default:
1133 return false;
1134 }
1135}
1136
bf0d5f50
AE
1137static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1138 struct rbd_obj_request *obj_request)
1139{
37206ee5
AE
1140 dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1141
bf0d5f50
AE
1142 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1143}
1144
1145static void rbd_img_request_complete(struct rbd_img_request *img_request)
1146{
37206ee5 1147 dout("%s: img %p\n", __func__, img_request);
bf0d5f50
AE
1148 if (img_request->callback)
1149 img_request->callback(img_request);
1150 else
1151 rbd_img_request_put(img_request);
1152}
1153
788e2df3
AE
1154/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1155
1156static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1157{
37206ee5
AE
1158 dout("%s: obj %p\n", __func__, obj_request);
1159
788e2df3
AE
1160 return wait_for_completion_interruptible(&obj_request->completion);
1161}
1162
07741308
AE
1163static void obj_request_done_init(struct rbd_obj_request *obj_request)
1164{
1165 atomic_set(&obj_request->done, 0);
1166 smp_wmb();
1167}
1168
1169static void obj_request_done_set(struct rbd_obj_request *obj_request)
1170{
632b88ca
AE
1171 int done;
1172
1173 done = atomic_inc_return(&obj_request->done);
1174 if (done > 1) {
1175 struct rbd_img_request *img_request = obj_request->img_request;
1176 struct rbd_device *rbd_dev;
1177
1178 rbd_dev = img_request ? img_request->rbd_dev : NULL;
1179 rbd_warn(rbd_dev, "obj_request %p was already done\n",
1180 obj_request);
1181 }
07741308
AE
1182}
1183
1184static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1185{
632b88ca 1186 smp_mb();
07741308
AE
1187 return atomic_read(&obj_request->done) != 0;
1188}
1189
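/*
 * A note on the barrier pairing above: the smp_wmb() in
 * obj_request_done_init() pairs with the smp_mb() in
 * obj_request_done_test(), so a completion path that sees done != 0
 * also sees every store made before the counter was (re)initialized.
 * obj_request_done_set() doubles as a sanity check: a return value
 * of atomic_inc_return() greater than 1 means the request was
 * already completed once, which is reported via rbd_warn().
 */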
6e2a4505
AE
1190static void
1191rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1192{
1193 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1194 obj_request, obj_request->img_request, obj_request->result,
1195 obj_request->xferred, obj_request->length);
1196 /*
1197 * ENOENT means a hole in the image. We zero-fill the
1198 * entire length of the request. A short read also implies
1199 * zero-fill to the end of the request. Either way we
1200 * update the xferred count to indicate the whole request
1201 * was satisfied.
1202 */
1203 BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
1204 if (obj_request->result == -ENOENT) {
1205 zero_bio_chain(obj_request->bio_list, 0);
1206 obj_request->result = 0;
1207 obj_request->xferred = obj_request->length;
1208 } else if (obj_request->xferred < obj_request->length &&
1209 !obj_request->result) {
1210 zero_bio_chain(obj_request->bio_list, obj_request->xferred);
1211 obj_request->xferred = obj_request->length;
1212 }
1213 obj_request_done_set(obj_request);
1214}
1215
bf0d5f50
AE
1216static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1217{
37206ee5
AE
1218 dout("%s: obj %p cb %p\n", __func__, obj_request,
1219 obj_request->callback);
bf0d5f50
AE
1220 if (obj_request->callback)
1221 obj_request->callback(obj_request);
788e2df3
AE
1222 else
1223 complete_all(&obj_request->completion);
bf0d5f50
AE
1224}
1225
c47f9371 1226static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
39bf2c5d
AE
1227{
1228 dout("%s: obj %p\n", __func__, obj_request);
1229 obj_request_done_set(obj_request);
1230}
1231
c47f9371 1232static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
bf0d5f50 1233{
37206ee5 1234 dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
c47f9371 1235 obj_request->result, obj_request->xferred, obj_request->length);
6e2a4505
AE
1236 if (obj_request->img_request)
1237 rbd_img_obj_request_read_callback(obj_request);
1238 else
1239 obj_request_done_set(obj_request);
bf0d5f50
AE
1240}
1241
c47f9371 1242static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
bf0d5f50 1243{
1b83bef2
SW
1244 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1245 obj_request->result, obj_request->length);
1246 /*
1247 * There is no such thing as a successful short write.
1248 * Our xferred value is the number of bytes transferred
1249 * back. Set it to our originally-requested length.
1250 */
1251 obj_request->xferred = obj_request->length;
07741308 1252 obj_request_done_set(obj_request);
bf0d5f50
AE
1253}
1254
fbfab539
AE
1255/*
1256 * For a simple stat call there's nothing to do. We'll do more if
1257 * this is part of a write sequence for a layered image.
1258 */
c47f9371 1259static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
fbfab539 1260{
37206ee5 1261 dout("%s: obj %p\n", __func__, obj_request);
fbfab539
AE
1262 obj_request_done_set(obj_request);
1263}
1264
bf0d5f50
AE
1265static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1266 struct ceph_msg *msg)
1267{
1268 struct rbd_obj_request *obj_request = osd_req->r_priv;
bf0d5f50
AE
1269 u16 opcode;
1270
37206ee5 1271 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
bf0d5f50
AE
1272 rbd_assert(osd_req == obj_request->osd_req);
1273 rbd_assert(!!obj_request->img_request ^
1274 (obj_request->which == BAD_WHICH));
1275
1b83bef2
SW
1276 if (osd_req->r_result < 0)
1277 obj_request->result = osd_req->r_result;
bf0d5f50
AE
1278 obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
1279
1b83bef2 1280 WARN_ON(osd_req->r_num_ops != 1); /* For now */
bf0d5f50 1281
c47f9371
AE
1282 /*
1283 * We support a 64-bit length, but ultimately it has to be
1284 * passed to blk_end_request(), which takes an unsigned int.
1285 */
1b83bef2 1286 obj_request->xferred = osd_req->r_reply_op_len[0];
c47f9371 1287 rbd_assert(obj_request->xferred < (u64) UINT_MAX);
1b83bef2 1288 opcode = osd_req->r_request_ops[0].op;
bf0d5f50
AE
1289 switch (opcode) {
1290 case CEPH_OSD_OP_READ:
c47f9371 1291 rbd_osd_read_callback(obj_request);
bf0d5f50
AE
1292 break;
1293 case CEPH_OSD_OP_WRITE:
c47f9371 1294 rbd_osd_write_callback(obj_request);
bf0d5f50 1295 break;
fbfab539 1296 case CEPH_OSD_OP_STAT:
c47f9371 1297 rbd_osd_stat_callback(obj_request);
fbfab539 1298 break;
36be9a76 1299 case CEPH_OSD_OP_CALL:
b8d70035 1300 case CEPH_OSD_OP_NOTIFY_ACK:
9969ebc5 1301 case CEPH_OSD_OP_WATCH:
c47f9371 1302 rbd_osd_trivial_callback(obj_request);
9969ebc5 1303 break;
bf0d5f50
AE
1304 default:
1305 rbd_warn(NULL, "%s: unsupported op %hu\n",
1306 obj_request->object_name, (unsigned short) opcode);
1307 break;
1308 }
1309
07741308 1310 if (obj_request_done_test(obj_request))
bf0d5f50
AE
1311 rbd_obj_request_complete(obj_request);
1312}
1313
1314static struct ceph_osd_request *rbd_osd_req_create(
1315 struct rbd_device *rbd_dev,
1316 bool write_request,
1317 struct rbd_obj_request *obj_request,
1318 struct ceph_osd_req_op *op)
1319{
1320 struct rbd_img_request *img_request = obj_request->img_request;
1321 struct ceph_snap_context *snapc = NULL;
1322 struct ceph_osd_client *osdc;
1323 struct ceph_osd_request *osd_req;
0fff87ec 1324 struct ceph_osd_data *osd_data;
bf0d5f50
AE
1325 struct timespec now;
1326 struct timespec *mtime;
1327 u64 snap_id = CEPH_NOSNAP;
1328 u64 offset = obj_request->offset;
1329 u64 length = obj_request->length;
1330
1331 if (img_request) {
1332 rbd_assert(img_request->write_request == write_request);
1333 if (img_request->write_request)
1334 snapc = img_request->snapc;
1335 else
1336 snap_id = img_request->snap_id;
1337 }
1338
1339 /* Allocate and initialize the request, for the single op */
1340
1341 osdc = &rbd_dev->rbd_client->client->osdc;
1342 osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1343 if (!osd_req)
1344 return NULL; /* ENOMEM */
0fff87ec 1345 osd_data = write_request ? &osd_req->r_data_out : &osd_req->r_data_in;
bf0d5f50
AE
1346
1347 rbd_assert(obj_request_type_valid(obj_request->type));
1348 switch (obj_request->type) {
9969ebc5
AE
1349 case OBJ_REQUEST_NODATA:
1350 break; /* Nothing to do */
bf0d5f50
AE
1351 case OBJ_REQUEST_BIO:
1352 rbd_assert(obj_request->bio_list != NULL);
0fff87ec
AE
1353 osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
1354 osd_data->bio = obj_request->bio_list;
fdce58cc 1355 osd_data->bio_length = obj_request->length;
bf0d5f50 1356 break;
788e2df3 1357 case OBJ_REQUEST_PAGES:
0fff87ec
AE
1358 osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
1359 osd_data->pages = obj_request->pages;
e0c59487 1360 osd_data->length = obj_request->length;
0fff87ec
AE
1361 osd_data->alignment = offset & ~PAGE_MASK;
1362 osd_data->pages_from_pool = false;
1363 osd_data->own_pages = false;
788e2df3 1364 break;
bf0d5f50
AE
1365 }
1366
1367 if (write_request) {
1368 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1369 now = CURRENT_TIME;
1370 mtime = &now;
1371 } else {
1372 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1373 mtime = NULL; /* not needed for reads */
1374 offset = 0; /* These are not used... */
1375 length = 0; /* ...for osd read requests */
1376 }
1377
1378 osd_req->r_callback = rbd_osd_req_callback;
1379 osd_req->r_priv = obj_request;
1380
1381 osd_req->r_oid_len = strlen(obj_request->object_name);
1382 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1383 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1384
1385 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1386
1387 /* osd_req will get its own reference to snapc (if non-null) */
1388
175face2 1389 ceph_osdc_build_request(osd_req, offset, 1, op,
bf0d5f50
AE
1390 snapc, snap_id, mtime);
1391
1392 return osd_req;
1393}
1394
1395static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1396{
1397 ceph_osdc_put_request(osd_req);
1398}
1399
1400/* object_name is assumed to be a non-null pointer and NUL-terminated */
1401
1402static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1403 u64 offset, u64 length,
1404 enum obj_request_type type)
1405{
1406 struct rbd_obj_request *obj_request;
1407 size_t size;
1408 char *name;
1409
1410 rbd_assert(obj_request_type_valid(type));
1411
1412 size = strlen(object_name) + 1;
1413 obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
1414 if (!obj_request)
1415 return NULL;
1416
1417 name = (char *)(obj_request + 1);
1418 obj_request->object_name = memcpy(name, object_name, size);
1419 obj_request->offset = offset;
1420 obj_request->length = length;
1421 obj_request->which = BAD_WHICH;
1422 obj_request->type = type;
1423 INIT_LIST_HEAD(&obj_request->links);
07741308 1424 obj_request_done_init(obj_request);
788e2df3 1425 init_completion(&obj_request->completion);
bf0d5f50
AE
1426 kref_init(&obj_request->kref);
1427
37206ee5
AE
1428 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1429 offset, length, (int)type, obj_request);
1430
bf0d5f50
AE
1431 return obj_request;
1432}
1433
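/*
 * Layout note for the allocation above: the object name lives in the
 * same kzalloc() block, directly after the struct (a sketch, name
 * hypothetical):
 *
 *	+-------------------------+------------------------------+
 *	| struct rbd_obj_request  | "rb.0.1234.000000000003\0"   |
 *	+-------------------------+------------------------------+
 *	^ obj_request             ^ (char *)(obj_request + 1)
 *
 * so the single kfree() in rbd_obj_request_destroy() frees both the
 * request and its name.
 */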
1434static void rbd_obj_request_destroy(struct kref *kref)
1435{
1436 struct rbd_obj_request *obj_request;
1437
1438 obj_request = container_of(kref, struct rbd_obj_request, kref);
1439
37206ee5
AE
1440 dout("%s: obj %p\n", __func__, obj_request);
1441
bf0d5f50
AE
1442 rbd_assert(obj_request->img_request == NULL);
1443 rbd_assert(obj_request->which == BAD_WHICH);
1444
1445 if (obj_request->osd_req)
1446 rbd_osd_req_destroy(obj_request->osd_req);
1447
1448 rbd_assert(obj_request_type_valid(obj_request->type));
1449 switch (obj_request->type) {
9969ebc5
AE
1450 case OBJ_REQUEST_NODATA:
1451 break; /* Nothing to do */
bf0d5f50
AE
1452 case OBJ_REQUEST_BIO:
1453 if (obj_request->bio_list)
1454 bio_chain_put(obj_request->bio_list);
1455 break;
788e2df3
AE
1456 case OBJ_REQUEST_PAGES:
1457 if (obj_request->pages)
1458 ceph_release_page_vector(obj_request->pages,
1459 obj_request->page_count);
1460 break;
bf0d5f50
AE
1461 }
1462
1463 kfree(obj_request);
1464}
1465
1466/*
1467 * Caller is responsible for filling in the list of object requests
1468 * that comprises the image request, and the Linux request pointer
1469 * (if there is one).
1470 */
cc344fa1
AE
1471static struct rbd_img_request *rbd_img_request_create(
1472 struct rbd_device *rbd_dev,
bf0d5f50
AE
1473 u64 offset, u64 length,
1474 bool write_request)
1475{
1476 struct rbd_img_request *img_request;
1477 struct ceph_snap_context *snapc = NULL;
1478
1479 img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1480 if (!img_request)
1481 return NULL;
1482
1483 if (write_request) {
1484 down_read(&rbd_dev->header_rwsem);
1485 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1486 up_read(&rbd_dev->header_rwsem);
1487 if (WARN_ON(!snapc)) {
1488 kfree(img_request);
1489 return NULL; /* Shouldn't happen */
1490 }
1491 }
1492
1493 img_request->rq = NULL;
1494 img_request->rbd_dev = rbd_dev;
1495 img_request->offset = offset;
1496 img_request->length = length;
1497 img_request->write_request = write_request;
1498 if (write_request)
1499 img_request->snapc = snapc;
1500 else
1501 img_request->snap_id = rbd_dev->spec->snap_id;
1502 spin_lock_init(&img_request->completion_lock);
1503 img_request->next_completion = 0;
1504 img_request->callback = NULL;
1505 img_request->obj_request_count = 0;
1506 INIT_LIST_HEAD(&img_request->obj_requests);
1507 kref_init(&img_request->kref);
1508
1509 rbd_img_request_get(img_request); /* Avoid a warning */
1510 rbd_img_request_put(img_request); /* TEMPORARY */
1511
37206ee5
AE
1512 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1513 write_request ? "write" : "read", offset, length,
1514 img_request);
1515
bf0d5f50
AE
1516 return img_request;
1517}
1518
1519static void rbd_img_request_destroy(struct kref *kref)
1520{
1521 struct rbd_img_request *img_request;
1522 struct rbd_obj_request *obj_request;
1523 struct rbd_obj_request *next_obj_request;
1524
1525 img_request = container_of(kref, struct rbd_img_request, kref);
1526
37206ee5
AE
1527 dout("%s: img %p\n", __func__, img_request);
1528
bf0d5f50
AE
1529 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1530 rbd_img_obj_request_del(img_request, obj_request);
25dcf954 1531 rbd_assert(img_request->obj_request_count == 0);
bf0d5f50
AE
1532
1533 if (img_request->write_request)
1534 ceph_put_snap_context(img_request->snapc);
1535
1536 kfree(img_request);
1537}
1538
1539static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
1540 struct bio *bio_list)
1541{
1542 struct rbd_device *rbd_dev = img_request->rbd_dev;
1543 struct rbd_obj_request *obj_request = NULL;
1544 struct rbd_obj_request *next_obj_request;
1545 unsigned int bio_offset;
1546 u64 image_offset;
1547 u64 resid;
1548 u16 opcode;
1549
37206ee5
AE
1550 dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
1551
bf0d5f50
AE
1552 opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
1553 : CEPH_OSD_OP_READ;
1554 bio_offset = 0;
1555 image_offset = img_request->offset;
1556 rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
1557 resid = img_request->length;
4dda41d3 1558 rbd_assert(resid > 0);
bf0d5f50
AE
1559 while (resid) {
1560 const char *object_name;
1561 unsigned int clone_size;
33803f33 1562 struct ceph_osd_req_op op;
bf0d5f50
AE
1563 u64 offset;
1564 u64 length;
1565
1566 object_name = rbd_segment_name(rbd_dev, image_offset);
1567 if (!object_name)
1568 goto out_unwind;
1569 offset = rbd_segment_offset(rbd_dev, image_offset);
1570 length = rbd_segment_length(rbd_dev, image_offset, resid);
1571 obj_request = rbd_obj_request_create(object_name,
1572 offset, length,
1573 OBJ_REQUEST_BIO);
1574 kfree(object_name); /* object request has its own copy */
1575 if (!obj_request)
1576 goto out_unwind;
1577
1578 rbd_assert(length <= (u64) UINT_MAX);
1579 clone_size = (unsigned int) length;
1580 obj_request->bio_list = bio_chain_clone_range(&bio_list,
1581 &bio_offset, clone_size,
1582 GFP_ATOMIC);
1583 if (!obj_request->bio_list)
1584 goto out_partial;
1585
1586 /*
1587 * Build up the op to use in building the osd
1588 * request. Note that the contents of the op are
1589 * copied by rbd_osd_req_create().
1590 */
33803f33 1591 osd_req_op_extent_init(&op, opcode, offset, length, 0, 0);
bf0d5f50
AE
1592 obj_request->osd_req = rbd_osd_req_create(rbd_dev,
1593 img_request->write_request,
33803f33 1594 obj_request, &op);
bf0d5f50
AE
1595 if (!obj_request->osd_req)
1596 goto out_partial;
1597 /* status and version are initially zero-filled */
1598
1599 rbd_img_obj_request_add(img_request, obj_request);
1600
1601 image_offset += length;
1602 resid -= length;
1603 }
1604
1605 return 0;
1606
1607out_partial:
1608 rbd_obj_request_put(obj_request);
1609out_unwind:
1610 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1611 rbd_obj_request_put(obj_request);
1612
1613 return -ENOMEM;
1614}
1615
1616static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1617{
1618 struct rbd_img_request *img_request;
1619 u32 which = obj_request->which;
1620 bool more = true;
1621
1622 img_request = obj_request->img_request;
4dda41d3 1623
37206ee5 1624 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
bf0d5f50
AE
1625 rbd_assert(img_request != NULL);
1626 rbd_assert(img_request->rq != NULL);
4dda41d3 1627 rbd_assert(img_request->obj_request_count > 0);
bf0d5f50
AE
1628 rbd_assert(which != BAD_WHICH);
1629 rbd_assert(which < img_request->obj_request_count);
1630 rbd_assert(which >= img_request->next_completion);
1631
1632 spin_lock_irq(&img_request->completion_lock);
1633 if (which != img_request->next_completion)
1634 goto out;
1635
1636 for_each_obj_request_from(img_request, obj_request) {
1637 unsigned int xferred;
1638 int result;
1639
1640 rbd_assert(more);
1641 rbd_assert(which < img_request->obj_request_count);
1642
07741308 1643 if (!obj_request_done_test(obj_request))
bf0d5f50
AE
1644 break;
1645
1646 rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
1647 xferred = (unsigned int) obj_request->xferred;
1648 result = (int) obj_request->result;
1649 if (result)
1650 rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
1651 img_request->write_request ? "write" : "read",
1652 result, xferred);
1653
1654 more = blk_end_request(img_request->rq, result, xferred);
1655 which++;
1656 }
1b83bef2 1657
bf0d5f50
AE
1658 rbd_assert(more ^ (which == img_request->obj_request_count));
1659 img_request->next_completion = which;
1660out:
1661 spin_unlock_irq(&img_request->completion_lock);
1662
1663 if (!more)
1664 rbd_img_request_complete(img_request);
1665}
1666
1667static int rbd_img_request_submit(struct rbd_img_request *img_request)
1668{
1669 struct rbd_device *rbd_dev = img_request->rbd_dev;
1670 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1671 struct rbd_obj_request *obj_request;
46faeed4 1672 struct rbd_obj_request *next_obj_request;
bf0d5f50 1673
37206ee5 1674 dout("%s: img %p\n", __func__, img_request);
46faeed4 1675 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
bf0d5f50
AE
1676 int ret;
1677
1678 obj_request->callback = rbd_img_obj_callback;
1679 ret = rbd_obj_request_submit(osdc, obj_request);
1680 if (ret)
1681 return ret;
1682 /*
1683 * The image request has its own reference to each
1684 * of its object requests, so we can safely drop the
1685 * initial one here.
1686 */
1687 rbd_obj_request_put(obj_request);
1688 }
1689
1690 return 0;
1691}
1692
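/*
 * A sketch of the full image-request pipeline (hypothetical caller,
 * error handling omitted; this mirrors what a block-request handler
 * would be expected to do, not verbatim driver code):
 *
 *	struct rbd_img_request *img_request;
 *
 *	img_request = rbd_img_request_create(rbd_dev, offset, length,
 *					     rq_data_dir(rq) == WRITE);
 *	img_request->rq = rq;
 *	if (!rbd_img_request_fill_bio(img_request, rq->bio))
 *		rbd_img_request_submit(img_request);
 *
 * Completions then arrive per object in rbd_img_obj_callback(),
 * which ends the blk request in order and finally drops the image
 * request via rbd_img_request_complete().
 */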
cf81b60e 1693static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
b8d70035
AE
1694 u64 ver, u64 notify_id)
1695{
1696 struct rbd_obj_request *obj_request;
33803f33 1697 struct ceph_osd_req_op op;
b8d70035
AE
1698 struct ceph_osd_client *osdc;
1699 int ret;
1700
1701 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
1702 OBJ_REQUEST_NODATA);
1703 if (!obj_request)
1704 return -ENOMEM;
1705
1706 ret = -ENOMEM;
33803f33 1707 osd_req_op_watch_init(&op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0);
b8d70035 1708 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
33803f33 1709 obj_request, &op);
b8d70035
AE
1710 if (!obj_request->osd_req)
1711 goto out;
1712
1713 osdc = &rbd_dev->rbd_client->client->osdc;
cf81b60e 1714 obj_request->callback = rbd_obj_request_put;
b8d70035 1715 ret = rbd_obj_request_submit(osdc, obj_request);
b8d70035 1716out:
cf81b60e
AE
1717 if (ret)
1718 rbd_obj_request_put(obj_request);
b8d70035
AE
1719
1720 return ret;
1721}
1722
1723static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1724{
1725 struct rbd_device *rbd_dev = (struct rbd_device *)data;
1726 u64 hver;
1727 int rc;
1728
1729 if (!rbd_dev)
1730 return;
1731
37206ee5 1732 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
b8d70035
AE
1733 rbd_dev->header_name, (unsigned long long) notify_id,
1734 (unsigned int) opcode);
1735 rc = rbd_dev_refresh(rbd_dev, &hver);
1736 if (rc)
1737 rbd_warn(rbd_dev, "got notification but failed to "
1738 " update snaps: %d\n", rc);
1739
cf81b60e 1740 rbd_obj_notify_ack(rbd_dev, hver, notify_id);
b8d70035
AE
1741}
1742
9969ebc5
AE
1743/*
1744 * Request sync osd watch/unwatch. The value of "start" determines
1745 * whether a watch request is being initiated or torn down.
1746 */
1747static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
1748{
1749 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1750 struct rbd_obj_request *obj_request;
33803f33 1751 struct ceph_osd_req_op op;
9969ebc5
AE
1752 int ret;
1753
1754 rbd_assert(start ^ !!rbd_dev->watch_event);
1755 rbd_assert(start ^ !!rbd_dev->watch_request);
1756
1757 if (start) {
3c663bbd 1758 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
9969ebc5
AE
1759 &rbd_dev->watch_event);
1760 if (ret < 0)
1761 return ret;
8eb87565 1762 rbd_assert(rbd_dev->watch_event != NULL);
9969ebc5
AE
1763 }
1764
1765 ret = -ENOMEM;
1766 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
1767 OBJ_REQUEST_NODATA);
1768 if (!obj_request)
1769 goto out_cancel;
1770
33803f33 1771 osd_req_op_watch_init(&op, CEPH_OSD_OP_WATCH,
9969ebc5
AE
1772 rbd_dev->watch_event->cookie,
1773 rbd_dev->header.obj_version, start);
9969ebc5 1774 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
33803f33 1775 obj_request, &op);
9969ebc5
AE
1776 if (!obj_request->osd_req)
1777 goto out_cancel;
1778
8eb87565 1779 if (start)
975241af 1780 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
8eb87565 1781 else
6977c3f9 1782 ceph_osdc_unregister_linger_request(osdc,
975241af 1783 rbd_dev->watch_request->osd_req);
9969ebc5
AE
1784 ret = rbd_obj_request_submit(osdc, obj_request);
1785 if (ret)
1786 goto out_cancel;
1787 ret = rbd_obj_request_wait(obj_request);
1788 if (ret)
1789 goto out_cancel;
9969ebc5
AE
1790 ret = obj_request->result;
1791 if (ret)
1792 goto out_cancel;
1793
8eb87565
AE
1794 /*
1795 * A watch request is set to linger, so the underlying osd
1796 * request won't go away until we unregister it. We retain
1797 * a pointer to the object request during that time (in
1798 * rbd_dev->watch_request), so we'll keep a reference to
1799 * it. We'll drop that reference (below) after we've
1800 * unregistered it.
1801 */
1802 if (start) {
1803 rbd_dev->watch_request = obj_request;
1804
1805 return 0;
1806 }
1807
1808 /* We have successfully torn down the watch request */
1809
1810 rbd_obj_request_put(rbd_dev->watch_request);
1811 rbd_dev->watch_request = NULL;
9969ebc5
AE
1812out_cancel:
1813 /* Cancel the event if we're tearing down, or on error */
1814 ceph_osdc_cancel_event(rbd_dev->watch_event);
1815 rbd_dev->watch_event = NULL;
9969ebc5
AE
1816 if (obj_request)
1817 rbd_obj_request_put(obj_request);
1818
1819 return ret;
1820}
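/*
 * Illustrative pairing (not additional driver code): a caller
 * registers the watch with rbd_dev_header_watch_sync(rbd_dev, 1)
 * once rbd_dev->header_name is known, and tears it down later
 * with rbd_dev_header_watch_sync(rbd_dev, 0).
 */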
1821
36be9a76
AE
1822/*
1823 * Synchronous osd object method call
1824 */
1825static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
1826 const char *object_name,
1827 const char *class_name,
1828 const char *method_name,
1829 const char *outbound,
1830 size_t outbound_size,
1831 char *inbound,
1832 size_t inbound_size,
1833 u64 *version)
1834{
1835 struct rbd_obj_request *obj_request;
1836 struct ceph_osd_client *osdc;
33803f33 1837 struct ceph_osd_req_op op;
36be9a76
AE
1838 struct page **pages;
1839 u32 page_count;
1840 int ret;
1841
1842 /*
1843 * Method calls are ultimately read operations but they
1844 * don't involve object data (so no offset or length).
1845 * The result should be placed into the inbound buffer
1846 * provided. They also supply outbound data--parameters for
1847 * the object method. Currently if this is present it will
1848 * be a snapshot id.
1849 */
1850 page_count = (u32) calc_pages_for(0, inbound_size);
1851 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
1852 if (IS_ERR(pages))
1853 return PTR_ERR(pages);
1854
1855 ret = -ENOMEM;
1856 obj_request = rbd_obj_request_create(object_name, 0, 0,
1857 OBJ_REQUEST_PAGES);
1858 if (!obj_request)
1859 goto out;
1860
1861 obj_request->pages = pages;
1862 obj_request->page_count = page_count;
1863
33803f33
AE
1864 osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, class_name, method_name,
1865 outbound, outbound_size);
36be9a76 1866 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
33803f33 1867 obj_request, &op);
36be9a76
AE
1868 if (!obj_request->osd_req)
1869 goto out;
1870
1871 osdc = &rbd_dev->rbd_client->client->osdc;
1872 ret = rbd_obj_request_submit(osdc, obj_request);
1873 if (ret)
1874 goto out;
1875 ret = rbd_obj_request_wait(obj_request);
1876 if (ret)
1877 goto out;
1878
1879 ret = obj_request->result;
1880 if (ret < 0)
1881 goto out;
23ed6e13 1882 ret = 0;
903bb32e 1883 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
36be9a76
AE
1884 if (version)
1885 *version = obj_request->version;
1886out:
1887 if (obj_request)
1888 rbd_obj_request_put(obj_request);
1889 else
1890 ceph_release_page_vector(pages, page_count);
1891
1892 return ret;
1893}
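/*
 * Typical use, as in the v2 image probe code below: both the
 * outbound arguments and the inbound result are packed
 * little-endian buffers, e.g.
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *			"rbd", "get_size",
 *			(char *) &snapid, sizeof (snapid),
 *			(char *) &size_buf, sizeof (size_buf), NULL);
 */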
1894
bf0d5f50 1895static void rbd_request_fn(struct request_queue *q)
cc344fa1 1896 __releases(q->queue_lock) __acquires(q->queue_lock)
bf0d5f50
AE
1897{
1898 struct rbd_device *rbd_dev = q->queuedata;
1899 bool read_only = rbd_dev->mapping.read_only;
1900 struct request *rq;
1901 int result;
1902
1903 while ((rq = blk_fetch_request(q))) {
1904 bool write_request = rq_data_dir(rq) == WRITE;
1905 struct rbd_img_request *img_request;
1906 u64 offset;
1907 u64 length;
1908
1909 /* Ignore any non-FS requests that filter through. */
1910
1911 if (rq->cmd_type != REQ_TYPE_FS) {
4dda41d3
AE
1912 dout("%s: non-fs request type %d\n", __func__,
1913 (int) rq->cmd_type);
1914 __blk_end_request_all(rq, 0);
1915 continue;
1916 }
1917
1918 /* Ignore/skip any zero-length requests */
1919
1920 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
1921 length = (u64) blk_rq_bytes(rq);
1922
1923 if (!length) {
1924 dout("%s: zero-length request\n", __func__);
bf0d5f50
AE
1925 __blk_end_request_all(rq, 0);
1926 continue;
1927 }
1928
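 /*
 * The request function is entered with the queue lock held (see
 * the __releases/__acquires annotations above). Drop it around
 * the potentially blocking submission work, and retake it before
 * ending a request or fetching the next one.
 */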
1929 spin_unlock_irq(q->queue_lock);
1930
1931 /* Disallow writes to a read-only device */
1932
1933 if (write_request) {
1934 result = -EROFS;
1935 if (read_only)
1936 goto end_request;
1937 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
1938 }
1939
6d292906
AE
1940 /*
1941 * Quit early if the mapped snapshot no longer
1942 * exists. It's still possible the snapshot will
1943 * have disappeared by the time our request arrives
1944 * at the osd, but there's no sense in sending it if
1945 * we already know.
1946 */
1947 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
bf0d5f50
AE
1948 dout("request for non-existent snapshot\n");
1949 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
1950 result = -ENXIO;
1951 goto end_request;
1952 }
1953
bf0d5f50
AE
1954 result = -EINVAL;
1955 if (WARN_ON(offset && length > U64_MAX - offset + 1))
1956 goto end_request; /* Shouldn't happen */
1957
1958 result = -ENOMEM;
1959 img_request = rbd_img_request_create(rbd_dev, offset, length,
1960 write_request);
1961 if (!img_request)
1962 goto end_request;
1963
1964 img_request->rq = rq;
1965
1966 result = rbd_img_request_fill_bio(img_request, rq->bio);
1967 if (!result)
1968 result = rbd_img_request_submit(img_request);
1969 if (result)
1970 rbd_img_request_put(img_request);
1971end_request:
1972 spin_lock_irq(q->queue_lock);
1973 if (result < 0) {
1974 rbd_warn(rbd_dev, "obj_request %s result %d\n",
1975 write_request ? "write" : "read", result);
1976 __blk_end_request_all(rq, result);
1977 }
1978 }
1979}
1980
602adf40
YS
1981/*
1982 * A queue callback. Makes sure that we don't create a bio that spans
1983 * multiple osd objects. One exception would be single-page bios,
f7760dad 1984 * which we handle later in bio_chain_clone_range().
602adf40
YS
1985 */
1986static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
1987 struct bio_vec *bvec)
1988{
1989 struct rbd_device *rbd_dev = q->queuedata;
e5cfeed2
AE
1990 sector_t sector_offset;
1991 sector_t sectors_per_obj;
1992 sector_t obj_sector_offset;
1993 int ret;
1994
1995 /*
1996 * The bio's starting sector is partition-relative; convert it
1997 * to an offset relative to the enclosing device, then find how
1998 * far into its rbd object that sector falls.
1999 */
2000 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2001 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2002 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2003
2004 /*
2005 * Compute the number of bytes from that offset to the end
2006 * of the object. Account for what's already used by the bio.
2007 */
2008 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2009 if (ret > bmd->bi_size)
2010 ret -= bmd->bi_size;
2011 else
2012 ret = 0;
2013
2014 /*
2015 * Don't send back more than was asked for. And if the bio
2016 * was empty, let the whole thing through because: "Note
2017 * that a block device *must* allow a single page to be
2018 * added to an empty bio."
2019 */
2020 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2021 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2022 ret = (int) bvec->bv_len;
2023
2024 return ret;
602adf40
YS
2025}
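/*
 * Worked example (assuming the default 4 MB objects, obj_order 22):
 * sectors_per_obj = 1 << (22 - 9) = 8192. A bio starting at device
 * sector 8188 that already holds bi_size = 1024 bytes has
 * (8192 - 8188) * 512 - 1024 = 1024 bytes left before the object
 * boundary, so at most 1024 bytes of the new bvec may be merged.
 */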
2026
2027static void rbd_free_disk(struct rbd_device *rbd_dev)
2028{
2029 struct gendisk *disk = rbd_dev->disk;
2030
2031 if (!disk)
2032 return;
2033
602adf40
YS
2034 if (disk->flags & GENHD_FL_UP)
2035 del_gendisk(disk);
2036 if (disk->queue)
2037 blk_cleanup_queue(disk->queue);
2038 put_disk(disk);
2039}
2040
788e2df3
AE
2041static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2042 const char *object_name,
2043 u64 offset, u64 length,
2044 char *buf, u64 *version)
2045
2046{
33803f33 2047 struct ceph_osd_req_op op;
788e2df3
AE
2048 struct rbd_obj_request *obj_request;
2049 struct ceph_osd_client *osdc;
2050 struct page **pages = NULL;
2051 u32 page_count;
1ceae7ef 2052 size_t size;
788e2df3
AE
2053 int ret;
2054
2055 page_count = (u32) calc_pages_for(offset, length);
2056 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2057 if (IS_ERR(pages))
2058 return PTR_ERR(pages);
2059
2060 ret = -ENOMEM;
2061 obj_request = rbd_obj_request_create(object_name, offset, length,
36be9a76 2062 OBJ_REQUEST_PAGES);
788e2df3
AE
2063 if (!obj_request)
2064 goto out;
2065
2066 obj_request->pages = pages;
2067 obj_request->page_count = page_count;
2068
33803f33 2069 osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0);
788e2df3 2070 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
33803f33 2071 obj_request, &op);
788e2df3
AE
2072 if (!obj_request->osd_req)
2073 goto out;
2074
2075 osdc = &rbd_dev->rbd_client->client->osdc;
2076 ret = rbd_obj_request_submit(osdc, obj_request);
2077 if (ret)
2078 goto out;
2079 ret = rbd_obj_request_wait(obj_request);
2080 if (ret)
2081 goto out;
2082
2083 ret = obj_request->result;
2084 if (ret < 0)
2085 goto out;
1ceae7ef
AE
2086
2087 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2088 size = (size_t) obj_request->xferred;
903bb32e 2089 ceph_copy_from_page_vector(pages, buf, 0, size);
23ed6e13
AE
2090 rbd_assert(size <= (size_t) INT_MAX);
2091 ret = (int) size;
788e2df3
AE
2092 if (version)
2093 *version = obj_request->version;
2094out:
2095 if (obj_request)
2096 rbd_obj_request_put(obj_request);
2097 else
2098 ceph_release_page_vector(pages, page_count);
2099
2100 return ret;
2101}
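/*
 * Note that on success rbd_obj_read_sync() returns the number of
 * bytes actually transferred rather than zero; the header read
 * below relies on that to detect a short read.
 */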
2102
602adf40 2103/*
4156d998
AE
2104 * Read the complete header for the given rbd device.
2105 *
2106 * Returns a pointer to a dynamically-allocated buffer containing
2107 * the complete and validated header. Caller can pass the address
2108 * of a variable that will be filled in with the version of the
2109 * header object at the time it was read.
2110 *
2111 * Returns a pointer-coded errno if a failure occurs.
602adf40 2112 */
4156d998
AE
2113static struct rbd_image_header_ondisk *
2114rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
602adf40 2115{
4156d998 2116 struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c9 2117 u32 snap_count = 0;
4156d998
AE
2118 u64 names_size = 0;
2119 u32 want_count;
2120 int ret;
602adf40 2121
00f1f36f 2122 /*
4156d998
AE
2123 * The complete header will include an array of its 64-bit
2124 * snapshot ids, followed by the names of those snapshots as
2125 * a contiguous block of NUL-terminated strings. Note that
2126 * the number of snapshots could change by the time we read
2127 * it in, in which case we re-read it.
00f1f36f 2128 */
4156d998
AE
2129 do {
2130 size_t size;
2131
2132 kfree(ondisk);
2133
2134 size = sizeof (*ondisk);
2135 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2136 size += names_size;
2137 ondisk = kmalloc(size, GFP_KERNEL);
2138 if (!ondisk)
2139 return ERR_PTR(-ENOMEM);
2140
788e2df3 2141 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
4156d998
AE
2142 0, size,
2143 (char *) ondisk, version);
4156d998
AE
2144 if (ret < 0)
2145 goto out_err;
2146 if (WARN_ON((size_t) ret < size)) {
2147 ret = -ENXIO;
06ecc6cb
AE
2148 rbd_warn(rbd_dev, "short header read (want %zu got %d)",
2149 size, ret);
4156d998
AE
2150 goto out_err;
2151 }
2152 if (!rbd_dev_ondisk_valid(ondisk)) {
2153 ret = -ENXIO;
06ecc6cb 2154 rbd_warn(rbd_dev, "invalid header");
4156d998 2155 goto out_err;
81e759fb 2156 }
602adf40 2157
4156d998
AE
2158 names_size = le64_to_cpu(ondisk->snap_names_len);
2159 want_count = snap_count;
2160 snap_count = le32_to_cpu(ondisk->snap_count);
2161 } while (snap_count != want_count);
00f1f36f 2162
4156d998 2163 return ondisk;
00f1f36f 2164
4156d998
AE
2165out_err:
2166 kfree(ondisk);
2167
2168 return ERR_PTR(ret);
2169}
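/*
 * The retry loop above guards against a snapshot being created
 * between reads: the buffer is sized for the previously seen
 * snapshot count, and the header is re-read until two successive
 * reads agree on that count.
 */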
2170
2171/*
2172 * Reload the on-disk header
2173 */
2174static int rbd_read_header(struct rbd_device *rbd_dev,
2175 struct rbd_image_header *header)
2176{
2177 struct rbd_image_header_ondisk *ondisk;
2178 u64 ver = 0;
2179 int ret;
602adf40 2180
4156d998
AE
2181 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
2182 if (IS_ERR(ondisk))
2183 return PTR_ERR(ondisk);
2184 ret = rbd_header_from_disk(header, ondisk);
2185 if (ret >= 0)
2186 header->obj_version = ver;
2187 kfree(ondisk);
2188
2189 return ret;
602adf40
YS
2190}
2191
41f38c2b 2192static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
dfc5606d
YS
2193{
2194 struct rbd_snap *snap;
a0593290 2195 struct rbd_snap *next;
dfc5606d 2196
a0593290 2197 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
41f38c2b 2198 rbd_remove_snap_dev(snap);
dfc5606d
YS
2199}
2200
9478554a
AE
2201static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2202{
2203 sector_t size;
2204
0d7dbfce 2205 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
9478554a
AE
2206 return;
2207
2208 size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
2209 dout("setting size to %llu sectors", (unsigned long long) size);
2210 rbd_dev->mapping.size = (u64) size;
2211 set_capacity(rbd_dev->disk, size);
2212}
2213
602adf40
YS
2214/*
2215 * only read the first part of the ondisk header, without the snaps info
2216 */
117973fb 2217static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
602adf40
YS
2218{
2219 int ret;
2220 struct rbd_image_header h;
602adf40
YS
2221
2222 ret = rbd_read_header(rbd_dev, &h);
2223 if (ret < 0)
2224 return ret;
2225
a51aa0c0
JD
2226 down_write(&rbd_dev->header_rwsem);
2227
9478554a
AE
2228 /* Update image size, and check for resize of mapped image */
2229 rbd_dev->header.image_size = h.image_size;
2230 rbd_update_mapping_size(rbd_dev);
9db4b3e3 2231
849b4260 2232 /* rbd_dev->header.object_prefix shouldn't change */
602adf40 2233 kfree(rbd_dev->header.snap_sizes);
849b4260 2234 kfree(rbd_dev->header.snap_names);
d1d25646
JD
2235 /* osd requests may still refer to snapc */
2236 ceph_put_snap_context(rbd_dev->header.snapc);
602adf40 2237
b813623a
AE
2238 if (hver)
2239 *hver = h.obj_version;
a71b891b 2240 rbd_dev->header.obj_version = h.obj_version;
93a24e08 2241 rbd_dev->header.image_size = h.image_size;
602adf40
YS
2242 rbd_dev->header.snapc = h.snapc;
2243 rbd_dev->header.snap_names = h.snap_names;
2244 rbd_dev->header.snap_sizes = h.snap_sizes;
849b4260
AE
2245 /* Free the extra copy of the object prefix */
2246 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
2247 kfree(h.object_prefix);
2248
304f6808
AE
2249 ret = rbd_dev_snaps_update(rbd_dev);
2250 if (!ret)
2251 ret = rbd_dev_snaps_register(rbd_dev);
dfc5606d 2252
c666601a 2253 up_write(&rbd_dev->header_rwsem);
602adf40 2254
dfc5606d 2255 return ret;
602adf40
YS
2256}
2257
117973fb 2258static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
1fe5e993
AE
2259{
2260 int ret;
2261
117973fb 2262 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1fe5e993 2263 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
117973fb
AE
2264 if (rbd_dev->image_format == 1)
2265 ret = rbd_dev_v1_refresh(rbd_dev, hver);
2266 else
2267 ret = rbd_dev_v2_refresh(rbd_dev, hver);
1fe5e993
AE
2268 mutex_unlock(&ctl_mutex);
2269
2270 return ret;
2271}
2272
602adf40
YS
2273static int rbd_init_disk(struct rbd_device *rbd_dev)
2274{
2275 struct gendisk *disk;
2276 struct request_queue *q;
593a9e7b 2277 u64 segment_size;
602adf40 2278
602adf40 2279 /* create gendisk info */
602adf40
YS
2280 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
2281 if (!disk)
1fcdb8aa 2282 return -ENOMEM;
602adf40 2283
f0f8cef5 2284 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
de71a297 2285 rbd_dev->dev_id);
602adf40
YS
2286 disk->major = rbd_dev->major;
2287 disk->first_minor = 0;
2288 disk->fops = &rbd_bd_ops;
2289 disk->private_data = rbd_dev;
2290
bf0d5f50 2291 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
602adf40
YS
2292 if (!q)
2293 goto out_disk;
029bcbd8 2294
593a9e7b
AE
2295 /* We use the default size, but let's be explicit about it. */
2296 blk_queue_physical_block_size(q, SECTOR_SIZE);
2297
029bcbd8 2298 /* set io sizes to object size */
593a9e7b
AE
2299 segment_size = rbd_obj_bytes(&rbd_dev->header);
2300 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
2301 blk_queue_max_segment_size(q, segment_size);
2302 blk_queue_io_min(q, segment_size);
2303 blk_queue_io_opt(q, segment_size);
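 /*
 * Matching all of these limits to the object size keeps a single
 * I/O within one rbd object; the rbd_merge_bvec() hook registered
 * just below enforces the same boundary as bios are built.
 */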
029bcbd8 2304
602adf40
YS
2305 blk_queue_merge_bvec(q, rbd_merge_bvec);
2306 disk->queue = q;
2307
2308 q->queuedata = rbd_dev;
2309
2310 rbd_dev->disk = disk;
602adf40 2311
12f02944
AE
2312 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
2313
602adf40 2314 return 0;
602adf40
YS
2315out_disk:
2316 put_disk(disk);
1fcdb8aa
AE
2317
2318 return -ENOMEM;
602adf40
YS
2319}
2320
dfc5606d
YS
2321/*
2322 sysfs
2323*/
2324
593a9e7b
AE
2325static struct rbd_device *dev_to_rbd_dev(struct device *dev)
2326{
2327 return container_of(dev, struct rbd_device, dev);
2328}
2329
dfc5606d
YS
2330static ssize_t rbd_size_show(struct device *dev,
2331 struct device_attribute *attr, char *buf)
2332{
593a9e7b 2333 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0
JD
2334 sector_t size;
2335
2336 down_read(&rbd_dev->header_rwsem);
2337 size = get_capacity(rbd_dev->disk);
2338 up_read(&rbd_dev->header_rwsem);
dfc5606d 2339
a51aa0c0 2340 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
dfc5606d
YS
2341}
2342
34b13184
AE
2343/*
2344 * Note this shows the features for whatever's mapped, which is not
2345 * necessarily the base image.
2346 */
2347static ssize_t rbd_features_show(struct device *dev,
2348 struct device_attribute *attr, char *buf)
2349{
2350 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2351
2352 return sprintf(buf, "0x%016llx\n",
2353 (unsigned long long) rbd_dev->mapping.features);
2354}
2355
dfc5606d
YS
2356static ssize_t rbd_major_show(struct device *dev,
2357 struct device_attribute *attr, char *buf)
2358{
593a9e7b 2359 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 2360
dfc5606d
YS
2361 return sprintf(buf, "%d\n", rbd_dev->major);
2362}
2363
2364static ssize_t rbd_client_id_show(struct device *dev,
2365 struct device_attribute *attr, char *buf)
602adf40 2366{
593a9e7b 2367 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 2368
1dbb4399
AE
2369 return sprintf(buf, "client%lld\n",
2370 ceph_client_id(rbd_dev->rbd_client->client));
602adf40
YS
2371}
2372
dfc5606d
YS
2373static ssize_t rbd_pool_show(struct device *dev,
2374 struct device_attribute *attr, char *buf)
602adf40 2375{
593a9e7b 2376 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 2377
0d7dbfce 2378 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
dfc5606d
YS
2379}
2380
9bb2f334
AE
2381static ssize_t rbd_pool_id_show(struct device *dev,
2382 struct device_attribute *attr, char *buf)
2383{
2384 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2385
0d7dbfce
AE
2386 return sprintf(buf, "%llu\n",
2387 (unsigned long long) rbd_dev->spec->pool_id);
9bb2f334
AE
2388}
2389
dfc5606d
YS
2390static ssize_t rbd_name_show(struct device *dev,
2391 struct device_attribute *attr, char *buf)
2392{
593a9e7b 2393 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 2394
a92ffdf8
AE
2395 if (rbd_dev->spec->image_name)
2396 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
2397
2398 return sprintf(buf, "(unknown)\n");
dfc5606d
YS
2399}
2400
589d30e0
AE
2401static ssize_t rbd_image_id_show(struct device *dev,
2402 struct device_attribute *attr, char *buf)
2403{
2404 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2405
0d7dbfce 2406 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
589d30e0
AE
2407}
2408
34b13184
AE
2409/*
2410 * Shows the name of the currently-mapped snapshot (or
2411 * RBD_SNAP_HEAD_NAME for the base image).
2412 */
dfc5606d
YS
2413static ssize_t rbd_snap_show(struct device *dev,
2414 struct device_attribute *attr,
2415 char *buf)
2416{
593a9e7b 2417 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 2418
0d7dbfce 2419 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
dfc5606d
YS
2420}
2421
86b00e0d
AE
2422/*
2423 * For an rbd v2 image, shows the pool id, image id, and snapshot id
2424 * for the parent image. If there is no parent, simply shows
2425 * "(no parent image)".
2426 */
2427static ssize_t rbd_parent_show(struct device *dev,
2428 struct device_attribute *attr,
2429 char *buf)
2430{
2431 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2432 struct rbd_spec *spec = rbd_dev->parent_spec;
2433 int count;
2434 char *bufp = buf;
2435
2436 if (!spec)
2437 return sprintf(buf, "(no parent image)\n");
2438
2439 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
2440 (unsigned long long) spec->pool_id, spec->pool_name);
2441 if (count < 0)
2442 return count;
2443 bufp += count;
2444
2445 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
2446 spec->image_name ? spec->image_name : "(unknown)");
2447 if (count < 0)
2448 return count;
2449 bufp += count;
2450
2451 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
2452 (unsigned long long) spec->snap_id, spec->snap_name);
2453 if (count < 0)
2454 return count;
2455 bufp += count;
2456
2457 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
2458 if (count < 0)
2459 return count;
2460 bufp += count;
2461
2462 return (ssize_t) (bufp - buf);
2463}
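/*
 * Example of the resulting sysfs "parent" output (values are
 * illustrative only):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1018e56db6ba
 *	image_name parent-img
 *	snap_id 4
 *	snap_name base
 *	overlap 4194304
 */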
2464
dfc5606d
YS
2465static ssize_t rbd_image_refresh(struct device *dev,
2466 struct device_attribute *attr,
2467 const char *buf,
2468 size_t size)
2469{
593a9e7b 2470 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 2471 int ret;
602adf40 2472
117973fb 2473 ret = rbd_dev_refresh(rbd_dev, NULL);
b813623a
AE
2474
2475 return ret < 0 ? ret : size;
dfc5606d 2476}
602adf40 2477
dfc5606d 2478static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
34b13184 2479static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
dfc5606d
YS
2480static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
2481static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
2482static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
9bb2f334 2483static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
dfc5606d 2484static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
589d30e0 2485static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
dfc5606d
YS
2486static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
2487static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
86b00e0d 2488static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
dfc5606d
YS
2489
2490static struct attribute *rbd_attrs[] = {
2491 &dev_attr_size.attr,
34b13184 2492 &dev_attr_features.attr,
dfc5606d
YS
2493 &dev_attr_major.attr,
2494 &dev_attr_client_id.attr,
2495 &dev_attr_pool.attr,
9bb2f334 2496 &dev_attr_pool_id.attr,
dfc5606d 2497 &dev_attr_name.attr,
589d30e0 2498 &dev_attr_image_id.attr,
dfc5606d 2499 &dev_attr_current_snap.attr,
86b00e0d 2500 &dev_attr_parent.attr,
dfc5606d 2501 &dev_attr_refresh.attr,
dfc5606d
YS
2502 NULL
2503};
2504
2505static struct attribute_group rbd_attr_group = {
2506 .attrs = rbd_attrs,
2507};
2508
2509static const struct attribute_group *rbd_attr_groups[] = {
2510 &rbd_attr_group,
2511 NULL
2512};
2513
2514static void rbd_sysfs_dev_release(struct device *dev)
2515{
2516}
2517
2518static struct device_type rbd_device_type = {
2519 .name = "rbd",
2520 .groups = rbd_attr_groups,
2521 .release = rbd_sysfs_dev_release,
2522};
2523
2524
2525/*
2526 sysfs - snapshots
2527*/
2528
2529static ssize_t rbd_snap_size_show(struct device *dev,
2530 struct device_attribute *attr,
2531 char *buf)
2532{
2533 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2534
3591538f 2535 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
dfc5606d
YS
2536}
2537
2538static ssize_t rbd_snap_id_show(struct device *dev,
2539 struct device_attribute *attr,
2540 char *buf)
2541{
2542 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2543
3591538f 2544 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
dfc5606d
YS
2545}
2546
34b13184
AE
2547static ssize_t rbd_snap_features_show(struct device *dev,
2548 struct device_attribute *attr,
2549 char *buf)
2550{
2551 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2552
2553 return sprintf(buf, "0x%016llx\n",
2554 (unsigned long long) snap->features);
2555}
2556
dfc5606d
YS
2557static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
2558static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
34b13184 2559static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
dfc5606d
YS
2560
2561static struct attribute *rbd_snap_attrs[] = {
2562 &dev_attr_snap_size.attr,
2563 &dev_attr_snap_id.attr,
34b13184 2564 &dev_attr_snap_features.attr,
dfc5606d
YS
2565 NULL,
2566};
2567
2568static struct attribute_group rbd_snap_attr_group = {
2569 .attrs = rbd_snap_attrs,
2570};
2571
2572static void rbd_snap_dev_release(struct device *dev)
2573{
2574 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2575 kfree(snap->name);
2576 kfree(snap);
2577}
2578
2579static const struct attribute_group *rbd_snap_attr_groups[] = {
2580 &rbd_snap_attr_group,
2581 NULL
2582};
2583
2584static struct device_type rbd_snap_device_type = {
2585 .groups = rbd_snap_attr_groups,
2586 .release = rbd_snap_dev_release,
2587};
2588
8b8fb99c
AE
2589static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
2590{
2591 kref_get(&spec->kref);
2592
2593 return spec;
2594}
2595
2596static void rbd_spec_free(struct kref *kref);
2597static void rbd_spec_put(struct rbd_spec *spec)
2598{
2599 if (spec)
2600 kref_put(&spec->kref, rbd_spec_free);
2601}
2602
2603static struct rbd_spec *rbd_spec_alloc(void)
2604{
2605 struct rbd_spec *spec;
2606
2607 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
2608 if (!spec)
2609 return NULL;
2610 kref_init(&spec->kref);
2611
2612 rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
2613
2614 return spec;
2615}
2616
2617static void rbd_spec_free(struct kref *kref)
2618{
2619 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
2620
2621 kfree(spec->pool_name);
2622 kfree(spec->image_id);
2623 kfree(spec->image_name);
2624 kfree(spec->snap_name);
2625 kfree(spec);
2626}
2627
cc344fa1 2628static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
c53d5893
AE
2629 struct rbd_spec *spec)
2630{
2631 struct rbd_device *rbd_dev;
2632
2633 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
2634 if (!rbd_dev)
2635 return NULL;
2636
2637 spin_lock_init(&rbd_dev->lock);
6d292906 2638 rbd_dev->flags = 0;
c53d5893
AE
2639 INIT_LIST_HEAD(&rbd_dev->node);
2640 INIT_LIST_HEAD(&rbd_dev->snaps);
2641 init_rwsem(&rbd_dev->header_rwsem);
2642
2643 rbd_dev->spec = spec;
2644 rbd_dev->rbd_client = rbdc;
2645
0903e875
AE
2646 /* Initialize the layout used for all rbd requests */
2647
2648 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2649 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
2650 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2651 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
2652
c53d5893
AE
2653 return rbd_dev;
2654}
2655
2656static void rbd_dev_destroy(struct rbd_device *rbd_dev)
2657{
86b00e0d 2658 rbd_spec_put(rbd_dev->parent_spec);
c53d5893
AE
2659 kfree(rbd_dev->header_name);
2660 rbd_put_client(rbd_dev->rbd_client);
2661 rbd_spec_put(rbd_dev->spec);
2662 kfree(rbd_dev);
2663}
2664
304f6808
AE
2665static bool rbd_snap_registered(struct rbd_snap *snap)
2666{
2667 bool ret = snap->dev.type == &rbd_snap_device_type;
2668 bool reg = device_is_registered(&snap->dev);
2669
2670 rbd_assert(!ret ^ reg);
2671
2672 return ret;
2673}
2674
41f38c2b 2675static void rbd_remove_snap_dev(struct rbd_snap *snap)
dfc5606d
YS
2676{
2677 list_del(&snap->node);
304f6808
AE
2678 if (device_is_registered(&snap->dev))
2679 device_unregister(&snap->dev);
dfc5606d
YS
2680}
2681
14e7085d 2682static int rbd_register_snap_dev(struct rbd_snap *snap,
dfc5606d
YS
2683 struct device *parent)
2684{
2685 struct device *dev = &snap->dev;
2686 int ret;
2687
2688 dev->type = &rbd_snap_device_type;
2689 dev->parent = parent;
2690 dev->release = rbd_snap_dev_release;
d4b125e9 2691 dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
304f6808
AE
2692 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2693
dfc5606d
YS
2694 ret = device_register(dev);
2695
2696 return ret;
2697}
2698
4e891e0a 2699static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
c8d18425 2700 const char *snap_name,
34b13184
AE
2701 u64 snap_id, u64 snap_size,
2702 u64 snap_features)
dfc5606d 2703{
4e891e0a 2704 struct rbd_snap *snap;
dfc5606d 2705 int ret;
4e891e0a
AE
2706
2707 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
dfc5606d 2708 if (!snap)
4e891e0a
AE
2709 return ERR_PTR(-ENOMEM);
2710
2711 ret = -ENOMEM;
c8d18425 2712 snap->name = kstrdup(snap_name, GFP_KERNEL);
4e891e0a
AE
2713 if (!snap->name)
2714 goto err;
2715
c8d18425
AE
2716 snap->id = snap_id;
2717 snap->size = snap_size;
34b13184 2718 snap->features = snap_features;
4e891e0a
AE
2719
2720 return snap;
2721
dfc5606d
YS
2722err:
2723 kfree(snap->name);
2724 kfree(snap);
4e891e0a
AE
2725
2726 return ERR_PTR(ret);
dfc5606d
YS
2727}
2728
cd892126
AE
2729static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
2730 u64 *snap_size, u64 *snap_features)
2731{
2732 char *snap_name;
2733
2734 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
2735
2736 *snap_size = rbd_dev->header.snap_sizes[which];
2737 *snap_features = 0; /* No features for v1 */
2738
2739 /* Skip over names until we find the one we are looking for */
2740
2741 snap_name = rbd_dev->header.snap_names;
2742 while (which--)
2743 snap_name += strlen(snap_name) + 1;
2744
2745 return snap_name;
2746}
2747
9d475de5
AE
2748/*
2749 * Get the size and object order for an image snapshot, or if
2750 * snap_id is CEPH_NOSNAP, gets this information for the base
2751 * image.
2752 */
2753static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
2754 u8 *order, u64 *snap_size)
2755{
2756 __le64 snapid = cpu_to_le64(snap_id);
2757 int ret;
2758 struct {
2759 u8 order;
2760 __le64 size;
2761 } __attribute__ ((packed)) size_buf = { 0 };
2762
36be9a76 2763 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
9d475de5
AE
2764 "rbd", "get_size",
2765 (char *) &snapid, sizeof (snapid),
07b2391f 2766 (char *) &size_buf, sizeof (size_buf), NULL);
36be9a76 2767 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
9d475de5
AE
2768 if (ret < 0)
2769 return ret;
2770
2771 *order = size_buf.order;
2772 *snap_size = le64_to_cpu(size_buf.size);
2773
2774 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
2775 (unsigned long long) snap_id, (unsigned int) *order,
2776 (unsigned long long) *snap_size);
2777
2778 return 0;
2779}
2780
2781static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2782{
2783 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2784 &rbd_dev->header.obj_order,
2785 &rbd_dev->header.image_size);
2786}
2787
1e130199
AE
2788static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2789{
2790 void *reply_buf;
2791 int ret;
2792 void *p;
2793
2794 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2795 if (!reply_buf)
2796 return -ENOMEM;
2797
36be9a76 2798 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
1e130199
AE
2799 "rbd", "get_object_prefix",
2800 NULL, 0,
07b2391f 2801 reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
36be9a76 2802 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
1e130199
AE
2803 if (ret < 0)
2804 goto out;
2805
2806 p = reply_buf;
2807 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2808 p + RBD_OBJ_PREFIX_LEN_MAX,
2809 NULL, GFP_NOIO);
2810
2811 if (IS_ERR(rbd_dev->header.object_prefix)) {
2812 ret = PTR_ERR(rbd_dev->header.object_prefix);
2813 rbd_dev->header.object_prefix = NULL;
2814 } else {
2815 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
2816 }
2817
2818out:
2819 kfree(reply_buf);
2820
2821 return ret;
2822}
2823
b1b5402a
AE
2824static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2825 u64 *snap_features)
2826{
2827 __le64 snapid = cpu_to_le64(snap_id);
2828 struct {
2829 __le64 features;
2830 __le64 incompat;
2831 } features_buf = { 0 };
d889140c 2832 u64 incompat;
b1b5402a
AE
2833 int ret;
2834
36be9a76 2835 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
b1b5402a
AE
2836 "rbd", "get_features",
2837 (char *) &snapid, sizeof (snapid),
2838 (char *) &features_buf, sizeof (features_buf),
07b2391f 2839 NULL);
36be9a76 2840 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
b1b5402a
AE
2841 if (ret < 0)
2842 return ret;
d889140c
AE
2843
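 /*
 * Refuse to map an image that requires any incompatible feature
 * this client does not implement; ignoring such a bit could
 * silently corrupt the image.
 */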
2844 incompat = le64_to_cpu(features_buf.incompat);
2845 if (incompat & ~RBD_FEATURES_ALL)
b8f5c6ed 2846 return -ENXIO;
d889140c 2847
b1b5402a
AE
2848 *snap_features = le64_to_cpu(features_buf.features);
2849
2850 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2851 (unsigned long long) snap_id,
2852 (unsigned long long) *snap_features,
2853 (unsigned long long) le64_to_cpu(features_buf.incompat));
2854
2855 return 0;
2856}
2857
2858static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2859{
2860 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2861 &rbd_dev->header.features);
2862}
2863
86b00e0d
AE
2864static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
2865{
2866 struct rbd_spec *parent_spec;
2867 size_t size;
2868 void *reply_buf = NULL;
2869 __le64 snapid;
2870 void *p;
2871 void *end;
2872 char *image_id;
2873 u64 overlap;
86b00e0d
AE
2874 int ret;
2875
2876 parent_spec = rbd_spec_alloc();
2877 if (!parent_spec)
2878 return -ENOMEM;
2879
2880 size = sizeof (__le64) + /* pool_id */
2881 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
2882 sizeof (__le64) + /* snap_id */
2883 sizeof (__le64); /* overlap */
2884 reply_buf = kmalloc(size, GFP_KERNEL);
2885 if (!reply_buf) {
2886 ret = -ENOMEM;
2887 goto out_err;
2888 }
2889
2890 snapid = cpu_to_le64(CEPH_NOSNAP);
36be9a76 2891 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
86b00e0d
AE
2892 "rbd", "get_parent",
2893 (char *) &snapid, sizeof (snapid),
07b2391f 2894 (char *) reply_buf, size, NULL);
36be9a76 2895 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
86b00e0d
AE
2896 if (ret < 0)
2897 goto out_err;
2898
2899 ret = -ERANGE;
2900 p = reply_buf;
2901 end = (char *) reply_buf + size;
2902 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
2903 if (parent_spec->pool_id == CEPH_NOPOOL)
2904 goto out; /* No parent? No problem. */
2905
0903e875
AE
2906 /* The ceph file layout needs to fit pool id in 32 bits */
2907
2908 ret = -EIO;
2909 if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
2910 goto out;
2911
979ed480 2912 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
86b00e0d
AE
2913 if (IS_ERR(image_id)) {
2914 ret = PTR_ERR(image_id);
2915 goto out_err;
2916 }
2917 parent_spec->image_id = image_id;
2918 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
2919 ceph_decode_64_safe(&p, end, overlap, out_err);
2920
2921 rbd_dev->parent_overlap = overlap;
2922 rbd_dev->parent_spec = parent_spec;
2923 parent_spec = NULL; /* rbd_dev now owns this */
2924out:
2925 ret = 0;
2926out_err:
2927 kfree(reply_buf);
2928 rbd_spec_put(parent_spec);
2929
2930 return ret;
2931}
2932
9e15b77d
AE
2933static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
2934{
2935 size_t image_id_size;
2936 char *image_id;
2937 void *p;
2938 void *end;
2939 size_t size;
2940 void *reply_buf = NULL;
2941 size_t len = 0;
2942 char *image_name = NULL;
2943 int ret;
2944
2945 rbd_assert(!rbd_dev->spec->image_name);
2946
69e7a02f
AE
2947 len = strlen(rbd_dev->spec->image_id);
2948 image_id_size = sizeof (__le32) + len;
9e15b77d
AE
2949 image_id = kmalloc(image_id_size, GFP_KERNEL);
2950 if (!image_id)
2951 return NULL;
2952
2953 p = image_id;
2954 end = (char *) image_id + image_id_size;
69e7a02f 2955 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
9e15b77d
AE
2956
2957 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
2958 reply_buf = kmalloc(size, GFP_KERNEL);
2959 if (!reply_buf)
2960 goto out;
2961
36be9a76 2962 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
9e15b77d
AE
2963 "rbd", "dir_get_name",
2964 image_id, image_id_size,
07b2391f 2965 (char *) reply_buf, size, NULL);
9e15b77d
AE
2966 if (ret < 0)
2967 goto out;
2968 p = reply_buf;
2969 end = (char *) reply_buf + size;
2970 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
2971 if (IS_ERR(image_name))
2972 image_name = NULL;
2973 else
2974 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
2975out:
2976 kfree(reply_buf);
2977 kfree(image_id);
2978
2979 return image_name;
2980}
2981
2982/*
2983 * When a parent image gets probed, we only have the pool, image,
2984 * and snapshot ids but not the names of any of them. This call
2985 * is made later to fill in those names. It has to be done after
2986 * rbd_dev_snaps_update() has completed because some of the
2987 * information (in particular, snapshot name) is not available
2988 * until then.
2989 */
2990static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
2991{
2992 struct ceph_osd_client *osdc;
2993 const char *name;
2994 void *reply_buf = NULL;
2995 int ret;
2996
2997 if (rbd_dev->spec->pool_name)
2998 return 0; /* Already have the names */
2999
3000 /* Look up the pool name */
3001
3002 osdc = &rbd_dev->rbd_client->client->osdc;
3003 name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
935dc89f
AE
3004 if (!name) {
3005 rbd_warn(rbd_dev, "there is no pool with id %llu",
3006 rbd_dev->spec->pool_id); /* Really a BUG() */
3007 return -EIO;
3008 }
9e15b77d
AE
3009
3010 rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
3011 if (!rbd_dev->spec->pool_name)
3012 return -ENOMEM;
3013
3014 /* Fetch the image name; tolerate failure here */
3015
3016 name = rbd_dev_image_name(rbd_dev);
69e7a02f 3017 if (name)
9e15b77d 3018 rbd_dev->spec->image_name = (char *) name;
69e7a02f 3019 else
06ecc6cb 3020 rbd_warn(rbd_dev, "unable to get image name");
9e15b77d
AE
3021
3022 /* Look up the snapshot name. */
3023
3024 name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
3025 if (!name) {
935dc89f
AE
3026 rbd_warn(rbd_dev, "no snapshot with id %llu",
3027 rbd_dev->spec->snap_id); /* Really a BUG() */
9e15b77d
AE
3028 ret = -EIO;
3029 goto out_err;
3030 }
3031 rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
3032 ret = -ENOMEM;
3033 if (!rbd_dev->spec->snap_name)
3034 goto out_err;
3034
3035 return 0;
3036out_err:
3037 kfree(reply_buf);
3038 kfree(rbd_dev->spec->pool_name);
3039 rbd_dev->spec->pool_name = NULL;
3040
3041 return ret;
3042}
3043
6e14b1a6 3044static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
35d489f9
AE
3045{
3046 size_t size;
3047 int ret;
3048 void *reply_buf;
3049 void *p;
3050 void *end;
3051 u64 seq;
3052 u32 snap_count;
3053 struct ceph_snap_context *snapc;
3054 u32 i;
3055
3056 /*
3057 * We'll need room for the seq value (maximum snapshot id),
3058 * snapshot count, and array of that many snapshot ids.
3059 * For now we have a fixed upper limit on the number we're
3060 * prepared to receive.
3061 */
3062 size = sizeof (__le64) + sizeof (__le32) +
3063 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3064 reply_buf = kzalloc(size, GFP_KERNEL);
3065 if (!reply_buf)
3066 return -ENOMEM;
3067
36be9a76 3068 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
35d489f9
AE
3069 "rbd", "get_snapcontext",
3070 NULL, 0,
07b2391f 3071 reply_buf, size, ver);
36be9a76 3072 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
35d489f9
AE
3073 if (ret < 0)
3074 goto out;
3075
3076 ret = -ERANGE;
3077 p = reply_buf;
3078 end = (char *) reply_buf + size;
3079 ceph_decode_64_safe(&p, end, seq, out);
3080 ceph_decode_32_safe(&p, end, snap_count, out);
3081
3082 /*
3083 * Make sure the reported number of snapshot ids wouldn't go
3084 * beyond the end of our buffer. But before checking that,
3085 * make sure the computed size of the snapshot context we
3086 * allocate is representable in a size_t.
3087 */
3088 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3089 / sizeof (u64)) {
3090 ret = -EINVAL;
3091 goto out;
3092 }
3093 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3094 goto out;
3095
3096 size = sizeof (struct ceph_snap_context) +
3097 snap_count * sizeof (snapc->snaps[0]);
3098 snapc = kmalloc(size, GFP_KERNEL);
3099 if (!snapc) {
3100 ret = -ENOMEM;
3101 goto out;
3102 }
3103
3104 atomic_set(&snapc->nref, 1);
3105 snapc->seq = seq;
3106 snapc->num_snaps = snap_count;
3107 for (i = 0; i < snap_count; i++)
3108 snapc->snaps[i] = ceph_decode_64(&p);
3109
3110 rbd_dev->header.snapc = snapc;
3111
3112 dout(" snap context seq = %llu, snap_count = %u\n",
3113 (unsigned long long) seq, (unsigned int) snap_count);
3114 ret = 0;
3115out:
3116 kfree(reply_buf);
3117
3118 return ret;
3119}
3120
b8b1e2db
AE
3121static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3122{
3123 size_t size;
3124 void *reply_buf;
3125 __le64 snap_id;
3126 int ret;
3127 void *p;
3128 void *end;
b8b1e2db
AE
3129 char *snap_name;
3130
3131 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3132 reply_buf = kmalloc(size, GFP_KERNEL);
3133 if (!reply_buf)
3134 return ERR_PTR(-ENOMEM);
3135
3136 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
36be9a76 3137 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
b8b1e2db
AE
3138 "rbd", "get_snapshot_name",
3139 (char *) &snap_id, sizeof (snap_id),
07b2391f 3140 reply_buf, size, NULL);
36be9a76 3141 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
b8b1e2db
AE
3142 if (ret < 0)
3143 goto out;
3144
3145 p = reply_buf;
3146 end = (char *) reply_buf + size;
e5c35534 3147 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
b8b1e2db
AE
3148 if (IS_ERR(snap_name)) {
3149 ret = PTR_ERR(snap_name);
3150 goto out;
3151 } else {
3152 dout(" snap_id 0x%016llx snap_name = %s\n",
3153 (unsigned long long) le64_to_cpu(snap_id), snap_name);
3154 }
3155 kfree(reply_buf);
3156
3157 return snap_name;
3158out:
3159 kfree(reply_buf);
3160
3161 return ERR_PTR(ret);
3162}
3163
3164static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3165 u64 *snap_size, u64 *snap_features)
3166{
e0b49868 3167 u64 snap_id;
b8b1e2db
AE
3168 u8 order;
3169 int ret;
3170
3171 snap_id = rbd_dev->header.snapc->snaps[which];
3172 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
3173 if (ret)
3174 return ERR_PTR(ret);
3175 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
3176 if (ret)
3177 return ERR_PTR(ret);
3178
3179 return rbd_dev_v2_snap_name(rbd_dev, which);
3180}
3181
3182static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3183 u64 *snap_size, u64 *snap_features)
3184{
3185 if (rbd_dev->image_format == 1)
3186 return rbd_dev_v1_snap_info(rbd_dev, which,
3187 snap_size, snap_features);
3188 if (rbd_dev->image_format == 2)
3189 return rbd_dev_v2_snap_info(rbd_dev, which,
3190 snap_size, snap_features);
3191 return ERR_PTR(-EINVAL);
3192}
3193
117973fb
AE
3194static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
3195{
3196 int ret;
3197 __u8 obj_order;
3198
3199 down_write(&rbd_dev->header_rwsem);
3200
3201 /* Grab old order first, to see if it changes */
3202
3203 obj_order = rbd_dev->header.obj_order;
3204 ret = rbd_dev_v2_image_size(rbd_dev);
3205 if (ret)
3206 goto out;
3207 if (rbd_dev->header.obj_order != obj_order) {
3208 ret = -EIO;
3209 goto out;
3210 }
3211 rbd_update_mapping_size(rbd_dev);
3212
3213 ret = rbd_dev_v2_snap_context(rbd_dev, hver);
3214 dout("rbd_dev_v2_snap_context returned %d\n", ret);
3215 if (ret)
3216 goto out;
3217 ret = rbd_dev_snaps_update(rbd_dev);
3218 dout("rbd_dev_snaps_update returned %d\n", ret);
3219 if (ret)
3220 goto out;
3221 ret = rbd_dev_snaps_register(rbd_dev);
3222 dout("rbd_dev_snaps_register returned %d\n", ret);
3223out:
3224 up_write(&rbd_dev->header_rwsem);
3225
3226 return ret;
3227}
3228
dfc5606d 3229/*
35938150
AE
3230 * Scan the rbd device's current snapshot list and compare it to the
3231 * newly-received snapshot context. Remove any existing snapshots
3232 * not present in the new snapshot context. Add a new snapshot for
3233 * any snapshots in the snapshot context not in the current list.
3234 * And verify there are no changes to snapshots we already know
3235 * about.
3236 *
3237 * Assumes the snapshots in the snapshot context are sorted by
3238 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
3239 * are also maintained in that order.)
dfc5606d 3240 */
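/*
 * This amounts to a single merge pass over two id-sorted
 * sequences. Example (ids only): existing list {5, 3, 1}, new
 * context {5, 4, 1}: 5 matches and is kept, 4 is new and gets
 * inserted, 3 is gone and gets removed, 1 matches and is kept.
 */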
304f6808 3241static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
dfc5606d 3242{
35938150
AE
3243 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3244 const u32 snap_count = snapc->num_snaps;
35938150
AE
3245 struct list_head *head = &rbd_dev->snaps;
3246 struct list_head *links = head->next;
3247 u32 index = 0;
dfc5606d 3248
9fcbb800 3249 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
35938150
AE
3250 while (index < snap_count || links != head) {
3251 u64 snap_id;
3252 struct rbd_snap *snap;
cd892126
AE
3253 char *snap_name;
3254 u64 snap_size = 0;
3255 u64 snap_features = 0;
dfc5606d 3256
35938150
AE
3257 snap_id = index < snap_count ? snapc->snaps[index]
3258 : CEPH_NOSNAP;
3259 snap = links != head ? list_entry(links, struct rbd_snap, node)
3260 : NULL;
aafb230e 3261 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
dfc5606d 3262
35938150
AE
3263 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
3264 struct list_head *next = links->next;
dfc5606d 3265
6d292906
AE
3266 /*
3267 * A previously-existing snapshot is not in
3268 * the new snap context.
3269 *
3270 * If the now missing snapshot is the one the
3271 * image is mapped to, clear its exists flag
3272 * so we can avoid sending any more requests
3273 * to it.
3274 */
0d7dbfce 3275 if (rbd_dev->spec->snap_id == snap->id)
6d292906 3276 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
41f38c2b 3277 rbd_remove_snap_dev(snap);
9fcbb800 3278 dout("%ssnap id %llu has been removed\n",
0d7dbfce
AE
3279 rbd_dev->spec->snap_id == snap->id ?
3280 "mapped " : "",
9fcbb800 3281 (unsigned long long) snap->id);
35938150
AE
3282
3283 /* Done with this list entry; advance */
3284
3285 links = next;
dfc5606d
YS
3286 continue;
3287 }
35938150 3288
b8b1e2db
AE
3289 snap_name = rbd_dev_snap_info(rbd_dev, index,
3290 &snap_size, &snap_features);
cd892126
AE
3291 if (IS_ERR(snap_name))
3292 return PTR_ERR(snap_name);
3293
9fcbb800
AE
3294 dout("entry %u: snap_id = %llu\n", (unsigned int) index,
3295 (unsigned long long) snap_id);
35938150
AE
3296 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
3297 struct rbd_snap *new_snap;
3298
3299 /* We haven't seen this snapshot before */
3300
c8d18425 3301 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
cd892126 3302 snap_id, snap_size, snap_features);
9fcbb800
AE
3303 if (IS_ERR(new_snap)) {
3304 int err = PTR_ERR(new_snap);
3305
3306 dout(" failed to add dev, error %d\n", err);
3307
3308 return err;
3309 }
35938150
AE
3310
3311 /* New goes before existing, or at end of list */
3312
9fcbb800 3313 dout(" added dev%s\n", snap ? "" : " at end");
35938150
AE
3314 if (snap)
3315 list_add_tail(&new_snap->node, &snap->node);
3316 else
523f3258 3317 list_add_tail(&new_snap->node, head);
35938150
AE
3318 } else {
3319 /* Already have this one */
3320
9fcbb800
AE
3321 dout(" already present\n");
3322
cd892126 3323 rbd_assert(snap->size == snap_size);
aafb230e 3324 rbd_assert(!strcmp(snap->name, snap_name));
cd892126 3325 rbd_assert(snap->features == snap_features);
35938150
AE
3326
3327 /* Done with this list entry; advance */
3328
3329 links = links->next;
dfc5606d 3330 }
35938150
AE
3331
3332 /* Advance to the next entry in the snapshot context */
3333
3334 index++;
dfc5606d 3335 }
9fcbb800 3336 dout("%s: done\n", __func__);
dfc5606d
YS
3337
3338 return 0;
3339}
3340
304f6808
AE
3341/*
3342 * Scan the list of snapshots and register the devices for any that
3343 * have not already been registered.
3344 */
3345static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
3346{
3347 struct rbd_snap *snap;
3348 int ret = 0;
3349
37206ee5 3350 dout("%s:\n", __func__);
86ff77bb
AE
3351 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
3352 return -EIO;
304f6808
AE
3353
3354 list_for_each_entry(snap, &rbd_dev->snaps, node) {
3355 if (!rbd_snap_registered(snap)) {
3356 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
3357 if (ret < 0)
3358 break;
3359 }
3360 }
3361 dout("%s: returning %d\n", __func__, ret);
3362
3363 return ret;
3364}
3365
dfc5606d
YS
3366static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
3367{
dfc5606d 3368 struct device *dev;
cd789ab9 3369 int ret;
dfc5606d
YS
3370
3371 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
dfc5606d 3372
cd789ab9 3373 dev = &rbd_dev->dev;
dfc5606d
YS
3374 dev->bus = &rbd_bus_type;
3375 dev->type = &rbd_device_type;
3376 dev->parent = &rbd_root_dev;
3377 dev->release = rbd_dev_release;
de71a297 3378 dev_set_name(dev, "%d", rbd_dev->dev_id);
dfc5606d 3379 ret = device_register(dev);
dfc5606d 3380
dfc5606d 3381 mutex_unlock(&ctl_mutex);
cd789ab9 3382
dfc5606d 3383 return ret;
602adf40
YS
3384}
3385
dfc5606d
YS
3386static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
3387{
3388 device_unregister(&rbd_dev->dev);
3389}
3390
e2839308 3391static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
1ddbe94e
AE
3392
3393/*
499afd5b
AE
3394 * Get a unique rbd identifier for the given new rbd_dev, and add
3395 * the rbd_dev to the global list. The minimum rbd id is 1.
1ddbe94e 3396 */
e2839308 3397static void rbd_dev_id_get(struct rbd_device *rbd_dev)
b7f23c36 3398{
e2839308 3399 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
499afd5b
AE
3400
3401 spin_lock(&rbd_dev_list_lock);
3402 list_add_tail(&rbd_dev->node, &rbd_dev_list);
3403 spin_unlock(&rbd_dev_list_lock);
e2839308
AE
3404 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
3405 (unsigned long long) rbd_dev->dev_id);
1ddbe94e 3406}
b7f23c36 3407
1ddbe94e 3408/*
499afd5b
AE
3409 * Remove an rbd_dev from the global list, and record that its
3410 * identifier is no longer in use.
1ddbe94e 3411 */
e2839308 3412static void rbd_dev_id_put(struct rbd_device *rbd_dev)
1ddbe94e 3413{
d184f6bf 3414 struct list_head *tmp;
de71a297 3415 int rbd_id = rbd_dev->dev_id;
d184f6bf
AE
3416 int max_id;
3417
aafb230e 3418 rbd_assert(rbd_id > 0);
499afd5b 3419
e2839308
AE
3420 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
3421 (unsigned long long) rbd_dev->dev_id);
499afd5b
AE
3422 spin_lock(&rbd_dev_list_lock);
3423 list_del_init(&rbd_dev->node);
d184f6bf
AE
3424
3425 /*
3426 * If the id being "put" is not the current maximum, there
3427 * is nothing special we need to do.
3428 */
e2839308 3429 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
d184f6bf
AE
3430 spin_unlock(&rbd_dev_list_lock);
3431 return;
3432 }
3433
3434 /*
3435 * We need to update the current maximum id. Search the
3436 * list to find out what it is. We're more likely to find
3437 * the maximum at the end, so search the list backward.
3438 */
3439 max_id = 0;
3440 list_for_each_prev(tmp, &rbd_dev_list) {
3441 struct rbd_device *rbd_dev;
3442
3443 rbd_dev = list_entry(tmp, struct rbd_device, node);
b213e0b1
AE
3444 if (rbd_dev->dev_id > max_id)
3445 max_id = rbd_dev->dev_id;
d184f6bf 3446 }
499afd5b 3447 spin_unlock(&rbd_dev_list_lock);
b7f23c36 3448
1ddbe94e 3449 /*
e2839308 3450 * The max id could have been updated by rbd_dev_id_get(), in
d184f6bf
AE
3451 * which case it now accurately reflects the new maximum.
3452 * Be careful not to overwrite the maximum value in that
3453 * case.
1ddbe94e 3454 */
e2839308
AE
3455 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
3456 dout(" max dev id has been reset\n");
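 /*
 * Example: with ids {1, 2, 3} allocated, releasing id 3 drops the
 * maximum back to 2, so the next device gets id 3 again; releasing
 * id 2 instead leaves the maximum, and future allocations,
 * unchanged.
 */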
b7f23c36
AE
3457}
3458
e28fff26
AE
3459/*
3460 * Skips over white space at *buf, and updates *buf to point to the
3461 * first found non-space character (if any). Returns the length of
593a9e7b
AE
3462 * the token (string of non-white space characters) found. Note
3463 * that *buf must be terminated with '\0'.
e28fff26
AE
3464 */
3465static inline size_t next_token(const char **buf)
3466{
3467 /*
3468 * These are the characters that produce nonzero for
3469 * isspace() in the "C" and "POSIX" locales.
3470 */
3471 const char *spaces = " \f\n\r\t\v";
3472
3473 *buf += strspn(*buf, spaces); /* Find start of token */
3474
3475 return strcspn(*buf, spaces); /* Return token length */
3476}
3477
3478/*
3479 * Finds the next token in *buf, and if the provided token buffer is
3480 * big enough, copies the found token into it. The result, if
593a9e7b
AE
3481 * copied, is guaranteed to be terminated with '\0'. Note that *buf
3482 * must be terminated with '\0' on entry.
e28fff26
AE
3483 *
3484 * Returns the length of the token found (not including the '\0').
3485 * Return value will be 0 if no token is found, and it will be >=
3486 * token_size if the token would not fit.
3487 *
593a9e7b 3488 * The *buf pointer will be updated to point beyond the end of the
e28fff26
AE
3489 * found token. Note that this occurs even if the token buffer is
3490 * too small to hold it.
3491 */
3492static inline size_t copy_token(const char **buf,
3493 char *token,
3494 size_t token_size)
3495{
3496 size_t len;
3497
3498 len = next_token(buf);
3499 if (len < token_size) {
3500 memcpy(token, *buf, len);
3501 *(token + len) = '\0';
3502 }
3503 *buf += len;
3504
3505 return len;
3506}
3507
ea3352f4
AE
3508/*
3509 * Finds the next token in *buf, dynamically allocates a buffer big
3510 * enough to hold a copy of it, and copies the token into the new
3511 * buffer. The copy is guaranteed to be terminated with '\0'. Note
3512 * that a duplicate buffer is created even for a zero-length token.
3513 *
3514 * Returns a pointer to the newly-allocated duplicate, or a null
3515 * pointer if memory for the duplicate was not available. If
3516 * the lenp argument is a non-null pointer, the length of the token
3517 * (not including the '\0') is returned in *lenp.
3518 *
3519 * If successful, the *buf pointer will be updated to point beyond
3520 * the end of the found token.
3521 *
3522 * Note: uses GFP_KERNEL for allocation.
3523 */
3524static inline char *dup_token(const char **buf, size_t *lenp)
3525{
3526 char *dup;
3527 size_t len;
3528
3529 len = next_token(buf);
4caf35f9 3530 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
ea3352f4
AE
3531 if (!dup)
3532 return NULL;
ea3352f4
AE
3533 *(dup + len) = '\0';
3534 *buf += len;
3535
3536 if (lenp)
3537 *lenp = len;
3538
3539 return dup;
3540}
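/*
 * Illustration (not driver code): successively applying dup_token()
 * to "1.2.3.4:6789 name=admin rbd foo" yields "1.2.3.4:6789",
 * "name=admin", "rbd", "foo", and finally a zero-length ("")
 * duplicate once the buffer is exhausted.
 */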
3541
a725f65e 3542/*
859c31df
AE
3543 * Parse the options provided for an "rbd add" (i.e., rbd image
3544 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
3545 * and the data written is passed here via a NUL-terminated buffer.
3546 * Returns 0 if successful or an error code otherwise.
d22f76e7 3547 *
859c31df
AE
3548 * The information extracted from these options is recorded in
3549 * the other parameters which return dynamically-allocated
3550 * structures:
3551 * ceph_opts
3552 * The address of a pointer that will refer to a ceph options
3553 * structure. Caller must release the returned pointer using
3554 * ceph_destroy_options() when it is no longer needed.
3555 * rbd_opts
3556 * Address of an rbd options pointer. Fully initialized by
3557 * this function; caller must release with kfree().
3558 * spec
3559 * Address of an rbd image specification pointer. Fully
3560 * initialized by this function based on parsed options.
3561 * Caller must release with rbd_spec_put().
3562 *
3563 * The options passed take this form:
3564 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
3565 * where:
3566 * <mon_addrs>
3567 * A comma-separated list of one or more monitor addresses.
3568 * A monitor address is an ip address, optionally followed
3569 * by a port number (separated by a colon).
3570 * I.e.: ip1[:port1][,ip2[:port2]...]
3571 * <options>
3572 * A comma-separated list of ceph and/or rbd options.
3573 * <pool_name>
3574 * The name of the rados pool containing the rbd image.
3575 * <image_name>
3576 * The name of the image in that pool to map.
3577 * <snap_id>
3578 * An optional snapshot id. If provided, the mapping will
3579 * present data from the image at the time that snapshot was
3580 * created. The image head is used if no snapshot id is
3581 * provided. Snapshot mappings are always read-only.
a725f65e 3582 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!spec->snap_name)
		goto out_mem;
	*(spec->snap_name + len) = '\0';

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

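/*
 * Example (illustration only; the monitor addresses, option values
 * and names below are made up):
 *
 *	1.2.3.4:6789,1.2.3.5:6789 name=admin,secret=<key> mypool myimage mysnap
 *
 * parses into mon_addrs "1.2.3.4:6789,1.2.3.5:6789", options
 * "name=admin,secret=<key>", spec->pool_name "mypool",
 * spec->image_name "myimage" and spec->snap_name "mysnap".
 * Omitting "mysnap" would map the image head instead.
 */
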
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	void *p;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.
	 */
	if (rbd_dev->spec->image_id)
		return 0;

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id",
				NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = response;
	rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
						p + RBD_IMAGE_ID_LEN_MAX,
						NULL, GFP_NOIO);
	if (IS_ERR(rbd_dev->spec->image_id)) {
		ret = PTR_ERR(rbd_dev->spec->image_id);
		rbd_dev->spec->image_id = NULL;
	} else {
		dout("image_id is %s\n", rbd_dev->spec->image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

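/*
 * Example (illustration only): for an image the user called
 * "myimage", the id object queried above is named
 * RBD_ID_PREFIX "myimage"; the "rbd" class method "get_id" on that
 * object returns the opaque persistent id string from which all
 * other per-image object names are then derived.
 */
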
static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;

	/* Version 1 images have no id; empty string is used */

	rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
	if (!rbd_dev->spec->image_id)
		return -ENOMEM;

	/* Record the header object name for this rbd image. */

	size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name) {
		ret = -ENOMEM;
		goto out_err;
	}
	sprintf(rbd_dev->header_name, "%s%s",
		rbd_dev->spec->image_name, RBD_SUFFIX);

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	rbd_dev->image_format = 1;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

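/*
 * Example (illustration only, assuming RBD_SUFFIX is defined as
 * ".rbd" in rbd_types.h): a format 1 image named "myimage" gets the
 * header object name "myimage.rbd"; no separate image id is needed
 * to locate it.
 */
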
static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	u64 ver = 0;

	/*
	 * Image id was filled in by the caller.  Record the header
	 * object name for this rbd image.
	 */
	size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	sprintf(rbd_dev->header_name, "%s%s",
		RBD_HEADER_PREFIX, rbd_dev->spec->image_id);

	/* Get the size and object order for the image */

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context, plus the header version */

	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
	if (ret)
		goto out_err;
	rbd_dev->header.obj_version = ver;

	rbd_dev->image_format = 2;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;
out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

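/*
 * Example (illustration only, assuming RBD_HEADER_PREFIX is defined
 * as "rbd_header." in rbd_types.h): a format 2 image whose id is
 * "abc123" keeps its header in the object "rbd_header.abc123".
 * Note the name is built from the image id, not the image name.
 */
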
static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
	int ret;

	/* no need to lock here, as rbd_dev is not registered yet */
	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_probe_update_spec(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_set_mapping(rbd_dev);
	if (ret)
		goto err_out_snaps;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/*
	 * At this point cleanup in the event of an error is the job
	 * of the sysfs code (initiated by rbd_bus_del_dev()).
	 */
	down_write(&rbd_dev->header_rwsem);
	ret = rbd_dev_snaps_register(rbd_dev);
	up_write(&rbd_dev->header_rwsem);
	if (ret)
		goto err_out_bus;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */

	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;
err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */

	rbd_bus_del_dev(rbd_dev);

	return ret;
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}

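/*
 * Note on the error paths above: the labels fall through in the
 * reverse order of setup (the disk, then the blkdev registration,
 * then the device id, then the snapshot list), so each cleanup step
 * runs only for work that had already completed when the failure
 * occurred.
 */
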
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);

		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);

	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t) rc;
}

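/*
 * Example (illustration only; the address and names are made up):
 *
 *	$ echo "1.2.3.4:6789 name=admin mypool myimage" > /sys/bus/rbd/add
 *
 * triggers rbd_add(), which maps "myimage" from pool "mypool" and,
 * on success, exposes it as a block device named RBD_DRV_NAME
 * followed by its id (e.g. /dev/rbd0).
 */
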
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

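/*
 * Example (illustration only): writing a previously assigned device
 * id to the bus control file unmaps that device:
 *
 *	$ echo 1 > /sys/bus/rbd/remove
 *
 * removes the device that was created as /dev/rbd1, or fails with
 * -EBUSY if it is still open.
 */
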
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");