/*
 * QEMU Block driver for RADOS (Ceph)
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <inttypes.h>

#include "qemu-common.h"
#include "qemu-error.h"
#include "block_int.h"

#include <rbd/librbd.h>

/*
 * When specifying the image filename use:
 *
 * rbd:poolname/devicename[@snapshotname][:option1=value1[:option2=value2...]]
 *
 * poolname must be the name of an existing rados pool.
 *
 * devicename is the name of the rbd image.
 *
 * Each option given is used to configure rados, and may be any valid
 * Ceph option, "id", or "conf".
 *
 * The "id" option indicates what user we should authenticate as to
 * the Ceph cluster.  If it is excluded we will use the Ceph default
 * (normally 'admin').
 *
 * The "conf" option specifies a Ceph configuration file to read.  If
 * it is not specified, we will read from the default Ceph locations
 * (e.g., /etc/ceph/ceph.conf).  To avoid reading _any_ configuration
 * file, specify conf=/dev/null.
 *
 * Configuration values containing :, @, or = can be escaped with a
 * leading "\".
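 *
 * For example (hypothetical pool and image names):
 *
 *   rbd:mypool/myimage@snap1:conf=/etc/ceph/ceph.conf:id=admin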
 */

/* rbd_aio_discard added in 0.1.2 */
#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 2)
#define LIBRBD_SUPPORTS_DISCARD
#else
#undef LIBRBD_SUPPORTS_DISCARD
#endif

#define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER)

#define RBD_MAX_CONF_NAME_SIZE 128
#define RBD_MAX_CONF_VAL_SIZE 512
#define RBD_MAX_CONF_SIZE 1024
#define RBD_MAX_POOL_NAME_SIZE 128
#define RBD_MAX_SNAP_NAME_SIZE 128
#define RBD_MAX_SNAPS 100

typedef enum {
    RBD_AIO_READ,
    RBD_AIO_WRITE,
    RBD_AIO_DISCARD
} RBDAIOCmd;

typedef struct RBDAIOCB {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    QEMUIOVector *qiov;
    char *bounce;
    RBDAIOCmd cmd;
    int64_t sector_num;
    int error;
    struct BDRVRBDState *s;
    int cancelled;
} RBDAIOCB;

typedef struct RADOSCB {
    int rcbid;
    RBDAIOCB *acb;
    struct BDRVRBDState *s;
    int done;
    int64_t size;
    char *buf;
    int ret;
} RADOSCB;

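/*
 * Indices into BDRVRBDState.fds: completions from librbd callback
 * threads are forwarded to the qemu thread over this pipe (see
 * rbd_finish_aiocb() and qemu_rbd_aio_event_reader()).
 */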
#define RBD_FD_READ 0
#define RBD_FD_WRITE 1

typedef struct BDRVRBDState {
    int fds[2];
    rados_t cluster;
    rados_ioctx_t io_ctx;
    rbd_image_t image;
    char name[RBD_MAX_IMAGE_NAME_SIZE];
    int qemu_aio_count;
    char *snap;
    int event_reader_pos;
    RADOSCB *event_rcb;
} BDRVRBDState;

static void rbd_aio_bh_cb(void *opaque);

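/*
 * Copy the next token from src (up to the given delimiter, honoring
 * backslash escapes) into dst, and point *p past the delimiter so the
 * caller can continue parsing.
 */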
static int qemu_rbd_next_tok(char *dst, int dst_len,
                             char *src, char delim,
                             const char *name,
                             char **p)
{
    int l;
    char *end;

    *p = NULL;

    if (delim != '\0') {
        for (end = src; *end; ++end) {
            if (*end == delim) {
                break;
            }
            if (*end == '\\' && end[1] != '\0') {
                end++;
            }
        }
        if (*end == delim) {
            *p = end + 1;
            *end = '\0';
        }
    }
    l = strlen(src);
    if (l >= dst_len) {
        error_report("%s too long", name);
        return -EINVAL;
    } else if (l == 0) {
        error_report("%s too short", name);
        return -EINVAL;
    }

    pstrcpy(dst, dst_len, src);

    return 0;
}

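/* Strip one level of backslash escaping in place. */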
static void qemu_rbd_unescape(char *src)
{
    char *p;

    for (p = src; *src; ++src, ++p) {
        if (*src == '\\' && src[1] != '\0') {
            src++;
        }
        *p = *src;
    }
    *p = '\0';
}

static int qemu_rbd_parsename(const char *filename,
                              char *pool, int pool_len,
                              char *snap, int snap_len,
                              char *name, int name_len,
                              char *conf, int conf_len)
{
    const char *start;
    char *p, *buf;
    int ret;

    if (!strstart(filename, "rbd:", &start)) {
        return -EINVAL;
    }

    buf = g_strdup(start);
    p = buf;
    *snap = '\0';
    *conf = '\0';

    ret = qemu_rbd_next_tok(pool, pool_len, p, '/', "pool name", &p);
    if (ret < 0 || !p) {
        ret = -EINVAL;
        goto done;
    }
    qemu_rbd_unescape(pool);

    if (strchr(p, '@')) {
        ret = qemu_rbd_next_tok(name, name_len, p, '@', "object name", &p);
        if (ret < 0) {
            goto done;
        }
        ret = qemu_rbd_next_tok(snap, snap_len, p, ':', "snap name", &p);
        qemu_rbd_unescape(snap);
    } else {
        ret = qemu_rbd_next_tok(name, name_len, p, ':', "object name", &p);
    }
    qemu_rbd_unescape(name);
    if (ret < 0 || !p) {
        goto done;
    }

    ret = qemu_rbd_next_tok(conf, conf_len, p, '\0', "configuration", &p);

done:
    g_free(buf);
    return ret;
}

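/*
 * Return the value of the "id=" option from conf (copied into
 * clientname), or NULL if no client name was given.
 */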
static char *qemu_rbd_parse_clientname(const char *conf, char *clientname)
{
    const char *p = conf;

    while (*p) {
        int len;
        const char *end = strchr(p, ':');

        if (end) {
            len = end - p;
        } else {
            len = strlen(p);
        }

        if (strncmp(p, "id=", 3) == 0) {
            len -= 3;
            strncpy(clientname, p + 3, len);
            clientname[len] = '\0';
            return clientname;
        }
        if (end == NULL) {
            break;
        }
        p = end + 1;
    }
    return NULL;
}

static int qemu_rbd_set_conf(rados_t cluster, const char *conf)
{
    char *p, *buf;
    char name[RBD_MAX_CONF_NAME_SIZE];
    char value[RBD_MAX_CONF_VAL_SIZE];
    int ret = 0;

    buf = g_strdup(conf);
    p = buf;

    while (p) {
        ret = qemu_rbd_next_tok(name, sizeof(name), p,
                                '=', "conf option name", &p);
        if (ret < 0) {
            break;
        }
        qemu_rbd_unescape(name);

        if (!p) {
            error_report("conf option %s has no value", name);
            ret = -EINVAL;
            break;
        }

        ret = qemu_rbd_next_tok(value, sizeof(value), p,
                                ':', "conf option value", &p);
        if (ret < 0) {
            break;
        }
        qemu_rbd_unescape(value);

        if (strcmp(name, "conf") == 0) {
            ret = rados_conf_read_file(cluster, value);
            if (ret < 0) {
                error_report("error reading conf file %s", value);
                break;
            }
        } else if (strcmp(name, "id") == 0) {
            /* ignore, this is parsed by qemu_rbd_parse_clientname() */
        } else {
            ret = rados_conf_set(cluster, name, value);
            if (ret < 0) {
                error_report("invalid conf option %s", name);
                ret = -EINVAL;
                break;
            }
        }
    }

    g_free(buf);
    return ret;
}

static int qemu_rbd_create(const char *filename, QEMUOptionParameter *options)
{
    int64_t bytes = 0;
    int64_t objsize;
    int obj_order = 0;
    char pool[RBD_MAX_POOL_NAME_SIZE];
    char name[RBD_MAX_IMAGE_NAME_SIZE];
    char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
    char conf[RBD_MAX_CONF_SIZE];
    char clientname_buf[RBD_MAX_CONF_SIZE];
    char *clientname;
    rados_t cluster;
    rados_ioctx_t io_ctx;
    int ret;

    if (qemu_rbd_parsename(filename, pool, sizeof(pool),
                           snap_buf, sizeof(snap_buf),
                           name, sizeof(name),
                           conf, sizeof(conf)) < 0) {
        return -EINVAL;
    }

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            bytes = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                objsize = options->value.n;
                if ((objsize - 1) & objsize) {    /* not a power of 2? */
                    error_report("obj size needs to be power of 2");
                    return -EINVAL;
                }
                if (objsize < 4096) {
                    error_report("obj size too small");
                    return -EINVAL;
                }
                obj_order = ffs(objsize) - 1;
            }
        }
        options++;
    }

    clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
    if (rados_create(&cluster, clientname) < 0) {
        error_report("error initializing");
        return -EIO;
    }

    if (strstr(conf, "conf=") == NULL) {
        /* try default location, but ignore failure */
        rados_conf_read_file(cluster, NULL);
    }

    if (conf[0] != '\0' &&
        qemu_rbd_set_conf(cluster, conf) < 0) {
        error_report("error setting config options");
        rados_shutdown(cluster);
        return -EIO;
    }

    if (rados_connect(cluster) < 0) {
        error_report("error connecting");
        rados_shutdown(cluster);
        return -EIO;
    }

    if (rados_ioctx_create(cluster, pool, &io_ctx) < 0) {
        error_report("error opening pool %s", pool);
        rados_shutdown(cluster);
        return -EIO;
    }

    ret = rbd_create(io_ctx, name, bytes, &obj_order);
    rados_ioctx_destroy(io_ctx);
    rados_shutdown(cluster);

    return ret;
}

/*
 * This aio completion is called from qemu_rbd_aio_event_reader() and
 * runs in qemu context. It schedules a bh, but only if the aio was not
 * cancelled before then.
 */
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;
    int64_t r;

    if (acb->cancelled) {
        qemu_vfree(acb->bounce);
        qemu_aio_release(acb);
        goto done;
    }

    r = rcb->ret;

    if (acb->cmd == RBD_AIO_WRITE ||
        acb->cmd == RBD_AIO_DISCARD) {
        if (r < 0) {
            acb->ret = r;
            acb->error = 1;
        } else if (!acb->error) {
            acb->ret = rcb->size;
        }
    } else {
        if (r < 0) {
            memset(rcb->buf, 0, rcb->size);
            acb->ret = r;
            acb->error = 1;
        } else if (r < rcb->size) {
            memset(rcb->buf + r, 0, rcb->size - r);
            if (!acb->error) {
                acb->ret = rcb->size;
            }
        } else if (!acb->error) {
            acb->ret = r;
        }
    }
    /* Note that acb->bh can be NULL in the case where the aio was cancelled */
    acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
done:
    g_free(rcb);
}

/*
 * aio fd read handler. It runs in the qemu context and calls the
 * completion handling of completed rados aio operations.
 */
static void qemu_rbd_aio_event_reader(void *opaque)
{
    BDRVRBDState *s = opaque;

    ssize_t ret;

    do {
        char *p = (char *)&s->event_rcb;

        /* now read the rcb pointer that was sent from a non qemu thread */
        ret = read(s->fds[RBD_FD_READ], p + s->event_reader_pos,
                   sizeof(s->event_rcb) - s->event_reader_pos);
        if (ret > 0) {
            s->event_reader_pos += ret;
            if (s->event_reader_pos == sizeof(s->event_rcb)) {
                s->event_reader_pos = 0;
                qemu_rbd_complete_aio(s->event_rcb);
                s->qemu_aio_count--;
            }
        }
    } while (ret < 0 && errno == EINTR);
}

static int qemu_rbd_aio_flush_cb(void *opaque)
{
    BDRVRBDState *s = opaque;

    return (s->qemu_aio_count > 0);
}

static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVRBDState *s = bs->opaque;
    char pool[RBD_MAX_POOL_NAME_SIZE];
    char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
    char conf[RBD_MAX_CONF_SIZE];
    char clientname_buf[RBD_MAX_CONF_SIZE];
    char *clientname;
    int r;

    if (qemu_rbd_parsename(filename, pool, sizeof(pool),
                           snap_buf, sizeof(snap_buf),
                           s->name, sizeof(s->name),
                           conf, sizeof(conf)) < 0) {
        return -EINVAL;
    }

    clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
    r = rados_create(&s->cluster, clientname);
    if (r < 0) {
        error_report("error initializing");
        return r;
    }

    s->snap = NULL;
    if (snap_buf[0] != '\0') {
        s->snap = g_strdup(snap_buf);
    }

    if (strstr(conf, "conf=") == NULL) {
        /* try default location, but ignore failure */
        rados_conf_read_file(s->cluster, NULL);
    }

    if (conf[0] != '\0') {
        r = qemu_rbd_set_conf(s->cluster, conf);
        if (r < 0) {
            error_report("error setting config options");
            goto failed_shutdown;
        }
    }

    r = rados_connect(s->cluster);
    if (r < 0) {
        error_report("error connecting");
        goto failed_shutdown;
    }

    r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
    if (r < 0) {
        error_report("error opening pool %s", pool);
        goto failed_shutdown;
    }

    r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
    if (r < 0) {
        error_report("error reading header from %s", s->name);
        goto failed_open;
    }

    bs->read_only = (s->snap != NULL);

    s->event_reader_pos = 0;
    r = qemu_pipe(s->fds);
    if (r < 0) {
        error_report("error opening eventfd");
        goto failed;
    }
    fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
    fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
                            NULL, qemu_rbd_aio_flush_cb, s);

    return 0;

failed:
    rbd_close(s->image);
failed_open:
    rados_ioctx_destroy(s->io_ctx);
failed_shutdown:
    rados_shutdown(s->cluster);
    g_free(s->snap);
    return r;
}

static void qemu_rbd_close(BlockDriverState *bs)
{
    BDRVRBDState *s = bs->opaque;

    close(s->fds[0]);
    close(s->fds[1]);
    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);

    rbd_close(s->image);
    rados_ioctx_destroy(s->io_ctx);
    g_free(s->snap);
    rados_shutdown(s->cluster);
}

/*
 * Cancel aio. Since we don't reference acb in a non-qemu thread,
 * it is safe to access it here.
 */
static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb)
{
    RBDAIOCB *acb = (RBDAIOCB *) blockacb;
    acb->cancelled = 1;
}

static AIOPool rbd_aio_pool = {
    .aiocb_size = sizeof(RBDAIOCB),
    .cancel = qemu_rbd_aio_cancel,
};

static int qemu_rbd_send_pipe(BDRVRBDState *s, RADOSCB *rcb)
{
    int ret = 0;
    while (1) {
        fd_set wfd;
        int fd = s->fds[RBD_FD_WRITE];

        /* send the op pointer to the qemu thread that is responsible
           for the aio/op completion. Must do it in a qemu thread context */
        ret = write(fd, (void *)&rcb, sizeof(rcb));
        if (ret >= 0) {
            break;
        }
        if (errno == EINTR) {
            continue;
        }
        if (errno != EAGAIN) {
            break;
        }

        FD_ZERO(&wfd);
        FD_SET(fd, &wfd);
        do {
            ret = select(fd + 1, NULL, &wfd, NULL, NULL);
        } while (ret < 0 && errno == EINTR);
    }

    return ret;
}

/*
 * This is the callback function for rbd_aio_read and _write
 *
 * Note: this function is being called from a non qemu thread so
 * we need to be careful about what we do here. Generally we only
 * write to the block notification pipe, and do the rest of the
 * io completion handling from qemu_rbd_aio_event_reader() which
 * runs in a qemu context.
 */
static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
{
    int ret;
    rcb->ret = rbd_aio_get_return_value(c);
    rbd_aio_release(c);
    ret = qemu_rbd_send_pipe(rcb->s, rcb);
    if (ret < 0) {
        error_report("failed writing to acb->s->fds");
        g_free(rcb);
    }
}

/* Callback when all queued rbd_aio requests are complete */

static void rbd_aio_bh_cb(void *opaque)
{
    RBDAIOCB *acb = opaque;

    if (acb->cmd == RBD_AIO_READ) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;

    qemu_aio_release(acb);
}

static int rbd_aio_discard_wrapper(rbd_image_t image,
                                   uint64_t off,
                                   uint64_t len,
                                   rbd_completion_t comp)
{
#ifdef LIBRBD_SUPPORTS_DISCARD
    return rbd_aio_discard(image, off, len, comp);
#else
    return -ENOTSUP;
#endif
}

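/*
 * Queue a read, write or discard request. Reads and writes go through a
 * bounce buffer so the data stays valid until the librbd callback runs;
 * discards need no buffer.
 */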
static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov,
                                       int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque,
                                       RBDAIOCmd cmd)
{
    RBDAIOCB *acb;
    RADOSCB *rcb;
    rbd_completion_t c;
    int64_t off, size;
    char *buf;
    int r;

    BDRVRBDState *s = bs->opaque;

    acb = qemu_aio_get(&rbd_aio_pool, bs, cb, opaque);
    acb->cmd = cmd;
    acb->qiov = qiov;
    if (cmd == RBD_AIO_DISCARD) {
        acb->bounce = NULL;
    } else {
        acb->bounce = qemu_blockalign(bs, qiov->size);
    }
    acb->ret = 0;
    acb->error = 0;
    acb->s = s;
    acb->cancelled = 0;
    acb->bh = NULL;

    if (cmd == RBD_AIO_WRITE) {
        qemu_iovec_to_buffer(acb->qiov, acb->bounce);
    }

    buf = acb->bounce;

    off = sector_num * BDRV_SECTOR_SIZE;
    size = nb_sectors * BDRV_SECTOR_SIZE;

    s->qemu_aio_count++; /* All the RADOSCB */

    rcb = g_malloc(sizeof(RADOSCB));
    rcb->done = 0;
    rcb->acb = acb;
    rcb->buf = buf;
    rcb->s = acb->s;
    rcb->size = size;
    r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c);
    if (r < 0) {
        goto failed;
    }

    switch (cmd) {
    case RBD_AIO_WRITE:
        r = rbd_aio_write(s->image, off, size, buf, c);
        break;
    case RBD_AIO_READ:
        r = rbd_aio_read(s->image, off, size, buf, c);
        break;
    case RBD_AIO_DISCARD:
        r = rbd_aio_discard_wrapper(s->image, off, size, c);
        break;
    default:
        r = -EINVAL;
    }

    if (r < 0) {
        goto failed;
    }

    return &acb->common;

failed:
    g_free(rcb);
    s->qemu_aio_count--;
    qemu_aio_release(acb);
    return NULL;
}

static BlockDriverAIOCB *qemu_rbd_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
                         RBD_AIO_READ);
}

static BlockDriverAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov,
                                             int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
                         RBD_AIO_WRITE);
}

static int qemu_rbd_co_flush(BlockDriverState *bs)
{
#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 1)
    /* rbd_flush added in 0.1.1 */
    BDRVRBDState *s = bs->opaque;
    return rbd_flush(s->image);
#else
    return 0;
#endif
}

static int qemu_rbd_getinfo(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVRBDState *s = bs->opaque;
    rbd_image_info_t info;
    int r;

    r = rbd_stat(s->image, &info, sizeof(info));
    if (r < 0) {
        return r;
    }

    bdi->cluster_size = info.obj_size;
    return 0;
}

static int64_t qemu_rbd_getlength(BlockDriverState *bs)
{
    BDRVRBDState *s = bs->opaque;
    rbd_image_info_t info;
    int r;

    r = rbd_stat(s->image, &info, sizeof(info));
    if (r < 0) {
        return r;
    }

    return info.size;
}

static int qemu_rbd_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVRBDState *s = bs->opaque;
    int r;

    r = rbd_resize(s->image, offset);
    if (r < 0) {
        return r;
    }

    return 0;
}

static int qemu_rbd_snap_create(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info)
{
    BDRVRBDState *s = bs->opaque;
    int r;

    if (sn_info->name[0] == '\0') {
        return -EINVAL; /* we need a name for rbd snapshots */
    }

    /*
     * rbd snapshots are using the name as the user controlled unique identifier
     * we can't use the rbd snapid for that purpose, as it can't be set
     */
    if (sn_info->id_str[0] != '\0' &&
        strcmp(sn_info->id_str, sn_info->name) != 0) {
        return -EINVAL;
    }

    if (strlen(sn_info->name) >= sizeof(sn_info->id_str)) {
        return -ERANGE;
    }

    r = rbd_snap_create(s->image, sn_info->name);
    if (r < 0) {
        error_report("failed to create snap: %s", strerror(-r));
        return r;
    }

    return 0;
}

static int qemu_rbd_snap_remove(BlockDriverState *bs,
                                const char *snapshot_name)
{
    BDRVRBDState *s = bs->opaque;
    int r;

    r = rbd_snap_remove(s->image, snapshot_name);
    return r;
}

static int qemu_rbd_snap_rollback(BlockDriverState *bs,
                                  const char *snapshot_name)
{
    BDRVRBDState *s = bs->opaque;
    int r;

    r = rbd_snap_rollback(s->image, snapshot_name);
    return r;
}

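/*
 * Build a QEMUSnapshotInfo table for all snapshots of the image,
 * retrying the librbd query while it reports -ERANGE (buffer too small).
 */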
static int qemu_rbd_snap_list(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_tab)
{
    BDRVRBDState *s = bs->opaque;
    QEMUSnapshotInfo *sn_info, *sn_tab = NULL;
    int i, snap_count;
    rbd_snap_info_t *snaps;
    int max_snaps = RBD_MAX_SNAPS;

    do {
        snaps = g_malloc(sizeof(*snaps) * max_snaps);
        snap_count = rbd_snap_list(s->image, snaps, &max_snaps);
        if (snap_count < 0) {
            g_free(snaps);
        }
    } while (snap_count == -ERANGE);

    if (snap_count <= 0) {
        goto done;
    }

    sn_tab = g_malloc0(snap_count * sizeof(QEMUSnapshotInfo));

    for (i = 0; i < snap_count; i++) {
        const char *snap_name = snaps[i].name;

        sn_info = sn_tab + i;
        pstrcpy(sn_info->id_str, sizeof(sn_info->id_str), snap_name);
        pstrcpy(sn_info->name, sizeof(sn_info->name), snap_name);

        sn_info->vm_state_size = snaps[i].size;
        sn_info->date_sec = 0;
        sn_info->date_nsec = 0;
        sn_info->vm_clock_nsec = 0;
    }
    rbd_snap_list_end(snaps);

 done:
    *psn_tab = sn_tab;
    return snap_count;
}

#ifdef LIBRBD_SUPPORTS_DISCARD
static BlockDriverAIOCB *qemu_rbd_aio_discard(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors,
                                              BlockDriverCompletionFunc *cb,
                                              void *opaque)
{
    return rbd_start_aio(bs, sector_num, NULL, nb_sectors, cb, opaque,
                         RBD_AIO_DISCARD);
}
#endif

static QEMUOptionParameter qemu_rbd_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "RBD object size"
    },
    {NULL}
};

static BlockDriver bdrv_rbd = {
    .format_name        = "rbd",
    .instance_size      = sizeof(BDRVRBDState),
    .bdrv_file_open     = qemu_rbd_open,
    .bdrv_close         = qemu_rbd_close,
    .bdrv_create        = qemu_rbd_create,
    .bdrv_get_info      = qemu_rbd_getinfo,
    .create_options     = qemu_rbd_create_options,
    .bdrv_getlength     = qemu_rbd_getlength,
    .bdrv_truncate      = qemu_rbd_truncate,
    .protocol_name      = "rbd",

    .bdrv_aio_readv     = qemu_rbd_aio_readv,
    .bdrv_aio_writev    = qemu_rbd_aio_writev,
    .bdrv_co_flush_to_disk = qemu_rbd_co_flush,

#ifdef LIBRBD_SUPPORTS_DISCARD
    .bdrv_aio_discard   = qemu_rbd_aio_discard,
#endif

    .bdrv_snapshot_create = qemu_rbd_snap_create,
    .bdrv_snapshot_delete = qemu_rbd_snap_remove,
    .bdrv_snapshot_list = qemu_rbd_snap_list,
    .bdrv_snapshot_goto = qemu_rbd_snap_rollback,
};

static void bdrv_rbd_init(void)
{
    bdrv_register(&bdrv_rbd);
}

block_init(bdrv_rbd_init);