]> git.proxmox.com Git - qemu.git/blame - block/rbd.c
rbd: add an asynchronous flush
[qemu.git] / block / rbd.c
CommitLineData
f27aaf4b
CB
1/*
2 * QEMU Block driver for RADOS (Ceph)
3 *
ad32e9c0
JD
4 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
5 * Josh Durgin <josh.durgin@dreamhost.com>
f27aaf4b
CB
6 *
7 * This work is licensed under the terms of the GNU GPL, version 2. See
8 * the COPYING file in the top-level directory.
9 *
6b620ca3
PB
10 * Contributions after 2012-01-13 are licensed under the terms of the
11 * GNU GPL, version 2 or (at your option) any later version.
f27aaf4b
CB
12 */
13
ad32e9c0
JD
14#include <inttypes.h>
15
f27aaf4b 16#include "qemu-common.h"
1de7afc9 17#include "qemu/error-report.h"
737e150e 18#include "block/block_int.h"
f27aaf4b 19
ad32e9c0 20#include <rbd/librbd.h>
f27aaf4b 21
f27aaf4b
CB
22/*
23 * When specifying the image filename use:
24 *
fab5cf59 25 * rbd:poolname/devicename[@snapshotname][:option1=value1[:option2=value2...]]
f27aaf4b 26 *
9e1fbcde 27 * poolname must be the name of an existing rados pool.
f27aaf4b 28 *
9e1fbcde 29 * devicename is the name of the rbd image.
f27aaf4b 30 *
9e1fbcde
SW
31 * Each option given is used to configure rados, and may be any valid
32 * Ceph option, "id", or "conf".
fab5cf59 33 *
9e1fbcde
SW
34 * The "id" option indicates what user we should authenticate as to
35 * the Ceph cluster. If it is excluded we will use the Ceph default
36 * (normally 'admin').
f27aaf4b 37 *
9e1fbcde
SW
38 * The "conf" option specifies a Ceph configuration file to read. If
39 * it is not specified, we will read from the default Ceph locations
40 * (e.g., /etc/ceph/ceph.conf). To avoid reading _any_ configuration
41 * file, specify conf=/dev/null.
f27aaf4b 42 *
9e1fbcde
SW
43 * Configuration values containing :, @, or = can be escaped with a
44 * leading "\".
f27aaf4b
CB
45 */
46
787f3133
JD
47/* rbd_aio_discard added in 0.1.2 */
48#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 2)
49#define LIBRBD_SUPPORTS_DISCARD
50#else
51#undef LIBRBD_SUPPORTS_DISCARD
52#endif
53
f27aaf4b
CB
54#define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER)
55
ad32e9c0
JD
56#define RBD_MAX_CONF_NAME_SIZE 128
57#define RBD_MAX_CONF_VAL_SIZE 512
58#define RBD_MAX_CONF_SIZE 1024
59#define RBD_MAX_POOL_NAME_SIZE 128
60#define RBD_MAX_SNAP_NAME_SIZE 128
61#define RBD_MAX_SNAPS 100
62
/* Kind of asynchronous operation carried by an RBDAIOCB */
typedef enum {
    RBD_AIO_READ,
    RBD_AIO_WRITE,
    RBD_AIO_DISCARD,
    RBD_AIO_FLUSH
} RBDAIOCmd;
f27aaf4b
CB
70typedef struct RBDAIOCB {
71 BlockDriverAIOCB common;
72 QEMUBH *bh;
08448d51 73 int64_t ret;
f27aaf4b
CB
74 QEMUIOVector *qiov;
75 char *bounce;
787f3133 76 RBDAIOCmd cmd;
f27aaf4b 77 int64_t sector_num;
f27aaf4b
CB
78 int error;
79 struct BDRVRBDState *s;
80 int cancelled;
473c7f02 81 int status;
f27aaf4b
CB
82} RBDAIOCB;
83
84typedef struct RADOSCB {
85 int rcbid;
86 RBDAIOCB *acb;
87 struct BDRVRBDState *s;
88 int done;
ad32e9c0 89 int64_t size;
f27aaf4b 90 char *buf;
08448d51 91 int64_t ret;
f27aaf4b
CB
92} RADOSCB;
93
94#define RBD_FD_READ 0
95#define RBD_FD_WRITE 1
96
97typedef struct BDRVRBDState {
98 int fds[2];
ad32e9c0
JD
99 rados_t cluster;
100 rados_ioctx_t io_ctx;
101 rbd_image_t image;
102 char name[RBD_MAX_IMAGE_NAME_SIZE];
f27aaf4b 103 int qemu_aio_count;
ad32e9c0 104 char *snap;
f27aaf4b
CB
105 int event_reader_pos;
106 RADOSCB *event_rcb;
107} BDRVRBDState;
108
f27aaf4b
CB
109static void rbd_aio_bh_cb(void *opaque);
110
/*
 * Copy the next token from *src (terminated by delim or NUL) into dst.
 * Backslash escapes the delimiter inside a token.  On success *p points
 * past the delimiter (NULL when the delimiter was not found or delim is
 * NUL).  Returns 0, or -EINVAL when the token is empty or too long for
 * dst_len; "name" is only used in the error message.
 */
static int qemu_rbd_next_tok(char *dst, int dst_len,
                             char *src, char delim,
                             const char *name,
                             char **p)
{
    int len;
    char *cursor;

    *p = NULL;

    if (delim != '\0') {
        /* Scan for an unescaped delimiter; '\' protects the next char. */
        for (cursor = src; *cursor; ++cursor) {
            if (*cursor == delim) {
                break;
            }
            if (*cursor == '\\' && cursor[1] != '\0') {
                cursor++;
            }
        }
        if (*cursor == delim) {
            *p = cursor + 1;
            *cursor = '\0';     /* terminate the token in place */
        }
    }

    len = strlen(src);
    if (len >= dst_len) {
        error_report("%s too long", name);
        return -EINVAL;
    } else if (len == 0) {
        error_report("%s too short", name);
        return -EINVAL;
    }

    pstrcpy(dst, dst_len, src);

    return 0;
}
/*
 * Strip backslash escapes from src in place: "\X" becomes "X" (a
 * trailing lone backslash is kept as-is).
 */
static void qemu_rbd_unescape(char *src)
{
    char *out = src;

    for (; *src; ++src, ++out) {
        if (*src == '\\' && src[1] != '\0') {
            src++;              /* drop the escape, keep the escaped char */
        }
        *out = *src;
    }
    *out = '\0';
}
/*
 * Split an "rbd:pool/name[@snap][:opts]" filename into its components.
 * pool/snap/name/conf receive unescaped copies; snap and conf are set
 * to the empty string when absent.  Returns 0 or -EINVAL.
 */
static int qemu_rbd_parsename(const char *filename,
                              char *pool, int pool_len,
                              char *snap, int snap_len,
                              char *name, int name_len,
                              char *conf, int conf_len)
{
    const char *start;
    char *p, *buf;
    int ret;

    if (!strstart(filename, "rbd:", &start)) {
        return -EINVAL;
    }

    /* Tokenize a mutable copy; the original filename stays untouched. */
    buf = g_strdup(start);
    p = buf;
    *snap = '\0';
    *conf = '\0';

    ret = qemu_rbd_next_tok(pool, pool_len, p, '/', "pool name", &p);
    if (ret < 0 || !p) {
        ret = -EINVAL;
        goto done;
    }
    qemu_rbd_unescape(pool);

    if (strchr(p, '@')) {
        /* image@snapshot form */
        ret = qemu_rbd_next_tok(name, name_len, p, '@', "object name", &p);
        if (ret < 0) {
            goto done;
        }
        ret = qemu_rbd_next_tok(snap, snap_len, p, ':', "snap name", &p);
        qemu_rbd_unescape(snap);
    } else {
        ret = qemu_rbd_next_tok(name, name_len, p, ':', "object name", &p);
    }
    qemu_rbd_unescape(name);
    if (ret < 0 || !p) {
        goto done;
    }

    /* Everything after the name/snap is the rados configuration string. */
    ret = qemu_rbd_next_tok(conf, conf_len, p, '\0', "configuration", &p);

done:
    g_free(buf);
    return ret;
}
/*
 * Extract the value of an "id=" option from a ':'-separated conf string
 * into clientname.  Returns clientname on success, NULL when no "id="
 * option is present.
 */
static char *qemu_rbd_parse_clientname(const char *conf, char *clientname)
{
    const char *cursor = conf;

    while (*cursor) {
        const char *sep = strchr(cursor, ':');
        int len = sep ? (int)(sep - cursor) : (int)strlen(cursor);

        if (strncmp(cursor, "id=", 3) == 0) {
            len -= 3;
            strncpy(clientname, cursor + 3, len);
            clientname[len] = '\0';   /* strncpy does not terminate here */
            return clientname;
        }
        if (sep == NULL) {
            break;
        }
        cursor = sep + 1;
    }
    return NULL;
}
fab5cf59
JD
238static int qemu_rbd_set_conf(rados_t cluster, const char *conf)
239{
240 char *p, *buf;
241 char name[RBD_MAX_CONF_NAME_SIZE];
242 char value[RBD_MAX_CONF_VAL_SIZE];
243 int ret = 0;
244
7267c094 245 buf = g_strdup(conf);
fab5cf59
JD
246 p = buf;
247
248 while (p) {
249 ret = qemu_rbd_next_tok(name, sizeof(name), p,
250 '=', "conf option name", &p);
251 if (ret < 0) {
252 break;
253 }
16a06b24 254 qemu_rbd_unescape(name);
fab5cf59
JD
255
256 if (!p) {
257 error_report("conf option %s has no value", name);
258 ret = -EINVAL;
259 break;
260 }
261
262 ret = qemu_rbd_next_tok(value, sizeof(value), p,
263 ':', "conf option value", &p);
264 if (ret < 0) {
265 break;
266 }
16a06b24 267 qemu_rbd_unescape(value);
fab5cf59 268
7c7e9df0
SW
269 if (strcmp(name, "conf") == 0) {
270 ret = rados_conf_read_file(cluster, value);
fab5cf59 271 if (ret < 0) {
7c7e9df0 272 error_report("error reading conf file %s", value);
fab5cf59
JD
273 break;
274 }
7c7e9df0
SW
275 } else if (strcmp(name, "id") == 0) {
276 /* ignore, this is parsed by qemu_rbd_parse_clientname() */
fab5cf59 277 } else {
7c7e9df0 278 ret = rados_conf_set(cluster, name, value);
fab5cf59 279 if (ret < 0) {
7c7e9df0
SW
280 error_report("invalid conf option %s", name);
281 ret = -EINVAL;
fab5cf59
JD
282 break;
283 }
284 }
285 }
286
7267c094 287 g_free(buf);
fab5cf59
JD
288 return ret;
289}
290
ad32e9c0 291static int qemu_rbd_create(const char *filename, QEMUOptionParameter *options)
f27aaf4b
CB
292{
293 int64_t bytes = 0;
294 int64_t objsize;
ad32e9c0
JD
295 int obj_order = 0;
296 char pool[RBD_MAX_POOL_NAME_SIZE];
297 char name[RBD_MAX_IMAGE_NAME_SIZE];
298 char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
fab5cf59 299 char conf[RBD_MAX_CONF_SIZE];
7c7e9df0
SW
300 char clientname_buf[RBD_MAX_CONF_SIZE];
301 char *clientname;
ad32e9c0
JD
302 rados_t cluster;
303 rados_ioctx_t io_ctx;
f27aaf4b
CB
304 int ret;
305
ad32e9c0
JD
306 if (qemu_rbd_parsename(filename, pool, sizeof(pool),
307 snap_buf, sizeof(snap_buf),
fab5cf59
JD
308 name, sizeof(name),
309 conf, sizeof(conf)) < 0) {
f27aaf4b
CB
310 return -EINVAL;
311 }
f27aaf4b 312
f27aaf4b
CB
313 /* Read out options */
314 while (options && options->name) {
315 if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
316 bytes = options->value.n;
317 } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
318 if (options->value.n) {
319 objsize = options->value.n;
320 if ((objsize - 1) & objsize) { /* not a power of 2? */
321 error_report("obj size needs to be power of 2");
322 return -EINVAL;
323 }
324 if (objsize < 4096) {
325 error_report("obj size too small");
326 return -EINVAL;
327 }
ad32e9c0 328 obj_order = ffs(objsize) - 1;
f27aaf4b
CB
329 }
330 }
331 options++;
332 }
333
7c7e9df0
SW
334 clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
335 if (rados_create(&cluster, clientname) < 0) {
f27aaf4b
CB
336 error_report("error initializing");
337 return -EIO;
338 }
339
fab5cf59 340 if (strstr(conf, "conf=") == NULL) {
f9fe18ec
SW
341 /* try default location, but ignore failure */
342 rados_conf_read_file(cluster, NULL);
fab5cf59
JD
343 }
344
345 if (conf[0] != '\0' &&
346 qemu_rbd_set_conf(cluster, conf) < 0) {
347 error_report("error setting config options");
ad32e9c0 348 rados_shutdown(cluster);
f27aaf4b
CB
349 return -EIO;
350 }
351
ad32e9c0
JD
352 if (rados_connect(cluster) < 0) {
353 error_report("error connecting");
354 rados_shutdown(cluster);
f27aaf4b
CB
355 return -EIO;
356 }
f27aaf4b 357
ad32e9c0
JD
358 if (rados_ioctx_create(cluster, pool, &io_ctx) < 0) {
359 error_report("error opening pool %s", pool);
360 rados_shutdown(cluster);
361 return -EIO;
f27aaf4b
CB
362 }
363
ad32e9c0
JD
364 ret = rbd_create(io_ctx, name, bytes, &obj_order);
365 rados_ioctx_destroy(io_ctx);
366 rados_shutdown(cluster);
f27aaf4b
CB
367
368 return ret;
369}
370
371/*
ad32e9c0
JD
372 * This aio completion is being called from qemu_rbd_aio_event_reader()
373 * and runs in qemu context. It schedules a bh, but just in case the aio
f27aaf4b
CB
374 * was not cancelled before.
375 */
ad32e9c0 376static void qemu_rbd_complete_aio(RADOSCB *rcb)
f27aaf4b
CB
377{
378 RBDAIOCB *acb = rcb->acb;
379 int64_t r;
380
f27aaf4b
CB
381 r = rcb->ret;
382
dc7588c1 383 if (acb->cmd != RBD_AIO_READ) {
f27aaf4b
CB
384 if (r < 0) {
385 acb->ret = r;
386 acb->error = 1;
387 } else if (!acb->error) {
ad32e9c0 388 acb->ret = rcb->size;
f27aaf4b
CB
389 }
390 } else {
ad32e9c0
JD
391 if (r < 0) {
392 memset(rcb->buf, 0, rcb->size);
f27aaf4b
CB
393 acb->ret = r;
394 acb->error = 1;
ad32e9c0
JD
395 } else if (r < rcb->size) {
396 memset(rcb->buf + r, 0, rcb->size - r);
f27aaf4b 397 if (!acb->error) {
ad32e9c0 398 acb->ret = rcb->size;
f27aaf4b
CB
399 }
400 } else if (!acb->error) {
ad32e9c0 401 acb->ret = r;
f27aaf4b
CB
402 }
403 }
404 /* Note that acb->bh can be NULL in case where the aio was cancelled */
ad32e9c0
JD
405 acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb);
406 qemu_bh_schedule(acb->bh);
7267c094 407 g_free(rcb);
f27aaf4b
CB
408}
409
410/*
411 * aio fd read handler. It runs in the qemu context and calls the
412 * completion handling of completed rados aio operations.
413 */
ad32e9c0 414static void qemu_rbd_aio_event_reader(void *opaque)
f27aaf4b
CB
415{
416 BDRVRBDState *s = opaque;
417
418 ssize_t ret;
419
420 do {
421 char *p = (char *)&s->event_rcb;
422
423 /* now read the rcb pointer that was sent from a non qemu thread */
dfe80b07
SW
424 ret = read(s->fds[RBD_FD_READ], p + s->event_reader_pos,
425 sizeof(s->event_rcb) - s->event_reader_pos);
426 if (ret > 0) {
427 s->event_reader_pos += ret;
428 if (s->event_reader_pos == sizeof(s->event_rcb)) {
429 s->event_reader_pos = 0;
430 qemu_rbd_complete_aio(s->event_rcb);
431 s->qemu_aio_count--;
f27aaf4b
CB
432 }
433 }
434 } while (ret < 0 && errno == EINTR);
435}
436
ad32e9c0 437static int qemu_rbd_aio_flush_cb(void *opaque)
f27aaf4b
CB
438{
439 BDRVRBDState *s = opaque;
440
441 return (s->qemu_aio_count > 0);
442}
443
d43731c7
LY
444static int qemu_rbd_open(BlockDriverState *bs, const char *filename,
445 QDict *options, int flags)
f27aaf4b
CB
446{
447 BDRVRBDState *s = bs->opaque;
ad32e9c0
JD
448 char pool[RBD_MAX_POOL_NAME_SIZE];
449 char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
fab5cf59 450 char conf[RBD_MAX_CONF_SIZE];
7c7e9df0
SW
451 char clientname_buf[RBD_MAX_CONF_SIZE];
452 char *clientname;
f27aaf4b
CB
453 int r;
454
ad32e9c0
JD
455 if (qemu_rbd_parsename(filename, pool, sizeof(pool),
456 snap_buf, sizeof(snap_buf),
fab5cf59
JD
457 s->name, sizeof(s->name),
458 conf, sizeof(conf)) < 0) {
f27aaf4b
CB
459 return -EINVAL;
460 }
f27aaf4b 461
7c7e9df0
SW
462 clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
463 r = rados_create(&s->cluster, clientname);
ad32e9c0 464 if (r < 0) {
f27aaf4b
CB
465 error_report("error initializing");
466 return r;
467 }
468
eb93d5d9
SW
469 s->snap = NULL;
470 if (snap_buf[0] != '\0') {
471 s->snap = g_strdup(snap_buf);
472 }
473
b11f38fc
JD
474 /*
475 * Fallback to more conservative semantics if setting cache
476 * options fails. Ignore errors from setting rbd_cache because the
477 * only possible error is that the option does not exist, and
478 * librbd defaults to no caching. If write through caching cannot
479 * be set up, fall back to no caching.
480 */
481 if (flags & BDRV_O_NOCACHE) {
482 rados_conf_set(s->cluster, "rbd_cache", "false");
483 } else {
484 rados_conf_set(s->cluster, "rbd_cache", "true");
b11f38fc
JD
485 }
486
fab5cf59 487 if (strstr(conf, "conf=") == NULL) {
f9fe18ec
SW
488 /* try default location, but ignore failure */
489 rados_conf_read_file(s->cluster, NULL);
fab5cf59
JD
490 }
491
492 if (conf[0] != '\0') {
493 r = qemu_rbd_set_conf(s->cluster, conf);
494 if (r < 0) {
495 error_report("error setting config options");
eb93d5d9 496 goto failed_shutdown;
fab5cf59 497 }
f27aaf4b
CB
498 }
499
ad32e9c0
JD
500 r = rados_connect(s->cluster);
501 if (r < 0) {
502 error_report("error connecting");
eb93d5d9 503 goto failed_shutdown;
f27aaf4b
CB
504 }
505
ad32e9c0
JD
506 r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
507 if (r < 0) {
508 error_report("error opening pool %s", pool);
eb93d5d9 509 goto failed_shutdown;
f27aaf4b
CB
510 }
511
ad32e9c0 512 r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
f27aaf4b 513 if (r < 0) {
ad32e9c0 514 error_report("error reading header from %s", s->name);
eb93d5d9 515 goto failed_open;
f27aaf4b
CB
516 }
517
ad32e9c0 518 bs->read_only = (s->snap != NULL);
f27aaf4b
CB
519
520 s->event_reader_pos = 0;
521 r = qemu_pipe(s->fds);
522 if (r < 0) {
523 error_report("error opening eventfd");
524 goto failed;
525 }
526 fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
527 fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
ad32e9c0 528 qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
bafbd6a1 529 NULL, qemu_rbd_aio_flush_cb, s);
f27aaf4b 530
f27aaf4b
CB
531
532 return 0;
533
534failed:
ad32e9c0 535 rbd_close(s->image);
eb93d5d9 536failed_open:
ad32e9c0 537 rados_ioctx_destroy(s->io_ctx);
eb93d5d9 538failed_shutdown:
ad32e9c0 539 rados_shutdown(s->cluster);
eb93d5d9 540 g_free(s->snap);
f27aaf4b
CB
541 return r;
542}
543
ad32e9c0 544static void qemu_rbd_close(BlockDriverState *bs)
f27aaf4b
CB
545{
546 BDRVRBDState *s = bs->opaque;
547
548 close(s->fds[0]);
549 close(s->fds[1]);
bafbd6a1 550 qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
f27aaf4b 551
ad32e9c0
JD
552 rbd_close(s->image);
553 rados_ioctx_destroy(s->io_ctx);
7267c094 554 g_free(s->snap);
ad32e9c0 555 rados_shutdown(s->cluster);
f27aaf4b
CB
556}
557
558/*
559 * Cancel aio. Since we don't reference acb in a non qemu threads,
560 * it is safe to access it here.
561 */
ad32e9c0 562static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb)
f27aaf4b
CB
563{
564 RBDAIOCB *acb = (RBDAIOCB *) blockacb;
565 acb->cancelled = 1;
473c7f02
SP
566
567 while (acb->status == -EINPROGRESS) {
568 qemu_aio_wait();
569 }
570
571 qemu_aio_release(acb);
f27aaf4b
CB
572}
573
d7331bed 574static const AIOCBInfo rbd_aiocb_info = {
f27aaf4b 575 .aiocb_size = sizeof(RBDAIOCB),
ad32e9c0 576 .cancel = qemu_rbd_aio_cancel,
f27aaf4b
CB
577};
578
ad32e9c0 579static int qemu_rbd_send_pipe(BDRVRBDState *s, RADOSCB *rcb)
f27aaf4b 580{
ad32e9c0 581 int ret = 0;
f27aaf4b
CB
582 while (1) {
583 fd_set wfd;
ad32e9c0 584 int fd = s->fds[RBD_FD_WRITE];
f27aaf4b 585
ad32e9c0
JD
586 /* send the op pointer to the qemu thread that is responsible
587 for the aio/op completion. Must do it in a qemu thread context */
f27aaf4b
CB
588 ret = write(fd, (void *)&rcb, sizeof(rcb));
589 if (ret >= 0) {
590 break;
591 }
592 if (errno == EINTR) {
593 continue;
ad32e9c0 594 }
f27aaf4b
CB
595 if (errno != EAGAIN) {
596 break;
ad32e9c0 597 }
f27aaf4b
CB
598
599 FD_ZERO(&wfd);
600 FD_SET(fd, &wfd);
601 do {
602 ret = select(fd + 1, NULL, &wfd, NULL, NULL);
603 } while (ret < 0 && errno == EINTR);
604 }
605
ad32e9c0
JD
606 return ret;
607}
608
609/*
610 * This is the callback function for rbd_aio_read and _write
611 *
612 * Note: this function is being called from a non qemu thread so
613 * we need to be careful about what we do here. Generally we only
614 * write to the block notification pipe, and do the rest of the
615 * io completion handling from qemu_rbd_aio_event_reader() which
616 * runs in a qemu context.
617 */
618static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
619{
620 int ret;
621 rcb->ret = rbd_aio_get_return_value(c);
622 rbd_aio_release(c);
623 ret = qemu_rbd_send_pipe(rcb->s, rcb);
f27aaf4b 624 if (ret < 0) {
ad32e9c0 625 error_report("failed writing to acb->s->fds");
7267c094 626 g_free(rcb);
f27aaf4b
CB
627 }
628}
629
ad32e9c0 630/* Callback when all queued rbd_aio requests are complete */
f27aaf4b
CB
631
632static void rbd_aio_bh_cb(void *opaque)
633{
634 RBDAIOCB *acb = opaque;
635
787f3133 636 if (acb->cmd == RBD_AIO_READ) {
03396148 637 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
f27aaf4b
CB
638 }
639 qemu_vfree(acb->bounce);
640 acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
641 qemu_bh_delete(acb->bh);
642 acb->bh = NULL;
473c7f02 643 acb->status = 0;
f27aaf4b 644
473c7f02
SP
645 if (!acb->cancelled) {
646 qemu_aio_release(acb);
647 }
f27aaf4b
CB
648}
649
787f3133
JD
650static int rbd_aio_discard_wrapper(rbd_image_t image,
651 uint64_t off,
652 uint64_t len,
653 rbd_completion_t comp)
654{
655#ifdef LIBRBD_SUPPORTS_DISCARD
656 return rbd_aio_discard(image, off, len, comp);
657#else
658 return -ENOTSUP;
659#endif
660}
661
dc7588c1
JD
662static int rbd_aio_flush_wrapper(rbd_image_t image,
663 rbd_completion_t comp)
664{
665#ifdef LIBRBD_SUPPORTS_AIO_FLUSH
666 return rbd_aio_flush(image, comp);
667#else
668 return -ENOTSUP;
669#endif
670}
671
787f3133
JD
672static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
673 int64_t sector_num,
674 QEMUIOVector *qiov,
675 int nb_sectors,
676 BlockDriverCompletionFunc *cb,
677 void *opaque,
678 RBDAIOCmd cmd)
f27aaf4b
CB
679{
680 RBDAIOCB *acb;
681 RADOSCB *rcb;
ad32e9c0 682 rbd_completion_t c;
f27aaf4b
CB
683 int64_t off, size;
684 char *buf;
51a13528 685 int r;
f27aaf4b
CB
686
687 BDRVRBDState *s = bs->opaque;
688
d7331bed 689 acb = qemu_aio_get(&rbd_aiocb_info, bs, cb, opaque);
787f3133 690 acb->cmd = cmd;
f27aaf4b 691 acb->qiov = qiov;
dc7588c1 692 if (cmd == RBD_AIO_DISCARD || cmd == RBD_AIO_FLUSH) {
787f3133
JD
693 acb->bounce = NULL;
694 } else {
695 acb->bounce = qemu_blockalign(bs, qiov->size);
696 }
f27aaf4b
CB
697 acb->ret = 0;
698 acb->error = 0;
699 acb->s = s;
700 acb->cancelled = 0;
701 acb->bh = NULL;
473c7f02 702 acb->status = -EINPROGRESS;
f27aaf4b 703
787f3133 704 if (cmd == RBD_AIO_WRITE) {
d5e6b161 705 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
f27aaf4b
CB
706 }
707
708 buf = acb->bounce;
709
710 off = sector_num * BDRV_SECTOR_SIZE;
711 size = nb_sectors * BDRV_SECTOR_SIZE;
f27aaf4b 712
ad32e9c0 713 s->qemu_aio_count++; /* All the RADOSCB */
f27aaf4b 714
7267c094 715 rcb = g_malloc(sizeof(RADOSCB));
ad32e9c0
JD
716 rcb->done = 0;
717 rcb->acb = acb;
718 rcb->buf = buf;
719 rcb->s = acb->s;
720 rcb->size = size;
51a13528
JD
721 r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c);
722 if (r < 0) {
723 goto failed;
724 }
f27aaf4b 725
787f3133
JD
726 switch (cmd) {
727 case RBD_AIO_WRITE:
51a13528 728 r = rbd_aio_write(s->image, off, size, buf, c);
787f3133
JD
729 break;
730 case RBD_AIO_READ:
51a13528 731 r = rbd_aio_read(s->image, off, size, buf, c);
787f3133
JD
732 break;
733 case RBD_AIO_DISCARD:
734 r = rbd_aio_discard_wrapper(s->image, off, size, c);
735 break;
dc7588c1
JD
736 case RBD_AIO_FLUSH:
737 r = rbd_aio_flush_wrapper(s->image, c);
738 break;
787f3133
JD
739 default:
740 r = -EINVAL;
51a13528
JD
741 }
742
743 if (r < 0) {
744 goto failed;
f27aaf4b
CB
745 }
746
747 return &acb->common;
51a13528
JD
748
749failed:
7267c094 750 g_free(rcb);
51a13528
JD
751 s->qemu_aio_count--;
752 qemu_aio_release(acb);
753 return NULL;
f27aaf4b
CB
754}
755
ad32e9c0
JD
756static BlockDriverAIOCB *qemu_rbd_aio_readv(BlockDriverState *bs,
757 int64_t sector_num,
758 QEMUIOVector *qiov,
759 int nb_sectors,
760 BlockDriverCompletionFunc *cb,
761 void *opaque)
f27aaf4b 762{
787f3133
JD
763 return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
764 RBD_AIO_READ);
f27aaf4b
CB
765}
766
ad32e9c0
JD
767static BlockDriverAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs,
768 int64_t sector_num,
769 QEMUIOVector *qiov,
770 int nb_sectors,
771 BlockDriverCompletionFunc *cb,
772 void *opaque)
f27aaf4b 773{
787f3133
JD
774 return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
775 RBD_AIO_WRITE);
f27aaf4b
CB
776}
777
dc7588c1
JD
778#ifdef LIBRBD_SUPPORTS_AIO_FLUSH
779static BlockDriverAIOCB *qemu_rbd_aio_flush(BlockDriverState *bs,
780 BlockDriverCompletionFunc *cb,
781 void *opaque)
782{
783 return rbd_start_aio(bs, 0, NULL, 0, cb, opaque, RBD_AIO_FLUSH);
784}
785
786#else
787
8b94ff85 788static int qemu_rbd_co_flush(BlockDriverState *bs)
7a3f5fe9
SW
789{
790#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 1)
791 /* rbd_flush added in 0.1.1 */
792 BDRVRBDState *s = bs->opaque;
793 return rbd_flush(s->image);
794#else
795 return 0;
796#endif
797}
dc7588c1 798#endif
7a3f5fe9 799
ad32e9c0 800static int qemu_rbd_getinfo(BlockDriverState *bs, BlockDriverInfo *bdi)
f27aaf4b
CB
801{
802 BDRVRBDState *s = bs->opaque;
ad32e9c0
JD
803 rbd_image_info_t info;
804 int r;
805
806 r = rbd_stat(s->image, &info, sizeof(info));
807 if (r < 0) {
808 return r;
809 }
810
811 bdi->cluster_size = info.obj_size;
f27aaf4b
CB
812 return 0;
813}
814
ad32e9c0 815static int64_t qemu_rbd_getlength(BlockDriverState *bs)
f27aaf4b
CB
816{
817 BDRVRBDState *s = bs->opaque;
ad32e9c0
JD
818 rbd_image_info_t info;
819 int r;
f27aaf4b 820
ad32e9c0
JD
821 r = rbd_stat(s->image, &info, sizeof(info));
822 if (r < 0) {
823 return r;
824 }
825
826 return info.size;
f27aaf4b
CB
827}
828
30cdc48c
JD
829static int qemu_rbd_truncate(BlockDriverState *bs, int64_t offset)
830{
831 BDRVRBDState *s = bs->opaque;
832 int r;
833
834 r = rbd_resize(s->image, offset);
835 if (r < 0) {
836 return r;
837 }
838
839 return 0;
840}
841
ad32e9c0
JD
842static int qemu_rbd_snap_create(BlockDriverState *bs,
843 QEMUSnapshotInfo *sn_info)
f27aaf4b
CB
844{
845 BDRVRBDState *s = bs->opaque;
f27aaf4b 846 int r;
f27aaf4b
CB
847
848 if (sn_info->name[0] == '\0') {
849 return -EINVAL; /* we need a name for rbd snapshots */
850 }
851
852 /*
853 * rbd snapshots are using the name as the user controlled unique identifier
854 * we can't use the rbd snapid for that purpose, as it can't be set
855 */
856 if (sn_info->id_str[0] != '\0' &&
857 strcmp(sn_info->id_str, sn_info->name) != 0) {
858 return -EINVAL;
859 }
860
861 if (strlen(sn_info->name) >= sizeof(sn_info->id_str)) {
862 return -ERANGE;
863 }
864
ad32e9c0 865 r = rbd_snap_create(s->image, sn_info->name);
f27aaf4b 866 if (r < 0) {
ad32e9c0 867 error_report("failed to create snap: %s", strerror(-r));
f27aaf4b
CB
868 return r;
869 }
870
f27aaf4b
CB
871 return 0;
872}
873
bd603247
GF
874static int qemu_rbd_snap_remove(BlockDriverState *bs,
875 const char *snapshot_name)
876{
877 BDRVRBDState *s = bs->opaque;
878 int r;
879
880 r = rbd_snap_remove(s->image, snapshot_name);
881 return r;
882}
883
884static int qemu_rbd_snap_rollback(BlockDriverState *bs,
885 const char *snapshot_name)
886{
887 BDRVRBDState *s = bs->opaque;
888 int r;
889
890 r = rbd_snap_rollback(s->image, snapshot_name);
891 return r;
892}
893
ad32e9c0
JD
894static int qemu_rbd_snap_list(BlockDriverState *bs,
895 QEMUSnapshotInfo **psn_tab)
f27aaf4b
CB
896{
897 BDRVRBDState *s = bs->opaque;
f27aaf4b 898 QEMUSnapshotInfo *sn_info, *sn_tab = NULL;
ad32e9c0
JD
899 int i, snap_count;
900 rbd_snap_info_t *snaps;
901 int max_snaps = RBD_MAX_SNAPS;
f27aaf4b 902
ad32e9c0 903 do {
7267c094 904 snaps = g_malloc(sizeof(*snaps) * max_snaps);
ad32e9c0
JD
905 snap_count = rbd_snap_list(s->image, snaps, &max_snaps);
906 if (snap_count < 0) {
7267c094 907 g_free(snaps);
f27aaf4b 908 }
ad32e9c0 909 } while (snap_count == -ERANGE);
f27aaf4b 910
ad32e9c0 911 if (snap_count <= 0) {
b9c53290 912 goto done;
f27aaf4b
CB
913 }
914
7267c094 915 sn_tab = g_malloc0(snap_count * sizeof(QEMUSnapshotInfo));
f27aaf4b 916
ad32e9c0
JD
917 for (i = 0; i < snap_count; i++) {
918 const char *snap_name = snaps[i].name;
f27aaf4b
CB
919
920 sn_info = sn_tab + i;
921 pstrcpy(sn_info->id_str, sizeof(sn_info->id_str), snap_name);
922 pstrcpy(sn_info->name, sizeof(sn_info->name), snap_name);
f27aaf4b 923
ad32e9c0 924 sn_info->vm_state_size = snaps[i].size;
f27aaf4b
CB
925 sn_info->date_sec = 0;
926 sn_info->date_nsec = 0;
927 sn_info->vm_clock_nsec = 0;
928 }
ad32e9c0
JD
929 rbd_snap_list_end(snaps);
930
b9c53290 931 done:
f27aaf4b 932 *psn_tab = sn_tab;
f27aaf4b 933 return snap_count;
f27aaf4b
CB
934}
935
#ifdef LIBRBD_SUPPORTS_DISCARD
/* bdrv_aio_discard callback: thin wrapper around rbd_start_aio() */
static BlockDriverAIOCB *qemu_rbd_aio_discard(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors,
                                              BlockDriverCompletionFunc *cb,
                                              void *opaque)
{
    return rbd_start_aio(bs, sector_num, NULL, nb_sectors, cb, opaque,
                         RBD_AIO_DISCARD);
}
#endif
ad32e9c0 948static QEMUOptionParameter qemu_rbd_create_options[] = {
f27aaf4b
CB
949 {
950 .name = BLOCK_OPT_SIZE,
951 .type = OPT_SIZE,
952 .help = "Virtual disk size"
953 },
954 {
955 .name = BLOCK_OPT_CLUSTER_SIZE,
956 .type = OPT_SIZE,
957 .help = "RBD object size"
958 },
959 {NULL}
960};
961
962static BlockDriver bdrv_rbd = {
963 .format_name = "rbd",
964 .instance_size = sizeof(BDRVRBDState),
ad32e9c0
JD
965 .bdrv_file_open = qemu_rbd_open,
966 .bdrv_close = qemu_rbd_close,
967 .bdrv_create = qemu_rbd_create,
968 .bdrv_get_info = qemu_rbd_getinfo,
969 .create_options = qemu_rbd_create_options,
970 .bdrv_getlength = qemu_rbd_getlength,
30cdc48c 971 .bdrv_truncate = qemu_rbd_truncate,
f27aaf4b
CB
972 .protocol_name = "rbd",
973
c68b89ac
KW
974 .bdrv_aio_readv = qemu_rbd_aio_readv,
975 .bdrv_aio_writev = qemu_rbd_aio_writev,
dc7588c1
JD
976
977#ifdef LIBRBD_SUPPORTS_AIO_FLUSH
978 .bdrv_aio_flush = qemu_rbd_aio_flush,
979#else
c68b89ac 980 .bdrv_co_flush_to_disk = qemu_rbd_co_flush,
dc7588c1 981#endif
f27aaf4b 982
787f3133
JD
983#ifdef LIBRBD_SUPPORTS_DISCARD
984 .bdrv_aio_discard = qemu_rbd_aio_discard,
985#endif
986
c68b89ac 987 .bdrv_snapshot_create = qemu_rbd_snap_create,
bd603247 988 .bdrv_snapshot_delete = qemu_rbd_snap_remove,
c68b89ac 989 .bdrv_snapshot_list = qemu_rbd_snap_list,
bd603247 990 .bdrv_snapshot_goto = qemu_rbd_snap_rollback,
f27aaf4b
CB
991};
992
993static void bdrv_rbd_init(void)
994{
995 bdrv_register(&bdrv_rbd);
996}
997
998block_init(bdrv_rbd_init);