]> git.proxmox.com Git - mirror_qemu.git/blame - block/gluster.c
cputlb: don't cpu_abort() if guest tries to execute outside RAM or RAM
[mirror_qemu.git] / block / gluster.c
CommitLineData
8d6d89cb
BR
1/*
2 * GlusterFS backend for QEMU
3 *
4 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
5 *
85c09bc0
BR
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8d6d89cb 8 *
8d6d89cb 9 */
80c71a24 10#include "qemu/osdep.h"
8d6d89cb 11#include <glusterfs/api/glfs.h>
737e150e 12#include "block/block_int.h"
da34e65c 13#include "qapi/error.h"
1de7afc9 14#include "qemu/uri.h"
8d6d89cb
BR
15
/*
 * Per-request completion state shared between the coroutine that issues an
 * async gluster request and the callback that finishes it.
 */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected transfer size in bytes (0 for fsync/discard) */
    int ret;                 /* 0 on success, negative errno on failure */
    QEMUBH *bh;              /* bottom half used to re-enter the coroutine */
    Coroutine *coroutine;    /* coroutine suspended while the request is in flight */
    AioContext *aio_context; /* context in which the bottom half must run */
} GlusterAIOCB;
23
/* Per-BlockDriverState state: one gluster connection plus one open image fd. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;     /* handle for the connection to the gluster volume */
    struct glfs_fd *fd;    /* open file descriptor for the image on that volume */
} BDRVGlusterState;
28
8d6d89cb
BR
/* Parsed representation of a gluster[+transport]://... image URI. */
typedef struct GlusterConf {
    char *server;      /* volfile server host/IP, or unix socket path for "unix" */
    int port;          /* glusterd port; 0 lets gluster pick the default */
    char *volname;     /* name of the gluster volume holding the image */
    char *image;       /* path of the image inside the volume */
    char *transport;   /* "tcp", "unix" or "rdma" */
} GlusterConf;
36
37static void qemu_gluster_gconf_free(GlusterConf *gconf)
38{
1b37b344
JC
39 if (gconf) {
40 g_free(gconf->server);
41 g_free(gconf->volname);
42 g_free(gconf->image);
43 g_free(gconf->transport);
44 g_free(gconf);
45 }
8d6d89cb
BR
46}
47
48static int parse_volume_options(GlusterConf *gconf, char *path)
49{
50 char *p, *q;
51
52 if (!path) {
53 return -EINVAL;
54 }
55
56 /* volume */
57 p = q = path + strspn(path, "/");
58 p += strcspn(p, "/");
59 if (*p == '\0') {
60 return -EINVAL;
61 }
62 gconf->volname = g_strndup(q, p - q);
63
64 /* image */
65 p += strspn(p, "/");
66 if (*p == '\0') {
67 return -EINVAL;
68 }
69 gconf->image = g_strdup(p);
70 return 0;
71}
72
/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either hostname, ipv4 address
 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
 * If transport type is 'unix', then 'server' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0 which will make gluster to use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: a bare "gluster" scheme (or no scheme) defaults to tcp */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /* The unix transport requires exactly one query parameter (the socket
     * path); any query parameter is invalid for the other transports. */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* host/port make no sense for a unix socket; the socket path is
         * passed as the "server" to glfs_set_volfile_server() */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
170
a7451cb8
PB
171static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
172 Error **errp)
8d6d89cb
BR
173{
174 struct glfs *glfs = NULL;
175 int ret;
176 int old_errno;
177
178 ret = qemu_gluster_parseuri(gconf, filename);
179 if (ret < 0) {
a7451cb8
PB
180 error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
181 "volname/image[?socket=...]");
8d6d89cb
BR
182 errno = -ret;
183 goto out;
184 }
185
186 glfs = glfs_new(gconf->volname);
187 if (!glfs) {
188 goto out;
189 }
190
191 ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
192 gconf->port);
193 if (ret < 0) {
194 goto out;
195 }
196
197 /*
198 * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
199 * GlusterFS makes GF_LOG_* macros available to libgfapi users.
200 */
201 ret = glfs_set_logging(glfs, "-", 4);
202 if (ret < 0) {
203 goto out;
204 }
205
206 ret = glfs_init(glfs);
207 if (ret) {
a7451cb8
PB
208 error_setg_errno(errp, errno,
209 "Gluster connection failed for server=%s port=%d "
210 "volume=%s image=%s transport=%s", gconf->server,
211 gconf->port, gconf->volname, gconf->image,
212 gconf->transport);
4557117d
PK
213
214 /* glfs_init sometimes doesn't set errno although docs suggest that */
215 if (errno == 0)
216 errno = EINVAL;
217
8d6d89cb
BR
218 goto out;
219 }
220 return glfs;
221
222out:
223 if (glfs) {
224 old_errno = errno;
225 glfs_fini(glfs);
226 errno = old_errno;
227 }
228 return NULL;
229}
230
/*
 * Bottom half scheduled by gluster_finish_aiocb(); runs in the request's
 * AioContext and resumes the coroutine that issued the I/O.
 */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
239
7c815372
BR
/*
 * AIO callback routine called from GlusterFS thread.
 *
 * Translates the libgfapi result into a negative-errno code in acb->ret,
 * then defers coroutine re-entry to a bottom half in the request's
 * AioContext, since we are running on a foreign (gluster) thread here.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = -errno; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
258
b4894776
KW
/* Runtime options accepted by qemu_gluster_open(): only the raw image URL.
 * TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
272
1b37b344
JC
273static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
274{
275 assert(open_flags != NULL);
276
277 *open_flags |= O_BINARY;
278
279 if (bdrv_flags & BDRV_O_RDWR) {
280 *open_flags |= O_RDWR;
281 } else {
282 *open_flags |= O_RDONLY;
283 }
284
285 if ((bdrv_flags & BDRV_O_NOCACHE)) {
286 *open_flags |= O_DIRECT;
287 }
288}
289
/*
 * Open an image on a gluster volume.  The image URI arrives via the
 * "filename" runtime option, is parsed into a GlusterConf, and then a
 * glfs connection is established and the image file opened.
 *
 * Returns 0 on success, negative errno on failure with *errp set
 * (except for some glfs setup failures — see qemu_gluster_init()).
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_new0(GlusterConf, 1);
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() leaves the failure cause in errno */
        ret = -errno;
        goto out;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /* Without this, if fsync fails for a recoverable reason (for instance,
     * ENOSPC), gluster will dump its cache, preventing retries. This means
     * almost certain data loss. Not all gluster versions support the
     * 'resync-failed-syncs-after-fsync' key value, but there is no way to
     * discover during runtime if it is supported (this api returns success for
     * unknown key/value pairs) */
    ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync",
                                 "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto out;
    }
#endif

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* Failure: tear down whatever was partially set up */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
355
adccfbcd
JC
/* Connection/fd pair staged by reopen_prepare; adopted by reopen_commit
 * or torn down by reopen_abort. */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;
360
361
/*
 * Stage a brand-new gluster connection and image fd for the requested
 * flags without disturbing the current ones; _commit adopts the new pair,
 * _abort discards it.  Returns 0 on success, negative errno on failure.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(GlusterConf, 1);

    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /* Keep the write-behind cache across fsync failures; see the matching
     * call in qemu_gluster_open() for why this matters. */
    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync", "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto exit;
    }
#endif

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
408
409static void qemu_gluster_reopen_commit(BDRVReopenState *state)
410{
411 BDRVGlusterReopenState *reop_s = state->opaque;
412 BDRVGlusterState *s = state->bs->opaque;
413
414
415 /* close the old */
416 if (s->fd) {
417 glfs_close(s->fd);
418 }
419 if (s->glfs) {
420 glfs_fini(s->glfs);
421 }
422
423 /* use the newly opened image / connection */
424 s->fd = reop_s->fd;
425 s->glfs = reop_s->glfs;
426
427 g_free(state->opaque);
428 state->opaque = NULL;
429
430 return;
431}
432
433
434static void qemu_gluster_reopen_abort(BDRVReopenState *state)
435{
436 BDRVGlusterReopenState *reop_s = state->opaque;
437
438 if (reop_s == NULL) {
439 return;
440 }
441
442 if (reop_s->fd) {
443 glfs_close(reop_s->fd);
444 }
445
446 if (reop_s->glfs) {
447 glfs_fini(reop_s->glfs);
448 }
449
450 g_free(state->opaque);
451 state->opaque = NULL;
452
453 return;
454}
455
#ifdef CONFIG_GLUSTERFS_ZEROFILL
/* Write zeroes over [offset, offset+size) using gluster's native async
 * zerofill; completion is routed through gluster_finish_aiocb(). */
static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
        int64_t offset, int size, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        /* Submission failed; no callback will run */
        return -errno;
    }

    /* Suspend until gluster_finish_aiocb() schedules our re-entry */
    qemu_coroutine_yield();
    return acb.ret;
}

/* Compile-time capability probe: zerofill is available in this build. */
static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

/* Synchronous zerofill used by image creation ("full" preallocation). */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
/* Zerofill not compiled in: report no support and make the call a no-op. */
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif
501
8d6d89cb 502static int qemu_gluster_create(const char *filename,
90c772de 503 QemuOpts *opts, Error **errp)
8d6d89cb
BR
504{
505 struct glfs *glfs;
506 struct glfs_fd *fd;
507 int ret = 0;
cf7f616b 508 int prealloc = 0;
8d6d89cb 509 int64_t total_size = 0;
90c772de 510 char *tmp = NULL;
5839e53b 511 GlusterConf *gconf = g_new0(GlusterConf, 1);
8d6d89cb 512
a7451cb8 513 glfs = qemu_gluster_init(gconf, filename, errp);
8d6d89cb 514 if (!glfs) {
4557117d 515 ret = -errno;
8d6d89cb
BR
516 goto out;
517 }
518
180e9526
HT
519 total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
520 BDRV_SECTOR_SIZE);
90c772de
CL
521
522 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
523 if (!tmp || !strcmp(tmp, "off")) {
524 prealloc = 0;
525 } else if (!strcmp(tmp, "full") &&
526 gluster_supports_zerofill()) {
527 prealloc = 1;
528 } else {
529 error_setg(errp, "Invalid preallocation mode: '%s'"
530 " or GlusterFS doesn't support zerofill API",
531 tmp);
532 ret = -EINVAL;
533 goto out;
8d6d89cb
BR
534 }
535
536 fd = glfs_creat(glfs, gconf->image,
537 O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
538 if (!fd) {
539 ret = -errno;
540 } else {
180e9526
HT
541 if (!glfs_ftruncate(fd, total_size)) {
542 if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
cf7f616b
BR
543 ret = -errno;
544 }
545 } else {
8d6d89cb
BR
546 ret = -errno;
547 }
cf7f616b 548
8d6d89cb
BR
549 if (glfs_close(fd) != 0) {
550 ret = -errno;
551 }
552 }
553out:
90c772de 554 g_free(tmp);
8d6d89cb
BR
555 qemu_gluster_gconf_free(gconf);
556 if (glfs) {
557 glfs_fini(glfs);
558 }
559 return ret;
560}
561
15744b0b
BR
/*
 * Common coroutine read/write implementation: submit the async gluster
 * request, yield until gluster_finish_aiocb() re-enters us, and return
 * the result it posted (0 or negative errno).  @write selects between
 * glfs_pwritev_async() and glfs_preadv_async().
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                gluster_finish_aiocb, &acb);
    }

    if (ret < 0) {
        /* Submission itself failed; no callback will run */
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
591
42ec24e2
PB
592static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
593{
594 int ret;
595 BDRVGlusterState *s = bs->opaque;
596
597 ret = glfs_ftruncate(s->fd, offset);
598 if (ret < 0) {
599 return -errno;
600 }
601
602 return 0;
603}
604
15744b0b
BR
/* Coroutine read entry point: delegate to the common rw helper (write=0). */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
610
15744b0b
BR
/* Coroutine write entry point: delegate to the common rw helper (write=1). */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
616
5d4343e6
JC
617static void qemu_gluster_close(BlockDriverState *bs)
618{
619 BDRVGlusterState *s = bs->opaque;
620
621 if (s->fd) {
622 glfs_close(s->fd);
623 s->fd = NULL;
624 }
625 glfs_fini(s->glfs);
626}
627
/*
 * Flush cached writes to the backing store via async fsync.  On failure
 * the whole BDS is shut down, because gluster may have dropped its write
 * cache and the guest can no longer continue safely (see comment at the
 * error label).
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        /* Submission failed; no callback will run */
        ret = -errno;
        goto error;
    }

    qemu_coroutine_yield();
    if (acb.ret < 0) {
        ret = acb.ret;
        goto error;
    }

    /* acb.ret is 0 here */
    return acb.ret;

error:
    /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain its cache
     * after a fsync failure, so we have no way of allowing the guest to safely
     * continue. Gluster versions prior to 3.5.6 don't retain the cache
     * either, but will invalidate the fd on error, so this is again our only
     * option.
     *
     * The 'resync-failed-syncs-after-fsync' xlator option for the
     * write-behind cache will cause later gluster versions to retain its
     * cache after error, so long as the fd remains open. However, we
     * currently have no way of knowing if this option is supported.
     *
     * TODO: Once gluster provides a way for us to determine if the option
     * is supported, bypass the closure and setting drv to NULL. */
    qemu_gluster_close(bs);
    bs->drv = NULL;
    return ret;
}
671
#ifdef CONFIG_GLUSTERFS_DISCARD
/* Discard (unmap) a sector range using gluster's async discard; completion
 * is routed through gluster_finish_aiocb() like the other requests. */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    /* size 0: discard callbacks report ret == 0 on success */
    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        /* Submission failed; no callback will run */
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
#endif
696
8d6d89cb
BR
697static int64_t qemu_gluster_getlength(BlockDriverState *bs)
698{
699 BDRVGlusterState *s = bs->opaque;
700 int64_t ret;
701
702 ret = glfs_lseek(s->fd, 0, SEEK_END);
703 if (ret < 0) {
704 return -errno;
705 } else {
706 return ret;
707 }
708}
709
710static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
711{
712 BDRVGlusterState *s = bs->opaque;
713 struct stat st;
714 int ret;
715
716 ret = glfs_fstat(s->fd, &st);
717 if (ret < 0) {
718 return -errno;
719 } else {
720 return st.st_blocks * 512;
721 }
722}
723
8ab6feec
KW
/* Newly created images cannot be assumed to read back as zeroes. */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
729
90c772de
CL
/* Options accepted by qemu_gluster_create() (shared by all four drivers). */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { /* end of list */ }
    }
};
747
/* Driver for the plain "gluster://" scheme (tcp transport by default). */
static BlockDriver bdrv_gluster = {
    .format_name = "gluster",
    .protocol_name = "gluster",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
    .create_opts = &qemu_gluster_create_opts,
};
774
/* Driver for the explicit "gluster+tcp://" scheme. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name = "gluster",
    .protocol_name = "gluster+tcp",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
    .create_opts = &qemu_gluster_create_opts,
};
801
/* Driver for the "gluster+unix://" scheme (unix domain socket transport). */
static BlockDriver bdrv_gluster_unix = {
    .format_name = "gluster",
    .protocol_name = "gluster+unix",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
    .create_opts = &qemu_gluster_create_opts,
};
828
/* Driver for the "gluster+rdma://" scheme. */
static BlockDriver bdrv_gluster_rdma = {
    .format_name = "gluster",
    .protocol_name = "gluster+rdma",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
    .create_opts = &qemu_gluster_create_opts,
};
855
/* Register one BlockDriver per supported gluster URI scheme. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);