/*
 * Source: git.proxmox.com mirror_qemu.git — block/gluster.c
 * (snapshot taken at commit "cleanup QEMUOptionParameter")
 */
1 /*
2 * GlusterFS backend for QEMU
3 *
4 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10 #include <glusterfs/api/glfs.h>
11 #include "block/block_int.h"
12 #include "qemu/uri.h"
13
/* Per-request state for asynchronous gluster I/O. */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected byte count; the completion callback
                                compares it against the glfs return value */
    int ret;                 /* final result: 0 or a negative errno */
    QEMUBH *bh;              /* bottom half that re-enters the coroutine */
    Coroutine *coroutine;    /* coroutine suspended while I/O is in flight */
    AioContext *aio_context; /* context the completion BH is scheduled in */
} GlusterAIOCB;
21
/* Per-BlockDriverState gluster connection and open image handle. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;   /* gluster connection, one per BDS */
    struct glfs_fd *fd;  /* open handle for the image on the volume */
} BDRVGlusterState;
26
/* Connection parameters parsed out of a gluster[+transport]:// URI. */
typedef struct GlusterConf {
    char *server;     /* hostname/IP, or unix socket path for "unix" */
    int port;         /* glusterd port; 0 selects gluster's default */
    char *volname;    /* gluster volume containing the image */
    char *image;      /* path of the image within the volume */
    char *transport;  /* "tcp", "unix" or "rdma" */
} GlusterConf;
34
35 static void qemu_gluster_gconf_free(GlusterConf *gconf)
36 {
37 if (gconf) {
38 g_free(gconf->server);
39 g_free(gconf->volname);
40 g_free(gconf->image);
41 g_free(gconf->transport);
42 g_free(gconf);
43 }
44 }
45
46 static int parse_volume_options(GlusterConf *gconf, char *path)
47 {
48 char *p, *q;
49
50 if (!path) {
51 return -EINVAL;
52 }
53
54 /* volume */
55 p = q = path + strspn(path, "/");
56 p += strcspn(p, "/");
57 if (*p == '\0') {
58 return -EINVAL;
59 }
60 gconf->volname = g_strndup(q, p - q);
61
62 /* image */
63 p += strspn(p, "/");
64 if (*p == '\0') {
65 return -EINVAL;
66 }
67 gconf->image = g_strdup(p);
68 return 0;
69 }
70
71 /*
72 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
73 *
74 * 'gluster' is the protocol.
75 *
76 * 'transport' specifies the transport type used to connect to gluster
77 * management daemon (glusterd). Valid transport types are
78 * tcp, unix and rdma. If a transport type isn't specified, then tcp
79 * type is assumed.
80 *
81 * 'server' specifies the server where the volume file specification for
82 * the given volume resides. This can be either hostname, ipv4 address
83 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
84 * If transport type is 'unix', then 'server' field should not be specified.
85 * The 'socket' field needs to be populated with the path to unix domain
86 * socket.
87 *
88 * 'port' is the port number on which glusterd is listening. This is optional
89 * and if not specified, QEMU will send 0 which will make gluster to use the
90 * default port. If the transport type is unix, then 'port' should not be
91 * specified.
92 *
93 * 'volname' is the name of the gluster volume which contains the VM image.
94 *
95 * 'image' is the path to the actual VM image that resides on gluster volume.
96 *
97 * Examples:
98 *
99 * file=gluster://1.2.3.4/testvol/a.img
100 * file=gluster+tcp://1.2.3.4/testvol/a.img
101 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
102 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
103 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
104 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
105 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
106 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
107 */
/*
 * Parse a gluster URI (format documented above) into @gconf.
 *
 * Fills gconf->transport, ->volname and ->image, plus either
 * ->server/->port (tcp/rdma) or ->server as the unix socket path
 * (unix transport).  Returns 0 on success, -EINVAL on a malformed URI.
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: derived from the URI scheme; plain "gluster" means tcp */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    /* volname and image come from the URI path */
    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /*
     * Exactly one query parameter ("socket") is required for the unix
     * transport, and none are allowed for the others.
     */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: no host/port; server holds the socket path */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        /* a port of 0 makes gluster fall back to its default port */
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
168
/*
 * Parse @filename and establish the glfs connection it describes.
 *
 * On success returns an initialized glfs object (the caller owns it and
 * must eventually glfs_fini() it); @gconf is filled in as a side effect.
 * On failure returns NULL with errno indicating the cause.
 *
 * NOTE(review): when glfs_new(), glfs_set_volfile_server() or
 * glfs_set_logging() fail, *errp is left unset and only errno reports
 * the error — confirm all callers tolerate an unset errp on this path.
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0)
            errno = EINVAL;

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* glfs_fini() may clobber errno; preserve the original cause */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
228
229 static void qemu_gluster_complete_aio(void *opaque)
230 {
231 GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
232
233 qemu_bh_delete(acb->bh);
234 acb->bh = NULL;
235 qemu_coroutine_enter(acb->coroutine, NULL);
236 }
237
238 /*
239 * AIO callback routine called from GlusterFS thread.
240 */
241 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
242 {
243 GlusterAIOCB *acb = (GlusterAIOCB *)arg;
244
245 if (!ret || ret == acb->size) {
246 acb->ret = 0; /* Success */
247 } else if (ret < 0) {
248 acb->ret = ret; /* Read/Write failed */
249 } else {
250 acb->ret = -EIO; /* Partial read/write - fail it */
251 }
252
253 acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
254 qemu_bh_schedule(acb->bh);
255 }
256
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(). */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
270
271 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
272 {
273 assert(open_flags != NULL);
274
275 *open_flags |= O_BINARY;
276
277 if (bdrv_flags & BDRV_O_RDWR) {
278 *open_flags |= O_RDWR;
279 } else {
280 *open_flags |= O_RDONLY;
281 }
282
283 if ((bdrv_flags & BDRV_O_NOCACHE)) {
284 *open_flags |= O_DIRECT;
285 }
286 }
287
/*
 * Open the image named by the "filename" runtime option.
 *
 * Establishes the gluster connection (s->glfs) and opens the image
 * (s->fd).  Returns 0 on success or a negative errno; on failure any
 * partially created state is torn down before returning.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() reports the cause via errno */
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* error path: drop whatever was set up before the failure */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
336
/* New connection/handle pair staged by reopen_prepare until commit/abort. */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;   /* freshly created connection */
    struct glfs_fd *fd;  /* image reopened with the new flags */
} BDRVGlusterReopenState;
341
342
/*
 * Prepare a reopen by building a completely new connection and file
 * handle in state->opaque, leaving the current ones untouched.  The
 * actual swap happens in _commit; on failure _abort cleans up the new
 * handles.  Returns 0 on success or a negative errno.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_malloc0(sizeof(GlusterConf));

    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
379
380 static void qemu_gluster_reopen_commit(BDRVReopenState *state)
381 {
382 BDRVGlusterReopenState *reop_s = state->opaque;
383 BDRVGlusterState *s = state->bs->opaque;
384
385
386 /* close the old */
387 if (s->fd) {
388 glfs_close(s->fd);
389 }
390 if (s->glfs) {
391 glfs_fini(s->glfs);
392 }
393
394 /* use the newly opened image / connection */
395 s->fd = reop_s->fd;
396 s->glfs = reop_s->glfs;
397
398 g_free(state->opaque);
399 state->opaque = NULL;
400
401 return;
402 }
403
404
405 static void qemu_gluster_reopen_abort(BDRVReopenState *state)
406 {
407 BDRVGlusterReopenState *reop_s = state->opaque;
408
409 if (reop_s == NULL) {
410 return;
411 }
412
413 if (reop_s->fd) {
414 glfs_close(reop_s->fd);
415 }
416
417 if (reop_s->glfs) {
418 glfs_fini(reop_s->glfs);
419 }
420
421 g_free(state->opaque);
422 state->opaque = NULL;
423
424 return;
425 }
426
#ifdef CONFIG_GLUSTERFS_ZEROFILL
/*
 * Write zeroes to [sector_num, sector_num + nb_sectors) with the
 * gluster zerofill primitive.  Runs in coroutine context: the request
 * is issued asynchronously and the coroutine yields until
 * gluster_finish_aiocb() resumes it.  Returns 0 or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

/* True when the installed libgfapi provides glfs_zerofill(). */
static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

/* Synchronous zerofill, used for full preallocation at create time. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
/* Stubs used when libgfapi lacks zerofill support. */
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif
479
/*
 * Create a new image on a gluster volume.
 *
 * Understands the "size" and "preallocation" options; preallocation
 * "full" zero-fills the image and is only accepted when the library
 * supports zerofill.  Returns 0 or a negative errno.
 *
 * NOTE(review): total_size is kept in 512-byte sectors (the size option
 * is divided by BDRV_SECTOR_SIZE and multiplied back below), so a size
 * that is not sector-aligned is silently rounded down — confirm this is
 * intended.
 */
static int qemu_gluster_create(const char *filename,
                               QemuOpts *opts, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    char *tmp = NULL;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    total_size =
        qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0) / BDRV_SECTOR_SIZE;

    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!tmp || !strcmp(tmp, "off")) {
        prealloc = 0;
    } else if (!strcmp(tmp, "full") &&
               gluster_supports_zerofill()) {
        prealloc = 1;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'"
                   " or GlusterFS doesn't support zerofill API",
                   tmp);
        ret = -EINVAL;
        goto out;
    }

    fd = glfs_creat(glfs, gconf->image,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    g_free(tmp);
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
540
541 static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
542 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
543 {
544 int ret;
545 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
546 BDRVGlusterState *s = bs->opaque;
547 size_t size = nb_sectors * BDRV_SECTOR_SIZE;
548 off_t offset = sector_num * BDRV_SECTOR_SIZE;
549
550 acb->size = size;
551 acb->ret = 0;
552 acb->coroutine = qemu_coroutine_self();
553 acb->aio_context = bdrv_get_aio_context(bs);
554
555 if (write) {
556 ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
557 &gluster_finish_aiocb, acb);
558 } else {
559 ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
560 &gluster_finish_aiocb, acb);
561 }
562
563 if (ret < 0) {
564 ret = -errno;
565 goto out;
566 }
567
568 qemu_coroutine_yield();
569 ret = acb->ret;
570
571 out:
572 g_slice_free(GlusterAIOCB, acb);
573 return ret;
574 }
575
576 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
577 {
578 int ret;
579 BDRVGlusterState *s = bs->opaque;
580
581 ret = glfs_ftruncate(s->fd, offset);
582 if (ret < 0) {
583 return -errno;
584 }
585
586 return 0;
587 }
588
/* Coroutine read entry point: delegates to qemu_gluster_co_rw(). */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
594
/* Coroutine write entry point: delegates to qemu_gluster_co_rw(). */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
600
601 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
602 {
603 int ret;
604 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
605 BDRVGlusterState *s = bs->opaque;
606
607 acb->size = 0;
608 acb->ret = 0;
609 acb->coroutine = qemu_coroutine_self();
610 acb->aio_context = bdrv_get_aio_context(bs);
611
612 ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
613 if (ret < 0) {
614 ret = -errno;
615 goto out;
616 }
617
618 qemu_coroutine_yield();
619 ret = acb->ret;
620
621 out:
622 g_slice_free(GlusterAIOCB, acb);
623 return ret;
624 }
625
626 #ifdef CONFIG_GLUSTERFS_DISCARD
627 static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
628 int64_t sector_num, int nb_sectors)
629 {
630 int ret;
631 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
632 BDRVGlusterState *s = bs->opaque;
633 size_t size = nb_sectors * BDRV_SECTOR_SIZE;
634 off_t offset = sector_num * BDRV_SECTOR_SIZE;
635
636 acb->size = 0;
637 acb->ret = 0;
638 acb->coroutine = qemu_coroutine_self();
639 acb->aio_context = bdrv_get_aio_context(bs);
640
641 ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
642 if (ret < 0) {
643 ret = -errno;
644 goto out;
645 }
646
647 qemu_coroutine_yield();
648 ret = acb->ret;
649
650 out:
651 g_slice_free(GlusterAIOCB, acb);
652 return ret;
653 }
654 #endif
655
656 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
657 {
658 BDRVGlusterState *s = bs->opaque;
659 int64_t ret;
660
661 ret = glfs_lseek(s->fd, 0, SEEK_END);
662 if (ret < 0) {
663 return -errno;
664 } else {
665 return ret;
666 }
667 }
668
669 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
670 {
671 BDRVGlusterState *s = bs->opaque;
672 struct stat st;
673 int ret;
674
675 ret = glfs_fstat(s->fd, &st);
676 if (ret < 0) {
677 return -errno;
678 } else {
679 return st.st_blocks * 512;
680 }
681 }
682
683 static void qemu_gluster_close(BlockDriverState *bs)
684 {
685 BDRVGlusterState *s = bs->opaque;
686
687 if (s->fd) {
688 glfs_close(s->fd);
689 s->fd = NULL;
690 }
691 glfs_fini(s->glfs);
692 }
693
/*
 * Report whether a freshly created image reads as zeroes.  Always 0
 * here, because a GlusterFS volume could be backed by a block device.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
699
/* Creation options accepted by qemu_gluster_create(). */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { /* end of list */ }
    }
};
717
/* Driver for the bare "gluster://" protocol prefix (tcp transport). */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
744
/* Driver for the explicit "gluster+tcp://" protocol prefix. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
771
/* Driver for the "gluster+unix://" protocol prefix (unix domain socket). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
798
/* Driver for the "gluster+rdma://" protocol prefix. */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
825
826 static void bdrv_gluster_init(void)
827 {
828 bdrv_register(&bdrv_gluster_rdma);
829 bdrv_register(&bdrv_gluster_unix);
830 bdrv_register(&bdrv_gluster_tcp);
831 bdrv_register(&bdrv_gluster);
832 }
833
834 block_init(bdrv_gluster_init);