]> git.proxmox.com Git - mirror_qemu.git/blob - block/gluster.c
Merge remote-tracking branch 'remotes/sstabellini/tags/xen-20160121' into staging
[mirror_qemu.git] / block / gluster.c
1 /*
2 * GlusterFS backend for QEMU
3 *
4 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10 #include "qemu/osdep.h"
11 #include <glusterfs/api/glfs.h>
12 #include "block/block_int.h"
13 #include "qemu/uri.h"
14
/*
 * Per-request state for asynchronous gluster I/O.  Filled in by the
 * coroutine issuing the request; completed from gluster_finish_aiocb()
 * via a bottom half running in @aio_context.
 */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected transfer size in bytes; 0 for flush/discard */
    int ret;                 /* result: 0 on success, negative errno on failure */
    QEMUBH *bh;              /* bottom half used to re-enter the coroutine */
    Coroutine *coroutine;    /* coroutine suspended in qemu_coroutine_yield() */
    AioContext *aio_context; /* context the completion BH must run in */
} GlusterAIOCB;
22
/* Per-BlockDriverState state for an open gluster image. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;   /* libgfapi connection to the gluster volume */
    struct glfs_fd *fd;  /* open handle for the image file on that volume */
} BDRVGlusterState;
27
/*
 * Parsed form of a gluster[+transport]://... URI; see the comment above
 * qemu_gluster_parseuri() for the full syntax.  Free with
 * qemu_gluster_gconf_free().
 */
typedef struct GlusterConf {
    char *server;     /* hostname/IP, or unix socket path for "unix" transport */
    int port;         /* glusterd port; 0 lets gluster pick the default */
    char *volname;    /* gluster volume name */
    char *image;      /* image path within the volume */
    char *transport;  /* "tcp", "unix" or "rdma" */
} GlusterConf;
35
36 static void qemu_gluster_gconf_free(GlusterConf *gconf)
37 {
38 if (gconf) {
39 g_free(gconf->server);
40 g_free(gconf->volname);
41 g_free(gconf->image);
42 g_free(gconf->transport);
43 g_free(gconf);
44 }
45 }
46
47 static int parse_volume_options(GlusterConf *gconf, char *path)
48 {
49 char *p, *q;
50
51 if (!path) {
52 return -EINVAL;
53 }
54
55 /* volume */
56 p = q = path + strspn(path, "/");
57 p += strcspn(p, "/");
58 if (*p == '\0') {
59 return -EINVAL;
60 }
61 gconf->volname = g_strndup(q, p - q);
62
63 /* image */
64 p += strspn(p, "/");
65 if (*p == '\0') {
66 return -EINVAL;
67 }
68 gconf->image = g_strdup(p);
69 return 0;
70 }
71
72 /*
73 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
74 *
75 * 'gluster' is the protocol.
76 *
77 * 'transport' specifies the transport type used to connect to gluster
78 * management daemon (glusterd). Valid transport types are
79 * tcp, unix and rdma. If a transport type isn't specified, then tcp
80 * type is assumed.
81 *
82 * 'server' specifies the server where the volume file specification for
83 * the given volume resides. This can be either hostname, ipv4 address
84 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
85 * If transport type is 'unix', then 'server' field should not be specified.
86 * The 'socket' field needs to be populated with the path to unix domain
87 * socket.
88 *
89 * 'port' is the port number on which glusterd is listening. This is optional
90 * and if not specified, QEMU will send 0 which will make gluster to use the
91 * default port. If the transport type is unix, then 'port' should not be
92 * specified.
93 *
94 * 'volname' is the name of the gluster volume which contains the VM image.
95 *
96 * 'image' is the path to the actual VM image that resides on gluster volume.
97 *
98 * Examples:
99 *
100 * file=gluster://1.2.3.4/testvol/a.img
101 * file=gluster+tcp://1.2.3.4/testvol/a.img
102 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
103 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
104 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
105 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
106 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
107 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
108 */
/*
 * Fill @gconf from @filename according to the URI syntax documented
 * above.  Returns 0 on success or -EINVAL on any parse/validation
 * failure; fields already assigned to @gconf are left for the caller
 * to release with qemu_gluster_gconf_free().
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: taken from the URI scheme; bare "gluster" means tcp */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /*
     * At most one query parameter is allowed, and it is mandatory for
     * unix transport (the socket path) and forbidden for the others.
     */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: no host/port; "server" carries the socket path */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
169
170 static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
171 Error **errp)
172 {
173 struct glfs *glfs = NULL;
174 int ret;
175 int old_errno;
176
177 ret = qemu_gluster_parseuri(gconf, filename);
178 if (ret < 0) {
179 error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
180 "volname/image[?socket=...]");
181 errno = -ret;
182 goto out;
183 }
184
185 glfs = glfs_new(gconf->volname);
186 if (!glfs) {
187 goto out;
188 }
189
190 ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
191 gconf->port);
192 if (ret < 0) {
193 goto out;
194 }
195
196 /*
197 * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
198 * GlusterFS makes GF_LOG_* macros available to libgfapi users.
199 */
200 ret = glfs_set_logging(glfs, "-", 4);
201 if (ret < 0) {
202 goto out;
203 }
204
205 ret = glfs_init(glfs);
206 if (ret) {
207 error_setg_errno(errp, errno,
208 "Gluster connection failed for server=%s port=%d "
209 "volume=%s image=%s transport=%s", gconf->server,
210 gconf->port, gconf->volname, gconf->image,
211 gconf->transport);
212
213 /* glfs_init sometimes doesn't set errno although docs suggest that */
214 if (errno == 0)
215 errno = EINVAL;
216
217 goto out;
218 }
219 return glfs;
220
221 out:
222 if (glfs) {
223 old_errno = errno;
224 glfs_fini(glfs);
225 errno = old_errno;
226 }
227 return NULL;
228 }
229
/*
 * Bottom-half handler scheduled by gluster_finish_aiocb(): runs in the
 * request's AioContext and resumes the coroutine that yielded after
 * issuing the async libgfapi call.
 */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
238
/*
 * AIO callback routine called from GlusterFS thread.
 *
 * Translates the raw libgfapi byte count into the request result:
 * success only when the whole acb->size was transferred, the negative
 * errno when ret < 0, and -EIO for a short transfer.  Completion is
 * deferred through a bottom half because this does not run in QEMU's
 * AioContext.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
257
/* TODO Convert to fine grained options */
/*
 * Runtime options recognised by qemu_gluster_open(); currently only the
 * complete gluster URI, passed through the "filename" option.
 */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
271
272 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
273 {
274 assert(open_flags != NULL);
275
276 *open_flags |= O_BINARY;
277
278 if (bdrv_flags & BDRV_O_RDWR) {
279 *open_flags |= O_RDWR;
280 } else {
281 *open_flags |= O_RDONLY;
282 }
283
284 if ((bdrv_flags & BDRV_O_NOCACHE)) {
285 *open_flags |= O_DIRECT;
286 }
287 }
288
/*
 * Open a gluster-backed image: absorb the "filename" runtime option,
 * connect to the volume via qemu_gluster_init() and glfs_open() the
 * image file with flags derived from @bdrv_flags.  Returns 0 on success
 * or -errno; on failure any partially-created fd/connection is torn down.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_new0(GlusterConf, 1);
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() sets errno on failure */
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* error path: release whatever was set up before the failure */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
337
/*
 * Connection/fd pair created by qemu_gluster_reopen_prepare(); either
 * installed into BDRVGlusterState on _commit or torn down on _abort.
 */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;
342
343
/*
 * Prepare to reopen the image with new flags: establish a second
 * connection and open a new fd with the requested open flags, leaving
 * the current handles untouched until _commit.  Returns 0 on success or
 * -errno on failure.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(GlusterConf, 1);

    /* re-parse the filename to recover the image path on the volume */
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
380
381 static void qemu_gluster_reopen_commit(BDRVReopenState *state)
382 {
383 BDRVGlusterReopenState *reop_s = state->opaque;
384 BDRVGlusterState *s = state->bs->opaque;
385
386
387 /* close the old */
388 if (s->fd) {
389 glfs_close(s->fd);
390 }
391 if (s->glfs) {
392 glfs_fini(s->glfs);
393 }
394
395 /* use the newly opened image / connection */
396 s->fd = reop_s->fd;
397 s->glfs = reop_s->glfs;
398
399 g_free(state->opaque);
400 state->opaque = NULL;
401
402 return;
403 }
404
405
406 static void qemu_gluster_reopen_abort(BDRVReopenState *state)
407 {
408 BDRVGlusterReopenState *reop_s = state->opaque;
409
410 if (reop_s == NULL) {
411 return;
412 }
413
414 if (reop_s->fd) {
415 glfs_close(reop_s->fd);
416 }
417
418 if (reop_s->glfs) {
419 glfs_fini(reop_s->glfs);
420 }
421
422 g_free(state->opaque);
423 state->opaque = NULL;
424
425 return;
426 }
427
428 #ifdef CONFIG_GLUSTERFS_ZEROFILL
/*
 * Write zeroes to a sector range using glfs_zerofill_async(), yielding
 * until gluster_finish_aiocb() completes the request.
 * Returns 0 on success or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    /* resumed by qemu_gluster_complete_aio() once zerofill finishes */
    qemu_coroutine_yield();
    return acb.ret;
}
451
/* Whether the installed libgfapi provides glfs_zerofill(). */
static inline bool gluster_supports_zerofill(void)
{
    /* use the bool literal rather than the int 1 in a bool function */
    return true;
}
456
/* Thin wrapper over glfs_zerofill(); returns its result directly. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}
462
463 #else
/* libgfapi lacks glfs_zerofill() in this build; report no support. */
static inline bool gluster_supports_zerofill(void)
{
    /* use the bool literal rather than the int 0 in a bool function */
    return false;
}
468
/*
 * Stub for builds without zerofill support.  qemu_gluster_create() only
 * enables preallocation when gluster_supports_zerofill() is true, so
 * this is never reached.
 */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
474 #endif
475
/*
 * Create a new image on the gluster volume named by @filename.
 * Honours BLOCK_OPT_SIZE (rounded up to BDRV_SECTOR_SIZE) and
 * BLOCK_OPT_PREALLOC ("off", or "full" which zerofills the whole image
 * and requires zerofill support in libgfapi).
 * Returns 0 on success, -EINVAL for a bad preallocation mode, or -errno.
 */
static int qemu_gluster_create(const char *filename,
                               QemuOpts *opts, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    char *tmp = NULL;
    GlusterConf *gconf = g_new0(GlusterConf, 1);

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);

    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!tmp || !strcmp(tmp, "off")) {
        prealloc = 0;
    } else if (!strcmp(tmp, "full") &&
               gluster_supports_zerofill()) {
        prealloc = 1;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'"
                   " or GlusterFS doesn't support zerofill API",
                   tmp);
        ret = -EINVAL;
        goto out;
    }

    fd = glfs_creat(glfs, gconf->image,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        /* close can still fail and must not be ignored */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    g_free(tmp);
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
535
/*
 * Common coroutine read/write path: issue an asynchronous
 * preadv/pwritev through libgfapi and yield until
 * gluster_finish_aiocb() schedules completion.
 * Returns 0 on success or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                gluster_finish_aiocb, &acb);
    }

    if (ret < 0) {
        return -errno;
    }

    /* resumed by qemu_gluster_complete_aio() when the transfer is done */
    qemu_coroutine_yield();
    return acb.ret;
}
565
566 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
567 {
568 int ret;
569 BDRVGlusterState *s = bs->opaque;
570
571 ret = glfs_ftruncate(s->fd, offset);
572 if (ret < 0) {
573 return -errno;
574 }
575
576 return 0;
577 }
578
/* Read path: delegate to the common r/w helper with write == 0. */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
584
/* Write path: delegate to the common r/w helper with write == 1. */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
590
/*
 * Flush the image to stable storage via glfs_fsync_async(), yielding
 * until the callback completes the request.  acb.size is 0 so only a
 * zero result counts as success in gluster_finish_aiocb().
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
610
611 #ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Discard a sector range via glfs_discard_async(), yielding until the
 * callback completes the request.  acb.size is 0 so only a zero result
 * counts as success in gluster_finish_aiocb().
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
634 #endif
635
636 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
637 {
638 BDRVGlusterState *s = bs->opaque;
639 int64_t ret;
640
641 ret = glfs_lseek(s->fd, 0, SEEK_END);
642 if (ret < 0) {
643 return -errno;
644 } else {
645 return ret;
646 }
647 }
648
649 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
650 {
651 BDRVGlusterState *s = bs->opaque;
652 struct stat st;
653 int ret;
654
655 ret = glfs_fstat(s->fd, &st);
656 if (ret < 0) {
657 return -errno;
658 } else {
659 return st.st_blocks * 512;
660 }
661 }
662
663 static void qemu_gluster_close(BlockDriverState *bs)
664 {
665 BDRVGlusterState *s = bs->opaque;
666
667 if (s->fd) {
668 glfs_close(s->fd);
669 s->fd = NULL;
670 }
671 glfs_fini(s->glfs);
672 }
673
/*
 * Freshly created images cannot be assumed to read back as zeroes.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
679
/* Creation options accepted by qemu_gluster_create(). */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { /* end of list */ }
    }
};
697
/* Driver for bare "gluster://" URIs (tcp transport by default). */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
724
/* Driver for explicit "gluster+tcp://" URIs; same callbacks as above. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
751
/* Driver for "gluster+unix://" URIs (unix domain socket transport). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
778
/* Driver for "gluster+rdma://" URIs (RDMA transport). */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
805
/*
 * Register one BlockDriver per supported transport.  All four share the
 * same callbacks and differ only in .protocol_name, which matches the
 * URI scheme.
 */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);