]> git.proxmox.com Git - mirror_qemu.git/blob - block/gluster.c
block/gluster: code movement of qemu_gluster_close()
[mirror_qemu.git] / block / gluster.c
1 /*
2 * GlusterFS backend for QEMU
3 *
4 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10 #include "qemu/osdep.h"
11 #include <glusterfs/api/glfs.h>
12 #include "block/block_int.h"
13 #include "qapi/error.h"
14 #include "qemu/uri.h"
15
/*
 * Per-request state for one asynchronous gluster operation.  Filled in by
 * the issuing coroutine; completed from gluster_finish_aiocb() via a
 * bottom half scheduled on aio_context.
 */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected transfer size in bytes */
    int ret;                 /* 0 on success, negative errno on failure */
    QEMUBH *bh;              /* bottom half used to re-enter the coroutine */
    Coroutine *coroutine;    /* coroutine waiting on this request */
    AioContext *aio_context; /* context the completion BH runs in */
} GlusterAIOCB;
23
/* Per-BlockDriverState connection state for the gluster driver. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;  /* libgfapi volume connection */
    struct glfs_fd *fd; /* open handle on the image file within the volume */
} BDRVGlusterState;
28
/*
 * Parsed pieces of a gluster URI (see the format comment above
 * qemu_gluster_parseuri()).  All strings are heap-allocated and owned by
 * this struct; release with qemu_gluster_gconf_free().
 */
typedef struct GlusterConf {
    char *server;    /* hostname/IP, or unix socket path for "unix" transport */
    int port;        /* glusterd port; 0 selects the gluster default */
    char *volname;   /* gluster volume name */
    char *image;     /* path of the image inside the volume */
    char *transport; /* "tcp", "unix" or "rdma" */
} GlusterConf;
36
37 static void qemu_gluster_gconf_free(GlusterConf *gconf)
38 {
39 if (gconf) {
40 g_free(gconf->server);
41 g_free(gconf->volname);
42 g_free(gconf->image);
43 g_free(gconf->transport);
44 g_free(gconf);
45 }
46 }
47
48 static int parse_volume_options(GlusterConf *gconf, char *path)
49 {
50 char *p, *q;
51
52 if (!path) {
53 return -EINVAL;
54 }
55
56 /* volume */
57 p = q = path + strspn(path, "/");
58 p += strcspn(p, "/");
59 if (*p == '\0') {
60 return -EINVAL;
61 }
62 gconf->volname = g_strndup(q, p - q);
63
64 /* image */
65 p += strspn(p, "/");
66 if (*p == '\0') {
67 return -EINVAL;
68 }
69 gconf->image = g_strdup(p);
70 return 0;
71 }
72
73 /*
74 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
75 *
76 * 'gluster' is the protocol.
77 *
78 * 'transport' specifies the transport type used to connect to gluster
79 * management daemon (glusterd). Valid transport types are
80 * tcp, unix and rdma. If a transport type isn't specified, then tcp
81 * type is assumed.
82 *
83 * 'server' specifies the server where the volume file specification for
84 * the given volume resides. This can be either hostname, ipv4 address
85 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
86 * If transport type is 'unix', then 'server' field should not be specified.
87 * The 'socket' field needs to be populated with the path to unix domain
88 * socket.
89 *
90 * 'port' is the port number on which glusterd is listening. This is optional
91 * and if not specified, QEMU will send 0 which will make gluster to use the
92 * default port. If the transport type is unix, then 'port' should not be
93 * specified.
94 *
95 * 'volname' is the name of the gluster volume which contains the VM image.
96 *
97 * 'image' is the path to the actual VM image that resides on gluster volume.
98 *
99 * Examples:
100 *
101 * file=gluster://1.2.3.4/testvol/a.img
102 * file=gluster+tcp://1.2.3.4/testvol/a.img
103 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
104 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
105 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
106 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
107 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
108 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
109 */
/*
 * Parse a gluster URI (format documented above) into *gconf.
 * Returns 0 on success or -EINVAL on any malformed input; on failure the
 * fields already assigned remain owned by gconf and are released by the
 * caller via qemu_gluster_gconf_free().
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: derived from the scheme; bare "gluster" implies tcp */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /*
     * The only accepted query parameter is "socket", and only for the unix
     * transport: require exactly one parameter iff is_unix, none otherwise.
     */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: no host/port; "server" carries the socket path */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
170
/*
 * Create and connect a glfs object for the volume encoded in 'filename'.
 *
 * On success returns the initialized glfs handle; *gconf is populated as a
 * side effect.  On failure returns NULL with errno describing the error
 * (callers negate errno for their return code) and, where a message is
 * available, errp set.
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                         "volname/image[?socket=...]");
        /* Propagate the parse failure to the caller through errno */
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        /* NOTE(review): relies on glfs_new() setting errno — confirm */
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0)
            errno = EINVAL;

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* Preserve errno across glfs_fini(), which may clobber it */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
230
/*
 * Bottom half: runs in the request's AioContext after the gluster callback
 * has recorded the result; frees the BH and re-enters the waiting coroutine.
 */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
239
/*
 * AIO callback routine called from GlusterFS thread.
 * Translates the raw libgfapi result into acb->ret (0, -errno, or -EIO for
 * a short transfer) and schedules the completion bottom half so the
 * coroutine is resumed in its own AioContext, not on the gluster thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        /* NOTE(review): assumes errno is still valid on this gluster
         * thread at callback time — confirm against libgfapi contract */
        acb->ret = -errno; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
258
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(): only the image URL. */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
272
273 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
274 {
275 assert(open_flags != NULL);
276
277 *open_flags |= O_BINARY;
278
279 if (bdrv_flags & BDRV_O_RDWR) {
280 *open_flags |= O_RDWR;
281 } else {
282 *open_flags |= O_RDONLY;
283 }
284
285 if ((bdrv_flags & BDRV_O_NOCACHE)) {
286 *open_flags |= O_DIRECT;
287 }
288 }
289
/*
 * Open the gluster image named by the "filename" runtime option.
 *
 * Establishes the volume connection and opens the image with flags derived
 * from bdrv_flags.  Returns 0 on success or a negative errno; on failure
 * any partially-created handles are torn down before returning.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_new0(GlusterConf, 1);
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() leaves the failure reason in errno */
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* Failure path: release whatever was set up before the error */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
338
/*
 * Handles opened by reopen_prepare(); adopted by the BDS in reopen_commit()
 * or torn down in reopen_abort().
 */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;  /* new volume connection */
    struct glfs_fd *fd; /* new image handle */
} BDRVGlusterReopenState;
343
344
/*
 * Prepare phase of bdrv_reopen: open a second connection and image handle
 * with the new flags, stashing them in state->opaque.  Returns 0 on
 * success or a negative errno.  state->opaque (and the handles inside it)
 * are released later by _commit or _abort, never here.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(GlusterConf, 1);

    /* Re-derive the connection parameters from the original filename */
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
381
382 static void qemu_gluster_reopen_commit(BDRVReopenState *state)
383 {
384 BDRVGlusterReopenState *reop_s = state->opaque;
385 BDRVGlusterState *s = state->bs->opaque;
386
387
388 /* close the old */
389 if (s->fd) {
390 glfs_close(s->fd);
391 }
392 if (s->glfs) {
393 glfs_fini(s->glfs);
394 }
395
396 /* use the newly opened image / connection */
397 s->fd = reop_s->fd;
398 s->glfs = reop_s->glfs;
399
400 g_free(state->opaque);
401 state->opaque = NULL;
402
403 return;
404 }
405
406
407 static void qemu_gluster_reopen_abort(BDRVReopenState *state)
408 {
409 BDRVGlusterReopenState *reop_s = state->opaque;
410
411 if (reop_s == NULL) {
412 return;
413 }
414
415 if (reop_s->fd) {
416 glfs_close(reop_s->fd);
417 }
418
419 if (reop_s->glfs) {
420 glfs_fini(reop_s->glfs);
421 }
422
423 g_free(state->opaque);
424 state->opaque = NULL;
425
426 return;
427 }
428
#ifdef CONFIG_GLUSTERFS_ZEROFILL
/*
 * Zero a sector range using gluster's asynchronous zerofill API, yielding
 * until gluster_finish_aiocb() resumes us.  Returns 0 or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    /* NOTE(review): nb_sectors * BDRV_SECTOR_SIZE is an int multiply before
     * widening to off_t; assumes callers cap nb_sectors — confirm */
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}

/* Compile-time capability query used by qemu_gluster_create(). */
static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

/* Synchronous zerofill used to preallocate newly-created images. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
/* Fallbacks when libgfapi lacks the zerofill API: report no support. */
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif
476
/*
 * Create a new image on a gluster volume.
 *
 * Connects to the volume named in 'filename', creates the image, truncates
 * it to the requested virtual size and optionally preallocates it with
 * zerofill ("full" mode).  Returns 0 on success or a negative errno.
 */
static int qemu_gluster_create(const char *filename,
                               QemuOpts *opts, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    char *tmp = NULL;
    GlusterConf *gconf = g_new0(GlusterConf, 1);

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    /* Round the requested size up to a whole number of sectors */
    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);

    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!tmp || !strcmp(tmp, "off")) {
        prealloc = 0;
    } else if (!strcmp(tmp, "full") &&
               gluster_supports_zerofill()) {
        prealloc = 1;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'"
                         " or GlusterFS doesn't support zerofill API",
                   tmp);
        ret = -EINVAL;
        goto out;
    }

    fd = glfs_creat(glfs, gconf->image,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        /* A failed close reports the error even if everything else worked */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    g_free(tmp);
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
536
/*
 * Common read/write path: issue an async preadv/pwritev and yield until
 * gluster_finish_aiocb() schedules our resumption.  'write' selects the
 * direction.  Returns 0 on success or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    /* NOTE(review): int multiply before widening to size_t; assumes
     * callers cap nb_sectors — confirm */
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                gluster_finish_aiocb, &acb);
    }

    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
566
567 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
568 {
569 int ret;
570 BDRVGlusterState *s = bs->opaque;
571
572 ret = glfs_ftruncate(s->fd, offset);
573 if (ret < 0) {
574 return -errno;
575 }
576
577 return 0;
578 }
579
/* Read path: thin wrapper dispatching to qemu_gluster_co_rw(write=0). */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
585
/* Write path: thin wrapper dispatching to qemu_gluster_co_rw(write=1). */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
591
/*
 * Close the image handle and tear down the volume connection.
 * Called with a BDS that was successfully opened, so s->glfs is valid.
 */
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}
602
/*
 * Flush: issue an async fsync and yield until the completion BH resumes
 * us.  acb.size is 0 so the callback treats ret == 0 as success.
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
622
#ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Discard a sector range via gluster's async discard API, yielding until
 * completion.  acb.size is 0 so the callback treats ret == 0 as success.
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
#endif
647
648 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
649 {
650 BDRVGlusterState *s = bs->opaque;
651 int64_t ret;
652
653 ret = glfs_lseek(s->fd, 0, SEEK_END);
654 if (ret < 0) {
655 return -errno;
656 } else {
657 return ret;
658 }
659 }
660
661 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
662 {
663 BDRVGlusterState *s = bs->opaque;
664 struct stat st;
665 int ret;
666
667 ret = glfs_fstat(s->fd, &st);
668 if (ret < 0) {
669 return -errno;
670 } else {
671 return st.st_blocks * 512;
672 }
673 }
674
/*
 * Report whether a newly created image reads as zeroes without writes.
 * Always 0 here: the backing storage may not guarantee it.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
680
/* Options accepted by qemu_gluster_create() (qemu-img create). */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { /* end of list */ }
    }
};
698
/* Driver for bare "gluster://" URIs; transport defaults to tcp. */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
725
/* Driver for explicit "gluster+tcp://" URIs; same callbacks as bdrv_gluster. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
752
/* Driver for "gluster+unix://" URIs (unix-domain socket transport). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
779
/* Driver for "gluster+rdma://" URIs (RDMA transport). */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
806
/* Register all four scheme variants of the gluster protocol driver. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);