/* block/gluster.c — QEMU GlusterFS protocol driver */
1 /*
2 * GlusterFS backend for QEMU
3 *
4 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10 #include <glusterfs/api/glfs.h>
11 #include "block/block_int.h"
12 #include "qemu/uri.h"
13
/*
 * Per-request state shared between the coroutine that issued an async
 * glfs call and the completion callback (which runs in a GlusterFS thread
 * and bounces back to the event loop via a bottom half).
 */
typedef struct GlusterAIOCB {
    int64_t size;          /* expected transfer size; used to detect short I/O */
    int ret;               /* 0 on success, negative errno, -EIO on short I/O */
    QEMUBH *bh;            /* bottom half that re-enters the coroutine */
    Coroutine *coroutine;  /* coroutine parked in qemu_coroutine_yield() */
} GlusterAIOCB;
20
/* Driver state: one glfs connection plus the fd of the open image. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;
25
/* Parsed form of a gluster[+transport]://... image specification. */
typedef struct GlusterConf {
    char *server;     /* hostname/IP, or unix socket path for "unix" transport */
    int port;         /* 0 makes gluster use its default port */
    char *volname;    /* gluster volume name */
    char *image;      /* image path within the volume */
    char *transport;  /* "tcp", "unix" or "rdma" */
} GlusterConf;
33
34 static void qemu_gluster_gconf_free(GlusterConf *gconf)
35 {
36 if (gconf) {
37 g_free(gconf->server);
38 g_free(gconf->volname);
39 g_free(gconf->image);
40 g_free(gconf->transport);
41 g_free(gconf);
42 }
43 }
44
45 static int parse_volume_options(GlusterConf *gconf, char *path)
46 {
47 char *p, *q;
48
49 if (!path) {
50 return -EINVAL;
51 }
52
53 /* volume */
54 p = q = path + strspn(path, "/");
55 p += strcspn(p, "/");
56 if (*p == '\0') {
57 return -EINVAL;
58 }
59 gconf->volname = g_strndup(q, p - q);
60
61 /* image */
62 p += strspn(p, "/");
63 if (*p == '\0') {
64 return -EINVAL;
65 }
66 gconf->image = g_strdup(p);
67 return 0;
68 }
69
70 /*
71 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
72 *
73 * 'gluster' is the protocol.
74 *
75 * 'transport' specifies the transport type used to connect to gluster
76 * management daemon (glusterd). Valid transport types are
77 * tcp, unix and rdma. If a transport type isn't specified, then tcp
78 * type is assumed.
79 *
80 * 'server' specifies the server where the volume file specification for
81 * the given volume resides. This can be either hostname, ipv4 address
82 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
83 * If transport type is 'unix', then 'server' field should not be specified.
84 * The 'socket' field needs to be populated with the path to unix domain
85 * socket.
86 *
87 * 'port' is the port number on which glusterd is listening. This is optional
88 * and if not specified, QEMU will send 0 which will make gluster to use the
89 * default port. If the transport type is unix, then 'port' should not be
90 * specified.
91 *
92 * 'volname' is the name of the gluster volume which contains the VM image.
93 *
94 * 'image' is the path to the actual VM image that resides on gluster volume.
95 *
96 * Examples:
97 *
98 * file=gluster://1.2.3.4/testvol/a.img
99 * file=gluster+tcp://1.2.3.4/testvol/a.img
100 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
101 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
102 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
103 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
104 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
105 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
106 */
107 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
108 {
109 URI *uri;
110 QueryParams *qp = NULL;
111 bool is_unix = false;
112 int ret = 0;
113
114 uri = uri_parse(filename);
115 if (!uri) {
116 return -EINVAL;
117 }
118
119 /* transport */
120 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
121 gconf->transport = g_strdup("tcp");
122 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
123 gconf->transport = g_strdup("tcp");
124 } else if (!strcmp(uri->scheme, "gluster+unix")) {
125 gconf->transport = g_strdup("unix");
126 is_unix = true;
127 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
128 gconf->transport = g_strdup("rdma");
129 } else {
130 ret = -EINVAL;
131 goto out;
132 }
133
134 ret = parse_volume_options(gconf, uri->path);
135 if (ret < 0) {
136 goto out;
137 }
138
139 qp = query_params_parse(uri->query);
140 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
141 ret = -EINVAL;
142 goto out;
143 }
144
145 if (is_unix) {
146 if (uri->server || uri->port) {
147 ret = -EINVAL;
148 goto out;
149 }
150 if (strcmp(qp->p[0].name, "socket")) {
151 ret = -EINVAL;
152 goto out;
153 }
154 gconf->server = g_strdup(qp->p[0].value);
155 } else {
156 gconf->server = g_strdup(uri->server ? uri->server : "localhost");
157 gconf->port = uri->port;
158 }
159
160 out:
161 if (qp) {
162 query_params_free(qp);
163 }
164 uri_free(uri);
165 return ret;
166 }
167
168 static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
169 Error **errp)
170 {
171 struct glfs *glfs = NULL;
172 int ret;
173 int old_errno;
174
175 ret = qemu_gluster_parseuri(gconf, filename);
176 if (ret < 0) {
177 error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
178 "volname/image[?socket=...]");
179 errno = -ret;
180 goto out;
181 }
182
183 glfs = glfs_new(gconf->volname);
184 if (!glfs) {
185 goto out;
186 }
187
188 ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
189 gconf->port);
190 if (ret < 0) {
191 goto out;
192 }
193
194 /*
195 * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
196 * GlusterFS makes GF_LOG_* macros available to libgfapi users.
197 */
198 ret = glfs_set_logging(glfs, "-", 4);
199 if (ret < 0) {
200 goto out;
201 }
202
203 ret = glfs_init(glfs);
204 if (ret) {
205 error_setg_errno(errp, errno,
206 "Gluster connection failed for server=%s port=%d "
207 "volume=%s image=%s transport=%s", gconf->server,
208 gconf->port, gconf->volname, gconf->image,
209 gconf->transport);
210
211 /* glfs_init sometimes doesn't set errno although docs suggest that */
212 if (errno == 0)
213 errno = EINVAL;
214
215 goto out;
216 }
217 return glfs;
218
219 out:
220 if (glfs) {
221 old_errno = errno;
222 glfs_fini(glfs);
223 errno = old_errno;
224 }
225 return NULL;
226 }
227
228 static void qemu_gluster_complete_aio(void *opaque)
229 {
230 GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
231
232 qemu_bh_delete(acb->bh);
233 acb->bh = NULL;
234 qemu_coroutine_enter(acb->coroutine, NULL);
235 }
236
237 /*
238 * AIO callback routine called from GlusterFS thread.
239 */
240 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
241 {
242 GlusterAIOCB *acb = (GlusterAIOCB *)arg;
243
244 if (!ret || ret == acb->size) {
245 acb->ret = 0; /* Success */
246 } else if (ret < 0) {
247 acb->ret = ret; /* Read/Write failed */
248 } else {
249 acb->ret = -EIO; /* Partial read/write - fail it */
250 }
251
252 acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
253 qemu_bh_schedule(acb->bh);
254 }
255
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(): just the image URL. */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
269
270 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
271 {
272 assert(open_flags != NULL);
273
274 *open_flags |= O_BINARY;
275
276 if (bdrv_flags & BDRV_O_RDWR) {
277 *open_flags |= O_RDWR;
278 } else {
279 *open_flags |= O_RDONLY;
280 }
281
282 if ((bdrv_flags & BDRV_O_NOCACHE)) {
283 *open_flags |= O_DIRECT;
284 }
285 }
286
/*
 * Open an image on a gluster volume.
 *
 * The location comes from the "filename" runtime option, a URL of the
 * form gluster[+transport]://[server[:port]]/volname/image[?socket=...].
 *
 * Returns 0 on success, negative errno on failure (with *errp set).
 */
static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
    int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* NOTE(review): relies on qemu_gluster_init() leaving errno != 0 on
         * failure; if errno were 0 this would wrongly report success —
         * confirm that guarantee holds. */
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    /* gconf and opts are only needed during open; free them on all paths. */
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* Failure: undo whatever was set up before bailing out. */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
335
/*
 * Staging area for bdrv_reopen: the new connection/fd live here between
 * _prepare and _commit (adopted) or _abort (discarded).
 */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;
340
341
/*
 * Prepare to reopen the image with new flags: establish a second glfs
 * connection and fd without disturbing the current ones.  The new handles
 * sit in state->opaque until _commit adopts them or _abort releases them;
 * state->opaque itself is freed by whichever of those two hooks runs.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
    BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_malloc0(sizeof(GlusterConf));

    /* Fresh connection: re-parse the original filename with the new flags. */
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
378
379 static void qemu_gluster_reopen_commit(BDRVReopenState *state)
380 {
381 BDRVGlusterReopenState *reop_s = state->opaque;
382 BDRVGlusterState *s = state->bs->opaque;
383
384
385 /* close the old */
386 if (s->fd) {
387 glfs_close(s->fd);
388 }
389 if (s->glfs) {
390 glfs_fini(s->glfs);
391 }
392
393 /* use the newly opened image / connection */
394 s->fd = reop_s->fd;
395 s->glfs = reop_s->glfs;
396
397 g_free(state->opaque);
398 state->opaque = NULL;
399
400 return;
401 }
402
403
404 static void qemu_gluster_reopen_abort(BDRVReopenState *state)
405 {
406 BDRVGlusterReopenState *reop_s = state->opaque;
407
408 if (reop_s == NULL) {
409 return;
410 }
411
412 if (reop_s->fd) {
413 glfs_close(reop_s->fd);
414 }
415
416 if (reop_s->glfs) {
417 glfs_fini(reop_s->glfs);
418 }
419
420 g_free(state->opaque);
421 state->opaque = NULL;
422
423 return;
424 }
425
426 #ifdef CONFIG_GLUSTERFS_ZEROFILL
/*
 * Write zeroes via glfs_zerofill_async(); yields until the completion
 * callback (gluster_finish_aiocb) re-enters us with the result in acb->ret.
 *
 * Returns 0 on success, negative errno on failure.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;  /* callback compares the result against this */
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        /* Submission itself failed; no callback will run. */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
453
/* Built with CONFIG_GLUSTERFS_ZEROFILL: the zerofill API is available. */
static inline bool gluster_supports_zerofill(void)
{
    /* Use the bool literal rather than a bare integer for a bool return. */
    return true;
}
458
/* Thin wrapper over glfs_zerofill() so callers share one spelling with
 * the no-zerofill build below. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
    int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}
464
465 #else
/* libgfapi in this build lacks the zerofill API. */
static inline bool gluster_supports_zerofill(void)
{
    /* Use the bool literal rather than a bare integer for a bool return. */
    return false;
}
470
/*
 * Stub for builds without glfs_zerofill().  Never reached with prealloc
 * enabled, because gluster_supports_zerofill() reports false in this build.
 */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
    int64_t size)
{
    (void)fd;
    (void)offset;
    (void)size;
    return 0;
}
476 #endif
477
/*
 * Create a new image on a gluster volume.
 *
 * Recognised creation options: BLOCK_OPT_SIZE (virtual size) and
 * BLOCK_OPT_PREALLOC ("off", or "full" which requires zerofill support).
 *
 * Returns 0 on success, negative errno on failure (with *errp set for
 * option errors; connection errors are reported by qemu_gluster_init).
 */
static int qemu_gluster_create(const char *filename,
    QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            /* total_size is kept in sectors here.
             * NOTE(review): rounds down if the requested size is not a
             * multiple of BDRV_SECTOR_SIZE — confirm this matches block
             * layer expectations. */
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                    " or GlusterFS doesn't support zerofill API",
                    options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        /* A failed close means the creation failed, even if earlier ok. */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
539
540 static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
541 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
542 {
543 int ret;
544 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
545 BDRVGlusterState *s = bs->opaque;
546 size_t size = nb_sectors * BDRV_SECTOR_SIZE;
547 off_t offset = sector_num * BDRV_SECTOR_SIZE;
548
549 acb->size = size;
550 acb->ret = 0;
551 acb->coroutine = qemu_coroutine_self();
552
553 if (write) {
554 ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
555 &gluster_finish_aiocb, acb);
556 } else {
557 ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
558 &gluster_finish_aiocb, acb);
559 }
560
561 if (ret < 0) {
562 ret = -errno;
563 goto out;
564 }
565
566 qemu_coroutine_yield();
567 ret = acb->ret;
568
569 out:
570 g_slice_free(GlusterAIOCB, acb);
571 return ret;
572 }
573
574 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
575 {
576 int ret;
577 BDRVGlusterState *s = bs->opaque;
578
579 ret = glfs_ftruncate(s->fd, offset);
580 if (ret < 0) {
581 return -errno;
582 }
583
584 return 0;
585 }
586
/* Coroutine read entry point: delegate to the common r/w path (write=0). */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
592
/* Coroutine write entry point: delegate to the common r/w path (write=1). */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
598
599 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
600 {
601 int ret;
602 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
603 BDRVGlusterState *s = bs->opaque;
604
605 acb->size = 0;
606 acb->ret = 0;
607 acb->coroutine = qemu_coroutine_self();
608
609 ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
610 if (ret < 0) {
611 ret = -errno;
612 goto out;
613 }
614
615 qemu_coroutine_yield();
616 ret = acb->ret;
617
618 out:
619 g_slice_free(GlusterAIOCB, acb);
620 return ret;
621 }
622
623 #ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Discard (unmap) a range via glfs_discard_async(); yields until the
 * completion callback resumes us.  Returns 0 on success, negative errno.
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    /* size 0 (not the discard length): the callback treats ret == 0 as
     * success, which is what a completed discard reports. */
    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        /* Submission failed; no completion callback will run. */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
650 #endif
651
652 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
653 {
654 BDRVGlusterState *s = bs->opaque;
655 int64_t ret;
656
657 ret = glfs_lseek(s->fd, 0, SEEK_END);
658 if (ret < 0) {
659 return -errno;
660 } else {
661 return ret;
662 }
663 }
664
665 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
666 {
667 BDRVGlusterState *s = bs->opaque;
668 struct stat st;
669 int ret;
670
671 ret = glfs_fstat(s->fd, &st);
672 if (ret < 0) {
673 return -errno;
674 } else {
675 return st.st_blocks * 512;
676 }
677 }
678
/* Close the image: release the fd, then tear down the glfs connection. */
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}
689
/* Whether a freshly created image reads as zeroes; answer no, since the
 * volume may sit on a block device with stale data. */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
695
/* Creation options advertised by all four gluster BlockDrivers below. */
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};
709
/* Generic "gluster://" driver; the transport defaults to tcp at parse time. */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
736
/* "gluster+tcp://" driver; identical callbacks, explicit tcp transport. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
763
/* "gluster+unix://" driver; server address comes from ?socket=... */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
790
/* "gluster+rdma://" driver; identical callbacks, rdma transport. */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
817
/*
 * Register one BlockDriver per supported transport scheme.  All four
 * share the same callbacks and differ only in .protocol_name.
 */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);