1 | /* | |
2 | * GlusterFS backend for QEMU | |
3 | * | |
4 | * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com> | |
5 | * | |
6 | * Pipe handling mechanism in AIO implementation is derived from | |
7 | * block/rbd.c. Hence, | |
8 | * | |
9 | * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>, | |
10 | * Josh Durgin <josh.durgin@dreamhost.com> | |
11 | * | |
12 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
13 | * the COPYING file in the top-level directory. | |
14 | * | |
15 | * Contributions after 2012-01-13 are licensed under the terms of the | |
16 | * GNU GPL, version 2 or (at your option) any later version. | |
17 | */ | |
18 | #include <glusterfs/api/glfs.h> | |
19 | #include "block/block_int.h" | |
20 | #include "qemu/sockets.h" | |
21 | #include "qemu/uri.h" | |
22 | ||
/*
 * Per-request state shared between the coroutine that issues an async
 * glfs_*_async() request and the GlusterFS callback that completes it.
 */
typedef struct GlusterAIOCB {
    int64_t size;         /* expected transfer size in bytes; the completion
                             callback compares the actual result against it */
    int ret;              /* final result: 0 on success, negative errno */
    QEMUBH *bh;           /* bottom half used to re-enter the coroutine */
    Coroutine *coroutine; /* coroutine waiting for this request */
} GlusterAIOCB;
29 | ||
/* Per-BlockDriverState gluster connection state. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;  /* connection handle to the gluster volume */
    struct glfs_fd *fd; /* open handle to the image file on that volume */
} BDRVGlusterState;
34 | ||
/*
 * NOTE(review): these pipe-end indices look like leftovers from the older
 * pipe-based AIO completion mechanism mentioned in the file header; nothing
 * visible in this file references them -- confirm before removing.
 */
#define GLUSTER_FD_READ 0
#define GLUSTER_FD_WRITE 1
37 | ||
/*
 * Parsed form of a gluster[+transport]:// URI (see the format comment
 * above qemu_gluster_parseuri()).  All strings are heap-allocated and
 * released by qemu_gluster_gconf_free().
 */
typedef struct GlusterConf {
    char *server;    /* hostname/IP, or unix socket path for unix transport */
    int port;        /* 0 lets gluster pick its default port */
    char *volname;   /* gluster volume name */
    char *image;     /* image path within the volume */
    char *transport; /* "tcp", "unix" or "rdma" */
} GlusterConf;
45 | ||
46 | static void qemu_gluster_gconf_free(GlusterConf *gconf) | |
47 | { | |
48 | if (gconf) { | |
49 | g_free(gconf->server); | |
50 | g_free(gconf->volname); | |
51 | g_free(gconf->image); | |
52 | g_free(gconf->transport); | |
53 | g_free(gconf); | |
54 | } | |
55 | } | |
56 | ||
57 | static int parse_volume_options(GlusterConf *gconf, char *path) | |
58 | { | |
59 | char *p, *q; | |
60 | ||
61 | if (!path) { | |
62 | return -EINVAL; | |
63 | } | |
64 | ||
65 | /* volume */ | |
66 | p = q = path + strspn(path, "/"); | |
67 | p += strcspn(p, "/"); | |
68 | if (*p == '\0') { | |
69 | return -EINVAL; | |
70 | } | |
71 | gconf->volname = g_strndup(q, p - q); | |
72 | ||
73 | /* image */ | |
74 | p += strspn(p, "/"); | |
75 | if (*p == '\0') { | |
76 | return -EINVAL; | |
77 | } | |
78 | gconf->image = g_strdup(p); | |
79 | return 0; | |
80 | } | |
81 | ||
82 | /* | |
83 | * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...] | |
84 | * | |
85 | * 'gluster' is the protocol. | |
86 | * | |
87 | * 'transport' specifies the transport type used to connect to gluster | |
88 | * management daemon (glusterd). Valid transport types are | |
89 | * tcp, unix and rdma. If a transport type isn't specified, then tcp | |
90 | * type is assumed. | |
91 | * | |
92 | * 'server' specifies the server where the volume file specification for | |
93 | * the given volume resides. This can be either hostname, ipv4 address | |
94 | * or ipv6 address. ipv6 address needs to be within square brackets [ ]. | |
 * If transport type is 'unix', then 'server' field should not be specified.
96 | * The 'socket' field needs to be populated with the path to unix domain | |
97 | * socket. | |
98 | * | |
99 | * 'port' is the port number on which glusterd is listening. This is optional | |
100 | * and if not specified, QEMU will send 0 which will make gluster to use the | |
101 | * default port. If the transport type is unix, then 'port' should not be | |
102 | * specified. | |
103 | * | |
104 | * 'volname' is the name of the gluster volume which contains the VM image. | |
105 | * | |
106 | * 'image' is the path to the actual VM image that resides on gluster volume. | |
107 | * | |
108 | * Examples: | |
109 | * | |
110 | * file=gluster://1.2.3.4/testvol/a.img | |
111 | * file=gluster+tcp://1.2.3.4/testvol/a.img | |
112 | * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img | |
113 | * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img | |
114 | * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img | |
115 | * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img | |
116 | * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket | |
117 | * file=gluster+rdma://1.2.3.4:24007/testvol/a.img | |
118 | */ | |
/*
 * Fill @gconf from a gluster[+transport]:// URI (format documented in the
 * comment above).  Returns 0 on success or -EINVAL on any malformed or
 * inconsistent input.
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: a bare "gluster" scheme defaults to tcp */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    /* volname and image come from the URI path */
    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /*
     * The only accepted query parameter is "socket", and only with the
     * unix transport; conversely the unix transport requires exactly that
     * one parameter.
     */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: no host/port allowed; "server" carries the
         * socket path instead */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        /* port 0 makes gluster fall back to its default port */
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
179 | ||
/*
 * Parse @filename into @gconf, connect to the gluster volume it names and
 * return an initialized glfs handle.  On failure, returns NULL with errno
 * describing the cause (and an error in @errp on the paths that set one).
 * The caller owns the returned handle and must release it with glfs_fini().
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
    Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        /* export the parse failure through errno like the glfs_* calls do */
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
            gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* glfs_fini() may clobber errno; preserve the original cause */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
234 | ||
235 | static void qemu_gluster_complete_aio(void *opaque) | |
236 | { | |
237 | GlusterAIOCB *acb = (GlusterAIOCB *)opaque; | |
238 | ||
239 | qemu_bh_delete(acb->bh); | |
240 | acb->bh = NULL; | |
241 | qemu_coroutine_enter(acb->coroutine, NULL); | |
242 | } | |
243 | ||
/*
 * AIO completion callback invoked from a GlusterFS-internal thread (not a
 * QEMU thread).  Records the request result and schedules a bottom half so
 * the waiting coroutine is re-entered from the main loop.
 *
 * NOTE(review): qemu_bh_new()/qemu_bh_schedule() are called here from a
 * non-QEMU thread -- confirm this is safe with the BH implementation in use.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
262 | ||
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(): only the gluster URI. */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
276 | ||
277 | static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) | |
278 | { | |
279 | assert(open_flags != NULL); | |
280 | ||
281 | *open_flags |= O_BINARY; | |
282 | ||
283 | if (bdrv_flags & BDRV_O_RDWR) { | |
284 | *open_flags |= O_RDWR; | |
285 | } else { | |
286 | *open_flags |= O_RDONLY; | |
287 | } | |
288 | ||
289 | if ((bdrv_flags & BDRV_O_NOCACHE)) { | |
290 | *open_flags |= O_DIRECT; | |
291 | } | |
292 | } | |
293 | ||
/*
 * Open a gluster-backed image: parse the "filename" runtime option as a
 * gluster URI, connect to the volume and open the image file.
 * Returns 0 on success or a negative errno; on failure, any partially
 * acquired glfs/fd state is torn down before returning.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() reports the failure cause via errno */
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* failure path: undo whatever was set up above */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
342 | ||
343 | #ifdef CONFIG_GLUSTERFS_ZEROFILL | |
/*
 * Coroutine: zero the given sector range via glfs_zerofill_async() and
 * yield until gluster_finish_aiocb() schedules our re-entry with the
 * result.  Returns 0 on success or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    /* re-entered by qemu_gluster_complete_aio() when the request finishes */
    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
370 | ||
/*
 * True when built against a GlusterFS with the zerofill API
 * (CONFIG_GLUSTERFS_ZEROFILL), enabling "full" preallocation at create
 * time.
 */
static inline bool gluster_supports_zerofill(void)
{
    /* use the bool literal rather than a bare 1 for a bool return */
    return true;
}
375 | ||
/*
 * Synchronously zero-fill [offset, offset+size) on the open image; used to
 * implement "full" preallocation in qemu_gluster_create().  Returns the
 * glfs_zerofill() result (0 on success, negative on failure).
 */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}
381 | ||
382 | #else | |
/*
 * Built without the GlusterFS zerofill API: "full" preallocation is not
 * available and qemu_gluster_create() rejects it.
 */
static inline bool gluster_supports_zerofill(void)
{
    /* use the bool literal rather than a bare 0 for a bool return */
    return false;
}
387 | ||
/*
 * Stub used when the zerofill API is unavailable.  qemu_gluster_create()
 * only enables preallocation when gluster_supports_zerofill() is true, so
 * this is never reached with real work to do; report success.
 */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
393 | #endif | |
394 | ||
/*
 * Create a new image on a gluster volume.  Honours BLOCK_OPT_SIZE and
 * BLOCK_OPT_PREALLOC ("off", or "full" when the zerofill API is available).
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the requested size is converted to 512-byte sectors with
 * integer division, so a size that is not sector-aligned is silently
 * rounded down -- confirm this matches the intended semantics.
 */
static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -EINVAL;
        goto out;
    }

    /* walk the NULL-name-terminated option list */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                    gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                    " or GlusterFS doesn't support zerofill API",
                           options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        /* glfs_close() flushes; a failure here must be reported too */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
456 | ||
/*
 * Coroutine: common read/write path.  Issues an async preadv/pwritev on
 * the gluster fd, then yields until gluster_finish_aiocb() schedules our
 * re-entry.  @write selects the direction.  Returns 0 on success or a
 * negative errno.
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    /* the callback compares the transfer result against acb->size */
    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    /* re-entered by qemu_gluster_complete_aio() when the request finishes */
    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
490 | ||
491 | static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset) | |
492 | { | |
493 | int ret; | |
494 | BDRVGlusterState *s = bs->opaque; | |
495 | ||
496 | ret = glfs_ftruncate(s->fd, offset); | |
497 | if (ret < 0) { | |
498 | return -errno; | |
499 | } | |
500 | ||
501 | return 0; | |
502 | } | |
503 | ||
/* Coroutine read entry point: delegate to the common r/w path. */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
509 | ||
/* Coroutine write entry point: delegate to the common r/w path. */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
515 | ||
/*
 * Coroutine: flush the image to stable storage via glfs_fsync_async(),
 * yielding until the completion callback re-enters us.
 * acb->size is 0 so the callback treats only ret == 0 as success.
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    /* re-entered by qemu_gluster_complete_aio() when the flush finishes */
    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
539 | ||
540 | #ifdef CONFIG_GLUSTERFS_DISCARD | |
/*
 * Coroutine: discard the given sector range via glfs_discard_async() and
 * yield until the completion callback re-enters us.
 * acb->size is 0 so the callback treats only ret == 0 as success.
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    /* re-entered by qemu_gluster_complete_aio() when the discard finishes */
    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
567 | #endif | |
568 | ||
569 | static int64_t qemu_gluster_getlength(BlockDriverState *bs) | |
570 | { | |
571 | BDRVGlusterState *s = bs->opaque; | |
572 | int64_t ret; | |
573 | ||
574 | ret = glfs_lseek(s->fd, 0, SEEK_END); | |
575 | if (ret < 0) { | |
576 | return -errno; | |
577 | } else { | |
578 | return ret; | |
579 | } | |
580 | } | |
581 | ||
582 | static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs) | |
583 | { | |
584 | BDRVGlusterState *s = bs->opaque; | |
585 | struct stat st; | |
586 | int ret; | |
587 | ||
588 | ret = glfs_fstat(s->fd, &st); | |
589 | if (ret < 0) { | |
590 | return -errno; | |
591 | } else { | |
592 | return st.st_blocks * 512; | |
593 | } | |
594 | } | |
595 | ||
/* Close the image fd (if open) and tear down the volume connection. */
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}
606 | ||
/*
 * Report whether a freshly created image reads back as zeroes.
 * Answer conservatively: a GlusterFS volume could be backed by a block
 * device, so we cannot guarantee zero initialization.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
612 | ||
/* Options accepted by qemu_gluster_create(); NULL-name terminated. */
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};
626 | ||
/* Driver for bare "gluster://" URIs (transport defaults to tcp). */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
650 | ||
/* Same driver registered under the explicit "gluster+tcp" scheme. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
674 | ||
/* Same driver registered under the "gluster+unix" (unix socket) scheme. */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
698 | ||
/* Same driver registered under the "gluster+rdma" scheme. */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
722 | ||
/* Register all four protocol-scheme variants of the gluster driver. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);