]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * GlusterFS backend for QEMU | |
3 | * | |
4 | * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com> | |
5 | * | |
6 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
7 | * See the COPYING file in the top-level directory. | |
8 | * | |
9 | */ | |
10 | ||
11 | #include "qemu/osdep.h" | |
12 | #include "qemu/units.h" | |
13 | #include <glusterfs/api/glfs.h> | |
14 | #include "block/block_int.h" | |
15 | #include "block/qdict.h" | |
16 | #include "qapi/error.h" | |
17 | #include "qapi/qmp/qdict.h" | |
18 | #include "qapi/qmp/qerror.h" | |
19 | #include "qemu/uri.h" | |
20 | #include "qemu/error-report.h" | |
21 | #include "qemu/module.h" | |
22 | #include "qemu/option.h" | |
23 | #include "qemu/cutils.h" | |
24 | ||
25 | #ifdef CONFIG_GLUSTERFS_FTRUNCATE_HAS_STAT | |
26 | # define glfs_ftruncate(fd, offset) glfs_ftruncate(fd, offset, NULL, NULL) | |
27 | #endif | |
28 | ||
29 | #define GLUSTER_OPT_FILENAME "filename" | |
30 | #define GLUSTER_OPT_VOLUME "volume" | |
31 | #define GLUSTER_OPT_PATH "path" | |
32 | #define GLUSTER_OPT_TYPE "type" | |
33 | #define GLUSTER_OPT_SERVER_PATTERN "server." | |
34 | #define GLUSTER_OPT_HOST "host" | |
35 | #define GLUSTER_OPT_PORT "port" | |
36 | #define GLUSTER_OPT_TO "to" | |
37 | #define GLUSTER_OPT_IPV4 "ipv4" | |
38 | #define GLUSTER_OPT_IPV6 "ipv6" | |
39 | #define GLUSTER_OPT_SOCKET "socket" | |
40 | #define GLUSTER_OPT_DEBUG "debug" | |
41 | #define GLUSTER_DEFAULT_PORT 24007 | |
42 | #define GLUSTER_DEBUG_DEFAULT 4 | |
43 | #define GLUSTER_DEBUG_MAX 9 | |
44 | #define GLUSTER_OPT_LOGFILE "logfile" | |
45 | #define GLUSTER_LOGFILE_DEFAULT "-" /* handled in libgfapi as /dev/stderr */ | |
46 | /* | |
47 | * Several versions of GlusterFS (3.12? -> 6.0.1) fail when the transfer size | |
48 | * is greater or equal to 1024 MiB, so we are limiting the transfer size to 512 | |
49 | * MiB to avoid this rare issue. | |
50 | */ | |
51 | #define GLUSTER_MAX_TRANSFER (512 * MiB) | |
52 | ||
53 | #define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n" | |
54 | ||
/*
 * Per-request state shared between the coroutine that issues a gluster
 * AIO request and gluster_finish_aiocb(), which runs in a GlusterFS
 * thread on completion.
 */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected transfer size, to detect short I/O */
    int ret;                 /* 0 on success, negative errno otherwise */
    Coroutine *coroutine;    /* coroutine to wake when the request finishes */
    AioContext *aio_context; /* context the coroutine must be resumed in */
} GlusterAIOCB;
61 | ||
/* Per-image driver state, attached to BlockDriverState.opaque. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;       /* connection to the gluster volume */
    struct glfs_fd *fd;      /* open handle for the image file */
    char *logfile;           /* libgfapi log file path */
    bool supports_seek_data; /* SEEK_DATA/SEEK_HOLE usable, see
                              * qemu_gluster_test_seek() */
    int debug;               /* gluster log level, 0..GLUSTER_DEBUG_MAX */
} BDRVGlusterState;
69 | ||
/*
 * Second connection/fd staged by qemu_gluster_reopen_prepare() until
 * reopen_commit() swaps it in or reopen_abort() discards it.
 */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;
74 | ||
75 | ||
/* A cached, reference-counted glfs connection keyed by volume name. */
typedef struct GlfsPreopened {
    char *volume;
    glfs_t *fs;
    int ref; /* user count; connection is torn down when it drops to 0 */
} GlfsPreopened;

typedef struct ListElement {
    QLIST_ENTRY(ListElement) list;
    GlfsPreopened saved;
} ListElement;

/* All live connections, so multiple opens of one volume share a glfs. */
static QLIST_HEAD(, ListElement) glfs_list;
88 | ||
/* Creation options advertised for qemu-img create on gluster images. */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            /* accepted set depends on gfapi features found at build time */
            .help = "Preallocation mode (allowed values: off"
#ifdef CONFIG_GLUSTERFS_FALLOCATE
                    ", falloc"
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
                    ", full"
#endif
                    ")"
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        {
            .name = GLUSTER_OPT_LOGFILE,
            .type = QEMU_OPT_STRING,
            .help = "Logfile path of libgfapi",
        },
        { /* end of list */ }
    }
};
123 | ||
/* Runtime options for the legacy URI ("filename") open path. */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_FILENAME,
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        {
            .name = GLUSTER_OPT_LOGFILE,
            .type = QEMU_OPT_STRING,
            .help = "Logfile path of libgfapi",
        },
        { /* end of list */ }
    },
};
146 | ||
/*
 * Top-level options for the QAPI/JSON open path; the per-server
 * "server.N.*" sub-options are parsed separately (see
 * qemu_gluster_parse_json()).
 */
static QemuOptsList runtime_json_opts = {
    .name = "gluster_json",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_VOLUME,
            .type = QEMU_OPT_STRING,
            .help = "name of gluster volume where VM image resides",
        },
        {
            .name = GLUSTER_OPT_PATH,
            .type = QEMU_OPT_STRING,
            .help = "absolute path to image file in gluster volume",
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        { /* end of list */ }
    },
};
169 | ||
/* First parse pass over each "server.N" entry: only the address type. */
static QemuOptsList runtime_type_opts = {
    .name = "gluster_type",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "inet|unix",
        },
        { /* end of list */ }
    },
};
182 | ||
/*
 * "server.N" options for type=unix; "socket" is the legacy spelling,
 * "path" the QAPI one — qemu_gluster_parse_json() rejects using both.
 */
static QemuOptsList runtime_unix_opts = {
    .name = "gluster_unix",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_SOCKET,
            .type = QEMU_OPT_STRING,
            .help = "socket file path (legacy)",
        },
        {
            .name = GLUSTER_OPT_PATH,
            .type = QEMU_OPT_STRING,
            .help = "socket file path (QAPI)",
        },
        { /* end of list */ }
    },
};
200 | ||
201 | static QemuOptsList runtime_inet_opts = { | |
202 | .name = "gluster_inet", | |
203 | .head = QTAILQ_HEAD_INITIALIZER(runtime_inet_opts.head), | |
204 | .desc = { | |
205 | { | |
206 | .name = GLUSTER_OPT_TYPE, | |
207 | .type = QEMU_OPT_STRING, | |
208 | .help = "inet|unix", | |
209 | }, | |
210 | { | |
211 | .name = GLUSTER_OPT_HOST, | |
212 | .type = QEMU_OPT_STRING, | |
213 | .help = "host address (hostname/ipv4/ipv6 addresses)", | |
214 | }, | |
215 | { | |
216 | .name = GLUSTER_OPT_PORT, | |
217 | .type = QEMU_OPT_STRING, | |
218 | .help = "port number on which glusterd is listening (default 24007)", | |
219 | }, | |
220 | { | |
221 | .name = "to", | |
222 | .type = QEMU_OPT_NUMBER, | |
223 | .help = "max port number, not supported by gluster", | |
224 | }, | |
225 | { | |
226 | .name = "ipv4", | |
227 | .type = QEMU_OPT_BOOL, | |
228 | .help = "ipv4 bool value, not supported by gluster", | |
229 | }, | |
230 | { | |
231 | .name = "ipv6", | |
232 | .type = QEMU_OPT_BOOL, | |
233 | .help = "ipv6 bool value, not supported by gluster", | |
234 | }, | |
235 | { /* end of list */ } | |
236 | }, | |
237 | }; | |
238 | ||
239 | static void glfs_set_preopened(const char *volume, glfs_t *fs) | |
240 | { | |
241 | ListElement *entry = NULL; | |
242 | ||
243 | entry = g_new(ListElement, 1); | |
244 | ||
245 | entry->saved.volume = g_strdup(volume); | |
246 | ||
247 | entry->saved.fs = fs; | |
248 | entry->saved.ref = 1; | |
249 | ||
250 | QLIST_INSERT_HEAD(&glfs_list, entry, list); | |
251 | } | |
252 | ||
253 | static glfs_t *glfs_find_preopened(const char *volume) | |
254 | { | |
255 | ListElement *entry = NULL; | |
256 | ||
257 | QLIST_FOREACH(entry, &glfs_list, list) { | |
258 | if (strcmp(entry->saved.volume, volume) == 0) { | |
259 | entry->saved.ref++; | |
260 | return entry->saved.fs; | |
261 | } | |
262 | } | |
263 | ||
264 | return NULL; | |
265 | } | |
266 | ||
/*
 * Drop one reference to the pre-opened connection @fs.  When the last
 * reference goes away the connection is shut down with glfs_fini() and
 * its cache entry is freed.  A NULL @fs is silently ignored.
 */
static void glfs_clear_preopened(glfs_t *fs)
{
    ListElement *entry = NULL;
    ListElement *next;

    if (fs == NULL) {
        return;
    }

    QLIST_FOREACH_SAFE(entry, &glfs_list, list, next) {
        if (entry->saved.fs == fs) {
            /* Still referenced by another user: just drop our ref. */
            if (--entry->saved.ref) {
                return;
            }

            QLIST_REMOVE(entry, list);

            glfs_fini(entry->saved.fs);
            g_free(entry->saved.volume);
            g_free(entry);
        }
    }
}
290 | ||
291 | static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path) | |
292 | { | |
293 | char *p, *q; | |
294 | ||
295 | if (!path) { | |
296 | return -EINVAL; | |
297 | } | |
298 | ||
299 | /* volume */ | |
300 | p = q = path + strspn(path, "/"); | |
301 | p += strcspn(p, "/"); | |
302 | if (*p == '\0') { | |
303 | return -EINVAL; | |
304 | } | |
305 | gconf->volume = g_strndup(q, p - q); | |
306 | ||
307 | /* path */ | |
308 | p += strspn(p, "/"); | |
309 | if (*p == '\0') { | |
310 | return -EINVAL; | |
311 | } | |
312 | gconf->path = g_strdup(p); | |
313 | return 0; | |
314 | } | |
315 | ||
/*
 * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp or unix. If a transport type isn't specified, then tcp type is assumed.
 *
 * 'host' specifies the host where the volume file specification for
 * the given volume resides. This can be either hostname or ipv4 address.
 * If transport type is 'unix', then 'host' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0 which will make gluster to use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volume' is the name of the gluster volume which contains the VM image.
 *
 * 'path' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 */
/*
 * Fill @gconf from a legacy URI.  Returns 0 or -EINVAL; partially filled
 * @gconf fields are released by the caller's qapi_free_ on failure.
 */
static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
                                  const char *filename)
{
    SocketAddress *gsconf;
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* a URI can only ever describe a single server */
    gconf->server = g_new0(SocketAddressList, 1);
    gconf->server->value = gsconf = g_new0(SocketAddress, 1);

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gsconf->type = SOCKET_ADDRESS_TYPE_INET;
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gsconf->type = SOCKET_ADDRESS_TYPE_INET;
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gsconf->type = SOCKET_ADDRESS_TYPE_UNIX;
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        /* rdma is accepted for compatibility but downgraded to tcp */
        gsconf->type = SOCKET_ADDRESS_TYPE_INET;
        warn_report("rdma feature is not supported, falling back to tcp");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /* exactly one query parameter ("socket") is valid, and only for unix */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gsconf->u.q_unix.path = g_strdup(qp->p[0].value);
    } else {
        gsconf->u.inet.host = g_strdup(uri->server ? uri->server : "localhost");
        if (uri->port) {
            gsconf->u.inet.port = g_strdup_printf("%d", uri->port);
        } else {
            gsconf->u.inet.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT);
        }
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
418 | ||
/*
 * Create (or reuse) a glfs connection for the volume described by @gconf.
 *
 * Connections are shared per volume via the glfs_list cache; on any
 * failure the reference taken here is dropped again.  Returns the glfs
 * object on success, or NULL with errno set on failure.
 */
static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
                                           Error **errp)
{
    struct glfs *glfs;
    int ret;
    int old_errno;
    SocketAddressList *server;
    unsigned long long port;

    /* reuse an existing connection to the same volume if there is one */
    glfs = glfs_find_preopened(gconf->volume);
    if (glfs) {
        return glfs;
    }

    glfs = glfs_new(gconf->volume);
    if (!glfs) {
        goto out;
    }

    glfs_set_preopened(gconf->volume, glfs);

    /* register every configured volfile server with libgfapi */
    for (server = gconf->server; server; server = server->next) {
        switch (server->value->type) {
        case SOCKET_ADDRESS_TYPE_UNIX:
            ret = glfs_set_volfile_server(glfs, "unix",
                                          server->value->u.q_unix.path, 0);
            break;
        case SOCKET_ADDRESS_TYPE_INET:
            if (parse_uint_full(server->value->u.inet.port, &port, 10) < 0 ||
                port > 65535) {
                error_setg(errp, "'%s' is not a valid port number",
                           server->value->u.inet.port);
                errno = EINVAL;
                goto out;
            }
            ret = glfs_set_volfile_server(glfs, "tcp",
                                          server->value->u.inet.host,
                                          (int)port);
            break;
        case SOCKET_ADDRESS_TYPE_VSOCK:
        case SOCKET_ADDRESS_TYPE_FD:
        default:
            /* the parsers only ever produce inet or unix addresses */
            abort();
        }

        if (ret < 0) {
            goto out;
        }
    }

    ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg(errp, "Gluster connection for volume %s, path %s failed"
                   " to connect", gconf->volume, gconf->path);
        /* name every server we tried, to help locate the bad one */
        for (server = gconf->server; server; server = server->next) {
            if (server->value->type == SOCKET_ADDRESS_TYPE_UNIX) {
                error_append_hint(errp, "hint: failed on socket %s ",
                                  server->value->u.q_unix.path);
            } else {
                error_append_hint(errp, "hint: failed on host %s and port %s ",
                                  server->value->u.inet.host,
                                  server->value->u.inet.port);
            }
        }

        error_append_hint(errp, "Please refer to gluster logs for more info\n");

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0) {
            errno = EINVAL;
        }

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* preserve the original errno across the cache cleanup */
        old_errno = errno;
        glfs_clear_preopened(glfs);
        errno = old_errno;
    }
    return NULL;
}
508 | ||
/*
 * Convert the json formatted command line into qapi.
 *
 * Fills @gconf (volume, path, server list) from @options.  Each
 * "server.N" sub-dict is parsed in two passes: first only "type", then
 * the type-specific options (inet or unix).  Returns 0 on success or
 * -EINVAL with @errp set; partially built @gconf fields are released by
 * the caller via qapi_free_BlockdevOptionsGluster().
 */
static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
                                   QDict *options, Error **errp)
{
    QemuOpts *opts;
    SocketAddress *gsconf = NULL;
    SocketAddressList *curr = NULL;  /* tail of gconf->server for appending */
    QDict *backing_options = NULL;
    Error *local_err = NULL;
    char *str = NULL;
    const char *ptr;
    int i, type, num_servers;

    /* create opts info from runtime_json_opts list */
    opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        goto out;
    }

    num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN);
    if (num_servers < 1) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, "server");
        goto out;
    }

    ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
    if (!ptr) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME);
        goto out;
    }
    gconf->volume = g_strdup(ptr);

    ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
    if (!ptr) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH);
        goto out;
    }
    gconf->path = g_strdup(ptr);
    qemu_opts_del(opts);

    /* one iteration per "server.N." sub-dict */
    for (i = 0; i < num_servers; i++) {
        str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
        qdict_extract_subqdict(options, &backing_options, str);

        /* create opts info from runtime_type_opts list */
        opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort);
        qemu_opts_absorb_qdict(opts, backing_options, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE);
        if (!ptr) {
            error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
            error_append_hint(&local_err, GERR_INDEX_HINT, i);
            goto out;

        }
        gsconf = g_new0(SocketAddress, 1);
        if (!strcmp(ptr, "tcp")) {
            ptr = "inet"; /* accept legacy "tcp" */
        }
        type = qapi_enum_parse(&SocketAddressType_lookup, ptr, -1, NULL);
        if (type != SOCKET_ADDRESS_TYPE_INET
            && type != SOCKET_ADDRESS_TYPE_UNIX) {
            error_setg(&local_err,
                       "Parameter '%s' may be 'inet' or 'unix'",
                       GLUSTER_OPT_TYPE);
            error_append_hint(&local_err, GERR_INDEX_HINT, i);
            goto out;
        }
        gsconf->type = type;
        qemu_opts_del(opts);

        if (gsconf->type == SOCKET_ADDRESS_TYPE_INET) {
            /* create opts info from runtime_inet_opts list */
            opts = qemu_opts_create(&runtime_inet_opts, NULL, 0, &error_abort);
            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
            if (local_err) {
                goto out;
            }

            ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_HOST);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.inet.host = g_strdup(ptr);
            ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_PORT);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.inet.port = g_strdup(ptr);

            /* defend for unsupported fields in InetSocketAddress,
             * i.e. @ipv4, @ipv6 and @to
             */
            ptr = qemu_opt_get(opts, GLUSTER_OPT_TO);
            if (ptr) {
                gsconf->u.inet.has_to = true;
            }
            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4);
            if (ptr) {
                gsconf->u.inet.has_ipv4 = true;
            }
            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6);
            if (ptr) {
                gsconf->u.inet.has_ipv6 = true;
            }
            if (gsconf->u.inet.has_to) {
                error_setg(&local_err, "Parameter 'to' not supported");
                goto out;
            }
            if (gsconf->u.inet.has_ipv4 || gsconf->u.inet.has_ipv6) {
                error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported");
                goto out;
            }
            qemu_opts_del(opts);
        } else {
            /* create opts info from runtime_unix_opts list */
            opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort);
            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
            if (local_err) {
                goto out;
            }

            /* "path" (QAPI) wins over "socket" (legacy); both set is an error */
            ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
            if (!ptr) {
                ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET);
            } else if (qemu_opt_get(opts, GLUSTER_OPT_SOCKET)) {
                error_setg(&local_err,
                           "Conflicting parameters 'path' and 'socket'");
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_PATH);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.q_unix.path = g_strdup(ptr);
            qemu_opts_del(opts);
        }

        /* append the parsed address to gconf->server */
        if (gconf->server == NULL) {
            gconf->server = g_new0(SocketAddressList, 1);
            gconf->server->value = gsconf;
            curr = gconf->server;
        } else {
            curr->next = g_new0(SocketAddressList, 1);
            curr->next->value = gsconf;
            curr = curr->next;
        }
        gsconf = NULL;  /* ownership transferred to the list */

        qobject_unref(backing_options);
        backing_options = NULL;
        g_free(str);
        str = NULL;
    }

    return 0;

out:
    error_propagate(errp, local_err);
    qapi_free_SocketAddress(gsconf);
    qemu_opts_del(opts);
    g_free(str);
    qobject_unref(backing_options);
    errno = EINVAL;
    return -errno;
}
690 | ||
691 | /* Converts options given in @filename and the @options QDict into the QAPI | |
692 | * object @gconf. */ | |
693 | static int qemu_gluster_parse(BlockdevOptionsGluster *gconf, | |
694 | const char *filename, | |
695 | QDict *options, Error **errp) | |
696 | { | |
697 | int ret; | |
698 | if (filename) { | |
699 | ret = qemu_gluster_parse_uri(gconf, filename); | |
700 | if (ret < 0) { | |
701 | error_setg(errp, "invalid URI %s", filename); | |
702 | error_append_hint(errp, "Usage: file=gluster[+transport]://" | |
703 | "[host[:port]]volume/path[?socket=...]" | |
704 | "[,file.debug=N]" | |
705 | "[,file.logfile=/path/filename.log]\n"); | |
706 | return ret; | |
707 | } | |
708 | } else { | |
709 | ret = qemu_gluster_parse_json(gconf, options, errp); | |
710 | if (ret < 0) { | |
711 | error_append_hint(errp, "Usage: " | |
712 | "-drive driver=qcow2,file.driver=gluster," | |
713 | "file.volume=testvol,file.path=/path/a.qcow2" | |
714 | "[,file.debug=9]" | |
715 | "[,file.logfile=/path/filename.log]," | |
716 | "file.server.0.type=inet," | |
717 | "file.server.0.host=1.2.3.4," | |
718 | "file.server.0.port=24007," | |
719 | "file.server.1.transport=unix," | |
720 | "file.server.1.path=/var/run/glusterd.socket ..." | |
721 | "\n"); | |
722 | return ret; | |
723 | } | |
724 | } | |
725 | ||
726 | return 0; | |
727 | } | |
728 | ||
729 | static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, | |
730 | const char *filename, | |
731 | QDict *options, Error **errp) | |
732 | { | |
733 | int ret; | |
734 | ||
735 | ret = qemu_gluster_parse(gconf, filename, options, errp); | |
736 | if (ret < 0) { | |
737 | errno = -ret; | |
738 | return NULL; | |
739 | } | |
740 | ||
741 | return qemu_gluster_glfs_init(gconf, errp); | |
742 | } | |
743 | ||
/*
 * AIO callback routine called from GlusterFS thread.
 *
 * Records the completion status in the request's GlusterAIOCB and
 * reschedules the issuing coroutine on its original AioContext; no QEMU
 * block-layer work may be done directly on this (gluster) thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret,
#ifdef CONFIG_GLUSTERFS_IOCB_HAS_STAT
                                 struct glfs_stat *pre, struct glfs_stat *post,
#endif
                                 void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = -errno; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    aio_co_schedule(acb->aio_context, acb->coroutine);
}
765 | ||
766 | static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) | |
767 | { | |
768 | assert(open_flags != NULL); | |
769 | ||
770 | *open_flags |= O_BINARY; | |
771 | ||
772 | if (bdrv_flags & BDRV_O_RDWR) { | |
773 | *open_flags |= O_RDWR; | |
774 | } else { | |
775 | *open_flags |= O_RDONLY; | |
776 | } | |
777 | ||
778 | if ((bdrv_flags & BDRV_O_NOCACHE)) { | |
779 | *open_flags |= O_DIRECT; | |
780 | } | |
781 | } | |
782 | ||
783 | /* | |
784 | * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of | |
785 | * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used. | |
786 | * - Corrected versions return -1 and set errno to EINVAL. | |
787 | * - Versions that support SEEK_DATA/HOLE correctly, will return -1 and set | |
788 | * errno to ENXIO when SEEK_DATA is called with a position of EOF. | |
789 | */ | |
790 | static bool qemu_gluster_test_seek(struct glfs_fd *fd) | |
791 | { | |
792 | off_t ret = 0; | |
793 | ||
794 | #if defined SEEK_HOLE && defined SEEK_DATA | |
795 | off_t eof; | |
796 | ||
797 | eof = glfs_lseek(fd, 0, SEEK_END); | |
798 | if (eof < 0) { | |
799 | /* this should never occur */ | |
800 | return false; | |
801 | } | |
802 | ||
803 | /* this should always fail with ENXIO if SEEK_DATA is supported */ | |
804 | ret = glfs_lseek(fd, eof, SEEK_DATA); | |
805 | #endif | |
806 | ||
807 | return (ret < 0) && (errno == ENXIO); | |
808 | } | |
809 | ||
810 | static int qemu_gluster_open(BlockDriverState *bs, QDict *options, | |
811 | int bdrv_flags, Error **errp) | |
812 | { | |
813 | BDRVGlusterState *s = bs->opaque; | |
814 | int open_flags = 0; | |
815 | int ret = 0; | |
816 | BlockdevOptionsGluster *gconf = NULL; | |
817 | QemuOpts *opts; | |
818 | Error *local_err = NULL; | |
819 | const char *filename, *logfile; | |
820 | ||
821 | opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); | |
822 | qemu_opts_absorb_qdict(opts, options, &local_err); | |
823 | if (local_err) { | |
824 | error_propagate(errp, local_err); | |
825 | ret = -EINVAL; | |
826 | goto out; | |
827 | } | |
828 | ||
829 | filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME); | |
830 | ||
831 | s->debug = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG, | |
832 | GLUSTER_DEBUG_DEFAULT); | |
833 | if (s->debug < 0) { | |
834 | s->debug = 0; | |
835 | } else if (s->debug > GLUSTER_DEBUG_MAX) { | |
836 | s->debug = GLUSTER_DEBUG_MAX; | |
837 | } | |
838 | ||
839 | gconf = g_new0(BlockdevOptionsGluster, 1); | |
840 | gconf->debug = s->debug; | |
841 | gconf->has_debug = true; | |
842 | ||
843 | logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE); | |
844 | s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT); | |
845 | ||
846 | gconf->logfile = g_strdup(s->logfile); | |
847 | gconf->has_logfile = true; | |
848 | ||
849 | s->glfs = qemu_gluster_init(gconf, filename, options, errp); | |
850 | if (!s->glfs) { | |
851 | ret = -errno; | |
852 | goto out; | |
853 | } | |
854 | ||
855 | #ifdef CONFIG_GLUSTERFS_XLATOR_OPT | |
856 | /* Without this, if fsync fails for a recoverable reason (for instance, | |
857 | * ENOSPC), gluster will dump its cache, preventing retries. This means | |
858 | * almost certain data loss. Not all gluster versions support the | |
859 | * 'resync-failed-syncs-after-fsync' key value, but there is no way to | |
860 | * discover during runtime if it is supported (this api returns success for | |
861 | * unknown key/value pairs) */ | |
862 | ret = glfs_set_xlator_option(s->glfs, "*-write-behind", | |
863 | "resync-failed-syncs-after-fsync", | |
864 | "on"); | |
865 | if (ret < 0) { | |
866 | error_setg_errno(errp, errno, "Unable to set xlator key/value pair"); | |
867 | ret = -errno; | |
868 | goto out; | |
869 | } | |
870 | #endif | |
871 | ||
872 | qemu_gluster_parse_flags(bdrv_flags, &open_flags); | |
873 | ||
874 | s->fd = glfs_open(s->glfs, gconf->path, open_flags); | |
875 | ret = s->fd ? 0 : -errno; | |
876 | ||
877 | if (ret == -EACCES || ret == -EROFS) { | |
878 | /* Try to degrade to read-only, but if it doesn't work, still use the | |
879 | * normal error message. */ | |
880 | if (bdrv_apply_auto_read_only(bs, NULL, NULL) == 0) { | |
881 | open_flags = (open_flags & ~O_RDWR) | O_RDONLY; | |
882 | s->fd = glfs_open(s->glfs, gconf->path, open_flags); | |
883 | ret = s->fd ? 0 : -errno; | |
884 | } | |
885 | } | |
886 | ||
887 | s->supports_seek_data = qemu_gluster_test_seek(s->fd); | |
888 | ||
889 | out: | |
890 | qemu_opts_del(opts); | |
891 | qapi_free_BlockdevOptionsGluster(gconf); | |
892 | if (!ret) { | |
893 | return ret; | |
894 | } | |
895 | g_free(s->logfile); | |
896 | if (s->fd) { | |
897 | glfs_close(s->fd); | |
898 | } | |
899 | ||
900 | glfs_clear_preopened(s->glfs); | |
901 | ||
902 | return ret; | |
903 | } | |
904 | ||
/* Cap per-request transfer size; see the GLUSTER_MAX_TRANSFER comment. */
static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.max_transfer = GLUSTER_MAX_TRANSFER;
}
909 | ||
/*
 * Prepare a reopen: build a second, independent glfs connection and fd
 * with the new flags.  The old connection is untouched until _commit()
 * swaps them in or _abort() throws the new ones away.
 *
 * NOTE(review): the connection is re-parsed from state->bs->filename;
 * presumably this only works for URI-style filenames, not images opened
 * purely via blockdev options — confirm against callers.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterState *s;
    BDRVGlusterReopenState *reop_s;
    BlockdevOptionsGluster *gconf;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    s = state->bs->opaque;

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    /* rebuild the connection parameters from the current driver state */
    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug = s->debug;
    gconf->has_debug = true;
    gconf->logfile = g_strdup(s->logfile);
    gconf->has_logfile = true;
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /* same write-behind tweak as in qemu_gluster_open() */
    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync", "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto exit;
    }
#endif

    reop_s->fd = glfs_open(reop_s->glfs, gconf->path, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qapi_free_BlockdevOptionsGluster(gconf);
    return ret;
}
962 | ||
963 | static void qemu_gluster_reopen_commit(BDRVReopenState *state) | |
964 | { | |
965 | BDRVGlusterReopenState *reop_s = state->opaque; | |
966 | BDRVGlusterState *s = state->bs->opaque; | |
967 | ||
968 | ||
969 | /* close the old */ | |
970 | if (s->fd) { | |
971 | glfs_close(s->fd); | |
972 | } | |
973 | ||
974 | glfs_clear_preopened(s->glfs); | |
975 | ||
976 | /* use the newly opened image / connection */ | |
977 | s->fd = reop_s->fd; | |
978 | s->glfs = reop_s->glfs; | |
979 | ||
980 | g_free(state->opaque); | |
981 | state->opaque = NULL; | |
982 | ||
983 | return; | |
984 | } | |
985 | ||
986 | ||
987 | static void qemu_gluster_reopen_abort(BDRVReopenState *state) | |
988 | { | |
989 | BDRVGlusterReopenState *reop_s = state->opaque; | |
990 | ||
991 | if (reop_s == NULL) { | |
992 | return; | |
993 | } | |
994 | ||
995 | if (reop_s->fd) { | |
996 | glfs_close(reop_s->fd); | |
997 | } | |
998 | ||
999 | glfs_clear_preopened(reop_s->glfs); | |
1000 | ||
1001 | g_free(state->opaque); | |
1002 | state->opaque = NULL; | |
1003 | ||
1004 | return; | |
1005 | } | |
1006 | ||
1007 | #ifdef CONFIG_GLUSTERFS_ZEROFILL | |
1008 | static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs, | |
1009 | int64_t offset, | |
1010 | int size, | |
1011 | BdrvRequestFlags flags) | |
1012 | { | |
1013 | int ret; | |
1014 | GlusterAIOCB acb; | |
1015 | BDRVGlusterState *s = bs->opaque; | |
1016 | ||
1017 | acb.size = size; | |
1018 | acb.ret = 0; | |
1019 | acb.coroutine = qemu_coroutine_self(); | |
1020 | acb.aio_context = bdrv_get_aio_context(bs); | |
1021 | ||
1022 | ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb); | |
1023 | if (ret < 0) { | |
1024 | return -errno; | |
1025 | } | |
1026 | ||
1027 | qemu_coroutine_yield(); | |
1028 | return acb.ret; | |
1029 | } | |
1030 | #endif | |
1031 | ||
/*
 * Resize the file behind @fd to @offset bytes, honouring @prealloc.
 * Returns 0 on success or a negative errno, with @errp set on failure.
 */
static int qemu_gluster_do_truncate(struct glfs_fd *fd, int64_t offset,
                                    PreallocMode prealloc, Error **errp)
{
    int64_t current_length;

    /* Seek to EOF to learn the current size without an fstat round trip. */
    current_length = glfs_lseek(fd, 0, SEEK_END);
    if (current_length < 0) {
        error_setg_errno(errp, errno, "Failed to determine current size");
        return -errno;
    }

    /* Preallocation only makes sense when growing the file. */
    if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Cannot use preallocation for shrinking files");
        return -ENOTSUP;
    }

    if (current_length == offset) {
        return 0;  /* already the requested size: nothing to do */
    }

    switch (prealloc) {
#ifdef CONFIG_GLUSTERFS_FALLOCATE
    case PREALLOC_MODE_FALLOC:
        /* Reserve the new range [current_length, offset) without writing. */
        if (glfs_fallocate(fd, 0, current_length, offset - current_length)) {
            error_setg_errno(errp, errno, "Could not preallocate data");
            return -errno;
        }
        break;
#endif /* CONFIG_GLUSTERFS_FALLOCATE */
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    case PREALLOC_MODE_FULL:
        /* Grow first, then explicitly zero the newly added range. */
        if (glfs_ftruncate(fd, offset)) {
            error_setg_errno(errp, errno, "Could not resize file");
            return -errno;
        }
        if (glfs_zerofill(fd, current_length, offset - current_length)) {
            error_setg_errno(errp, errno, "Could not zerofill the new area");
            return -errno;
        }
        break;
#endif /* CONFIG_GLUSTERFS_ZEROFILL */
    case PREALLOC_MODE_OFF:
        if (glfs_ftruncate(fd, offset)) {
            error_setg_errno(errp, errno, "Could not resize file");
            return -errno;
        }
        break;
    default:
        /* Modes compiled out by the #ifdefs above also land here. */
        error_setg(errp, "Unsupported preallocation mode: %s",
                   PreallocMode_str(prealloc));
        return -EINVAL;
    }

    return 0;
}
1087 | ||
1088 | static int qemu_gluster_co_create(BlockdevCreateOptions *options, | |
1089 | Error **errp) | |
1090 | { | |
1091 | BlockdevCreateOptionsGluster *opts = &options->u.gluster; | |
1092 | struct glfs *glfs; | |
1093 | struct glfs_fd *fd = NULL; | |
1094 | int ret = 0; | |
1095 | ||
1096 | assert(options->driver == BLOCKDEV_DRIVER_GLUSTER); | |
1097 | ||
1098 | glfs = qemu_gluster_glfs_init(opts->location, errp); | |
1099 | if (!glfs) { | |
1100 | ret = -errno; | |
1101 | goto out; | |
1102 | } | |
1103 | ||
1104 | fd = glfs_creat(glfs, opts->location->path, | |
1105 | O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR); | |
1106 | if (!fd) { | |
1107 | ret = -errno; | |
1108 | goto out; | |
1109 | } | |
1110 | ||
1111 | ret = qemu_gluster_do_truncate(fd, opts->size, opts->preallocation, errp); | |
1112 | ||
1113 | out: | |
1114 | if (fd) { | |
1115 | if (glfs_close(fd) != 0 && ret == 0) { | |
1116 | ret = -errno; | |
1117 | } | |
1118 | } | |
1119 | glfs_clear_preopened(glfs); | |
1120 | return ret; | |
1121 | } | |
1122 | ||
1123 | static int coroutine_fn qemu_gluster_co_create_opts(const char *filename, | |
1124 | QemuOpts *opts, | |
1125 | Error **errp) | |
1126 | { | |
1127 | BlockdevCreateOptions *options; | |
1128 | BlockdevCreateOptionsGluster *gopts; | |
1129 | BlockdevOptionsGluster *gconf; | |
1130 | char *tmp = NULL; | |
1131 | Error *local_err = NULL; | |
1132 | int ret; | |
1133 | ||
1134 | options = g_new0(BlockdevCreateOptions, 1); | |
1135 | options->driver = BLOCKDEV_DRIVER_GLUSTER; | |
1136 | gopts = &options->u.gluster; | |
1137 | ||
1138 | gconf = g_new0(BlockdevOptionsGluster, 1); | |
1139 | gopts->location = gconf; | |
1140 | ||
1141 | gopts->size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | |
1142 | BDRV_SECTOR_SIZE); | |
1143 | ||
1144 | tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); | |
1145 | gopts->preallocation = qapi_enum_parse(&PreallocMode_lookup, tmp, | |
1146 | PREALLOC_MODE_OFF, &local_err); | |
1147 | g_free(tmp); | |
1148 | if (local_err) { | |
1149 | error_propagate(errp, local_err); | |
1150 | ret = -EINVAL; | |
1151 | goto fail; | |
1152 | } | |
1153 | ||
1154 | gconf->debug = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG, | |
1155 | GLUSTER_DEBUG_DEFAULT); | |
1156 | if (gconf->debug < 0) { | |
1157 | gconf->debug = 0; | |
1158 | } else if (gconf->debug > GLUSTER_DEBUG_MAX) { | |
1159 | gconf->debug = GLUSTER_DEBUG_MAX; | |
1160 | } | |
1161 | gconf->has_debug = true; | |
1162 | ||
1163 | gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE); | |
1164 | if (!gconf->logfile) { | |
1165 | gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT); | |
1166 | } | |
1167 | gconf->has_logfile = true; | |
1168 | ||
1169 | ret = qemu_gluster_parse(gconf, filename, NULL, errp); | |
1170 | if (ret < 0) { | |
1171 | goto fail; | |
1172 | } | |
1173 | ||
1174 | ret = qemu_gluster_co_create(options, errp); | |
1175 | if (ret < 0) { | |
1176 | goto fail; | |
1177 | } | |
1178 | ||
1179 | ret = 0; | |
1180 | fail: | |
1181 | qapi_free_BlockdevCreateOptions(options); | |
1182 | return ret; | |
1183 | } | |
1184 | ||
1185 | static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs, | |
1186 | int64_t sector_num, int nb_sectors, | |
1187 | QEMUIOVector *qiov, int write) | |
1188 | { | |
1189 | int ret; | |
1190 | GlusterAIOCB acb; | |
1191 | BDRVGlusterState *s = bs->opaque; | |
1192 | size_t size = nb_sectors * BDRV_SECTOR_SIZE; | |
1193 | off_t offset = sector_num * BDRV_SECTOR_SIZE; | |
1194 | ||
1195 | acb.size = size; | |
1196 | acb.ret = 0; | |
1197 | acb.coroutine = qemu_coroutine_self(); | |
1198 | acb.aio_context = bdrv_get_aio_context(bs); | |
1199 | ||
1200 | if (write) { | |
1201 | ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0, | |
1202 | gluster_finish_aiocb, &acb); | |
1203 | } else { | |
1204 | ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0, | |
1205 | gluster_finish_aiocb, &acb); | |
1206 | } | |
1207 | ||
1208 | if (ret < 0) { | |
1209 | return -errno; | |
1210 | } | |
1211 | ||
1212 | qemu_coroutine_yield(); | |
1213 | return acb.ret; | |
1214 | } | |
1215 | ||
/* Truncate callback: delegate to the shared truncate/preallocate helper. */
static coroutine_fn int qemu_gluster_co_truncate(BlockDriverState *bs,
                                                 int64_t offset,
                                                 PreallocMode prealloc,
                                                 Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    return qemu_gluster_do_truncate(s->fd, offset, prealloc, errp);
}
1224 | ||
/* Sector-based read: forward to the common read/write helper (write=0). */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors,
                                              QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
1232 | ||
/* Sector-based write: forward to the common read/write helper (write=1). */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
                                               int64_t sector_num,
                                               int nb_sectors,
                                               QEMUIOVector *qiov,
                                               int flags)
{
    /* No write flags are supported on this path. */
    assert(!flags);
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
1242 | ||
1243 | static void qemu_gluster_close(BlockDriverState *bs) | |
1244 | { | |
1245 | BDRVGlusterState *s = bs->opaque; | |
1246 | ||
1247 | g_free(s->logfile); | |
1248 | if (s->fd) { | |
1249 | glfs_close(s->fd); | |
1250 | s->fd = NULL; | |
1251 | } | |
1252 | glfs_clear_preopened(s->glfs); | |
1253 | } | |
1254 | ||
/*
 * Flush pending writes to stable storage via an async fsync.  On failure
 * the BDS is deliberately closed and detached -- see the comment at the
 * error label for why no safer recovery is possible.
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        ret = -errno;
        goto error;
    }

    /* Suspend until gluster_finish_aiocb() wakes us with the result. */
    qemu_coroutine_yield();
    if (acb.ret < 0) {
        ret = acb.ret;
        goto error;
    }

    return acb.ret;

error:
    /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain its cache
     * after a fsync failure, so we have no way of allowing the guest to safely
     * continue.  Gluster versions prior to 3.5.6 don't retain the cache
     * either, but will invalidate the fd on error, so this is again our only
     * option.
     *
     * The 'resync-failed-syncs-after-fsync' xlator option for the
     * write-behind cache will cause later gluster versions to retain its
     * cache after error, so long as the fd remains open.  However, we
     * currently have no way of knowing if this option is supported.
     *
     * TODO: Once gluster provides a way for us to determine if the option
     * is supported, bypass the closure and setting drv to NULL. */
    qemu_gluster_close(bs);
    bs->drv = NULL;
    return ret;
}
1298 | ||
1299 | #ifdef CONFIG_GLUSTERFS_DISCARD | |
1300 | static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs, | |
1301 | int64_t offset, int size) | |
1302 | { | |
1303 | int ret; | |
1304 | GlusterAIOCB acb; | |
1305 | BDRVGlusterState *s = bs->opaque; | |
1306 | ||
1307 | acb.size = 0; | |
1308 | acb.ret = 0; | |
1309 | acb.coroutine = qemu_coroutine_self(); | |
1310 | acb.aio_context = bdrv_get_aio_context(bs); | |
1311 | ||
1312 | ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb); | |
1313 | if (ret < 0) { | |
1314 | return -errno; | |
1315 | } | |
1316 | ||
1317 | qemu_coroutine_yield(); | |
1318 | return acb.ret; | |
1319 | } | |
1320 | #endif | |
1321 | ||
1322 | static int64_t qemu_gluster_getlength(BlockDriverState *bs) | |
1323 | { | |
1324 | BDRVGlusterState *s = bs->opaque; | |
1325 | int64_t ret; | |
1326 | ||
1327 | ret = glfs_lseek(s->fd, 0, SEEK_END); | |
1328 | if (ret < 0) { | |
1329 | return -errno; | |
1330 | } else { | |
1331 | return ret; | |
1332 | } | |
1333 | } | |
1334 | ||
1335 | static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs) | |
1336 | { | |
1337 | BDRVGlusterState *s = bs->opaque; | |
1338 | struct stat st; | |
1339 | int ret; | |
1340 | ||
1341 | ret = glfs_fstat(s->fd, &st); | |
1342 | if (ret < 0) { | |
1343 | return -errno; | |
1344 | } else { | |
1345 | return st.st_blocks * 512; | |
1346 | } | |
1347 | } | |
1348 | ||
/*
 * Whether a freshly created image is guaranteed to read as zeroes.
 * We cannot promise that here, so always report 0.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
1354 | ||
/*
 * Find allocation range in @bs around offset @start.
 * May change underlying file descriptor's file offset.
 * If @start is not in a hole, store @start in @data, and the
 * beginning of the next hole in @hole, and return 0.
 * If @start is in a non-trailing hole, store @start in @hole and the
 * beginning of the next non-hole in @data, and return 0.
 * If @start is in a trailing hole or beyond EOF, return -ENXIO.
 * If we can't find out, return a negative errno other than -ENXIO.
 *
 * (Shamefully copied from file-posix.c, only minuscule adaptions.)
 */
static int find_allocation(BlockDriverState *bs, off_t start,
                           off_t *data, off_t *hole)
{
    BDRVGlusterState *s = bs->opaque;

    /* Server-side SEEK_DATA support was probed earlier; bail out if absent. */
    if (!s->supports_seek_data) {
        goto exit;
    }

#if defined SEEK_HOLE && defined SEEK_DATA
    off_t offs;

    /*
     * SEEK_DATA cases:
     * D1. offs == start: start is in data
     * D2. offs > start: start is in a hole, next data at offs
     * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
     *     or start is beyond EOF
     *     If the latter happens, the file has been truncated behind
     *     our back since we opened it.  All bets are off then.
     *     Treating like a trailing hole is simplest.
     * D4. offs < 0, errno != ENXIO: we learned nothing
     */
    offs = glfs_lseek(s->fd, start, SEEK_DATA);
    if (offs < 0) {
        return -errno;          /* D3 or D4 */
    }

    if (offs < start) {
        /* This is not a valid return by lseek().  We are safe to just return
         * -EIO in this case, and we'll treat it like D4. Unfortunately some
         * versions of gluster server will return offs < start, so an assert
         * here will unnecessarily abort QEMU. */
        return -EIO;
    }

    if (offs > start) {
        /* D2: in hole, next data at offs */
        *hole = start;
        *data = offs;
        return 0;
    }

    /* D1: in data, end not yet known */

    /*
     * SEEK_HOLE cases:
     * H1. offs == start: start is in a hole
     *     If this happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H2. offs > start: either start is in data, next hole at offs,
     *     or start is in trailing hole, EOF at offs
     *     Linux treats trailing holes like any other hole: offs ==
     *     start.  Solaris seeks to EOF instead: offs > start (blech).
     *     If that happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H3. offs < 0, errno = ENXIO: start is beyond EOF
     *     If this happens, the file has been truncated behind our
     *     back since we opened it.  Treat it like a trailing hole.
     * H4. offs < 0, errno != ENXIO: we learned nothing
     *     Pretend we know nothing at all, i.e. "forget" about D1.
     */
    offs = glfs_lseek(s->fd, start, SEEK_HOLE);
    if (offs < 0) {
        return -errno;          /* D1 and (H3 or H4) */
    }

    if (offs < start) {
        /* This is not a valid return by lseek().  We are safe to just return
         * -EIO in this case, and we'll treat it like H4. Unfortunately some
         * versions of gluster server will return offs < start, so an assert
         * here will unnecessarily abort QEMU. */
        return -EIO;
    }

    if (offs > start) {
        /*
         * D1 and H2: either in data, next hole at offs, or it was in
         * data but is now in a trailing hole.  In the latter case,
         * all bets are off.  Treating it as if it there was data all
         * the way to EOF is safe, so simply do that.
         */
        *data = start;
        *hole = offs;
        return 0;
    }

    /* D1 and H1 */
    return -EBUSY;
#endif

exit:
    /* No SEEK_DATA/SEEK_HOLE at build time, or no server support. */
    return -ENOTSUP;
}
1461 | ||
/*
 * Returns the allocation status of the specified offset.
 *
 * The block layer guarantees 'offset' and 'bytes' are within bounds.
 *
 * 'pnum' is set to the number of bytes (including and immediately following
 * the specified offset) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'bytes' is the max value 'pnum' should be set to.
 *
 * (Based on raw_co_block_status() from file-posix.c.)
 */
static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
                                                     bool want_zero,
                                                     int64_t offset,
                                                     int64_t bytes,
                                                     int64_t *pnum,
                                                     int64_t *map,
                                                     BlockDriverState **file)
{
    BDRVGlusterState *s = bs->opaque;
    off_t data = 0, hole = 0;
    int ret = -EINVAL;

    /* Without an open fd we cannot answer at all. */
    if (!s->fd) {
        return ret;
    }

    if (!want_zero) {
        /* Caller only needs allocation info, not zero detection: cheap
         * answer covering the whole requested range. */
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    ret = find_allocation(bs, offset, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = bytes;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA;
    } else if (data == offset) {
        /* On a data extent, compute bytes to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = MIN(bytes, hole - offset);
        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute bytes to the beginning of the next extent. */
        assert(hole == offset);
        *pnum = MIN(bytes, data - offset);
        ret = BDRV_BLOCK_ZERO;
    }

    *map = offset;
    *file = bs;

    return ret | BDRV_BLOCK_OFFSET_VALID;
}
1524 | ||
1525 | ||
/*
 * Runtime options that identify the remote image.  Two block devices
 * differing in any of these refer to different data.
 */
static const char *const gluster_strong_open_opts[] = {
    GLUSTER_OPT_VOLUME,
    GLUSTER_OPT_PATH,
    GLUSTER_OPT_TYPE,
    GLUSTER_OPT_SERVER_PATTERN,
    GLUSTER_OPT_HOST,
    GLUSTER_OPT_PORT,
    GLUSTER_OPT_TO,
    GLUSTER_OPT_IPV4,
    GLUSTER_OPT_IPV6,
    GLUSTER_OPT_SOCKET,

    NULL
};
1540 | ||
/* Driver for the plain "gluster" protocol prefix. */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = false,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_co_create               = qemu_gluster_co_create,
    .bdrv_co_create_opts          = qemu_gluster_co_create_opts,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_co_truncate             = qemu_gluster_co_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_block_status         = qemu_gluster_co_block_status,
    .bdrv_refresh_limits          = qemu_gluster_refresh_limits,
    .create_opts                  = &qemu_gluster_create_opts,
    .strong_runtime_opts          = gluster_strong_open_opts,
};
1571 | ||
/* Driver for the "gluster+tcp" protocol prefix. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = false,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_co_create               = qemu_gluster_co_create,
    .bdrv_co_create_opts          = qemu_gluster_co_create_opts,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_co_truncate             = qemu_gluster_co_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_block_status         = qemu_gluster_co_block_status,
    .bdrv_refresh_limits          = qemu_gluster_refresh_limits,
    .create_opts                  = &qemu_gluster_create_opts,
    .strong_runtime_opts          = gluster_strong_open_opts,
};
1602 | ||
/* Driver for the "gluster+unix" protocol prefix (requires a filename). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_co_create               = qemu_gluster_co_create,
    .bdrv_co_create_opts          = qemu_gluster_co_create_opts,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_co_truncate             = qemu_gluster_co_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_block_status         = qemu_gluster_co_block_status,
    .bdrv_refresh_limits          = qemu_gluster_refresh_limits,
    .create_opts                  = &qemu_gluster_create_opts,
    .strong_runtime_opts          = gluster_strong_open_opts,
};
1633 | ||
/* rdma is deprecated (actually never supported for volfile fetch).
 * Let's maintain it for the protocol compatibility, to make sure things
 * won't break immediately. For now, gluster+rdma will fall back to gluster+tcp
 * protocol with a warning.
 * TODO: remove gluster+rdma interface support
 */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_co_create               = qemu_gluster_co_create,
    .bdrv_co_create_opts          = qemu_gluster_co_create_opts,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_co_truncate             = qemu_gluster_co_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_block_status         = qemu_gluster_co_block_status,
    .bdrv_refresh_limits          = qemu_gluster_refresh_limits,
    .create_opts                  = &qemu_gluster_create_opts,
    .strong_runtime_opts          = gluster_strong_open_opts,
};
1670 | ||
/* Register all gluster protocol driver variants at QEMU startup. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);