/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/uri.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"

#define GLUSTER_OPT_FILENAME        "filename"
#define GLUSTER_OPT_VOLUME          "volume"
#define GLUSTER_OPT_PATH            "path"
#define GLUSTER_OPT_TYPE            "type"
#define GLUSTER_OPT_SERVER_PATTERN  "server."
#define GLUSTER_OPT_HOST            "host"
#define GLUSTER_OPT_PORT            "port"
#define GLUSTER_OPT_TO              "to"
#define GLUSTER_OPT_IPV4            "ipv4"
#define GLUSTER_OPT_IPV6            "ipv6"
#define GLUSTER_OPT_SOCKET          "socket"
#define GLUSTER_OPT_DEBUG           "debug"
#define GLUSTER_DEFAULT_PORT        24007
#define GLUSTER_DEBUG_DEFAULT       4
#define GLUSTER_DEBUG_MAX           9
#define GLUSTER_OPT_LOGFILE         "logfile"
#define GLUSTER_LOGFILE_DEFAULT     "-" /* handled in libgfapi as /dev/stderr */

#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"

typedef struct GlusterAIOCB {
    int64_t size;
    int ret;
    Coroutine *coroutine;
    AioContext *aio_context;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
    char *logfile;
    bool supports_seek_data;
    int debug_level;
} BDRVGlusterState;

typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;


typedef struct GlfsPreopened {
    char *volume;
    glfs_t *fs;
    int ref;
} GlfsPreopened;

typedef struct ListElement {
    QLIST_ENTRY(ListElement) list;
    GlfsPreopened saved;
} ListElement;

static QLIST_HEAD(glfs_list, ListElement) glfs_list;

static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        {
            .name = GLUSTER_OPT_LOGFILE,
            .type = QEMU_OPT_STRING,
            .help = "Logfile path of libgfapi",
        },
        { /* end of list */ }
    }
};

static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_FILENAME,
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        {
            .name = GLUSTER_OPT_LOGFILE,
            .type = QEMU_OPT_STRING,
            .help = "Logfile path of libgfapi",
        },
        { /* end of list */ }
    },
};

static QemuOptsList runtime_json_opts = {
    .name = "gluster_json",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_VOLUME,
            .type = QEMU_OPT_STRING,
            .help = "name of gluster volume where VM image resides",
        },
        {
            .name = GLUSTER_OPT_PATH,
            .type = QEMU_OPT_STRING,
            .help = "absolute path to image file in gluster volume",
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        { /* end of list */ }
    },
};

static QemuOptsList runtime_type_opts = {
    .name = "gluster_type",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "tcp|unix",
        },
        { /* end of list */ }
    },
};

static QemuOptsList runtime_unix_opts = {
    .name = "gluster_unix",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_SOCKET,
            .type = QEMU_OPT_STRING,
167 | .help = "socket file path)", | |
        },
        { /* end of list */ }
    },
};

static QemuOptsList runtime_tcp_opts = {
    .name = "gluster_tcp",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "tcp|unix",
        },
        {
            .name = GLUSTER_OPT_HOST,
            .type = QEMU_OPT_STRING,
            .help = "host address (hostname/ipv4/ipv6 addresses)",
        },
        {
            .name = GLUSTER_OPT_PORT,
            .type = QEMU_OPT_NUMBER,
            .help = "port number on which glusterd is listening (default 24007)",
        },
        {
            .name = "to",
            .type = QEMU_OPT_NUMBER,
            .help = "max port number, not supported by gluster",
        },
        {
            .name = "ipv4",
            .type = QEMU_OPT_BOOL,
            .help = "ipv4 bool value, not supported by gluster",
        },
        {
            .name = "ipv6",
            .type = QEMU_OPT_BOOL,
            .help = "ipv6 bool value, not supported by gluster",
        },
        { /* end of list */ }
    },
};

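/*
 * glfs_set_preopened(), glfs_find_preopened() and glfs_clear_preopened()
 * below keep a cache of already-initialized glfs objects, keyed by volume
 * name.  A connection is reference counted and shared by all block devices
 * that use the same Gluster volume; glfs_clear_preopened() drops one
 * reference and only calls glfs_fini() once the last user is gone.
 */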
static void glfs_set_preopened(const char *volume, glfs_t *fs)
{
    ListElement *entry = NULL;

    entry = g_new(ListElement, 1);

    entry->saved.volume = g_strdup(volume);

    entry->saved.fs = fs;
    entry->saved.ref = 1;

    QLIST_INSERT_HEAD(&glfs_list, entry, list);
}

static glfs_t *glfs_find_preopened(const char *volume)
{
    ListElement *entry = NULL;

    QLIST_FOREACH(entry, &glfs_list, list) {
        if (strcmp(entry->saved.volume, volume) == 0) {
            entry->saved.ref++;
            return entry->saved.fs;
        }
    }

    return NULL;
}

static void glfs_clear_preopened(glfs_t *fs)
{
    ListElement *entry = NULL;

    if (fs == NULL) {
        return;
    }

    QLIST_FOREACH(entry, &glfs_list, list) {
        if (entry->saved.fs == fs) {
            if (--entry->saved.ref) {
                return;
            }

            QLIST_REMOVE(entry, list);

            glfs_fini(entry->saved.fs);
            g_free(entry->saved.volume);
            g_free(entry);
        }
    }
}

static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volume = g_strndup(q, p - q);

    /* path */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->path = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp or unix. If a transport type isn't specified, then tcp type is assumed.
 *
 * 'host' specifies the host where the volume file specification for
 * the given volume resides. This can be either a hostname or an ipv4 address.
 * If transport type is 'unix', then 'host' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0, which will make gluster use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volume' is the name of the gluster volume which contains the VM image.
 *
 * 'path' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 */
static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
                                  const char *filename)
{
    GlusterServer *gsconf;
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    gconf->server = g_new0(GlusterServerList, 1);
    gconf->server->value = gsconf = g_new0(GlusterServer, 1);

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gsconf->type = GLUSTER_TRANSPORT_TCP;
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gsconf->type = GLUSTER_TRANSPORT_TCP;
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gsconf->type = GLUSTER_TRANSPORT_UNIX;
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gsconf->type = GLUSTER_TRANSPORT_TCP;
        error_report("Warning: rdma feature is not supported, falling "
                     "back to tcp");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gsconf->u.q_unix.path = g_strdup(qp->p[0].value);
    } else {
        gsconf->u.tcp.host = g_strdup(uri->server ? uri->server : "localhost");
        if (uri->port) {
            gsconf->u.tcp.port = g_strdup_printf("%d", uri->port);
        } else {
            gsconf->u.tcp.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT);
        }
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
                                           Error **errp)
{
    struct glfs *glfs;
    int ret;
    int old_errno;
    GlusterServerList *server;
    unsigned long long port;

    glfs = glfs_find_preopened(gconf->volume);
    if (glfs) {
        return glfs;
    }

    glfs = glfs_new(gconf->volume);
    if (!glfs) {
        goto out;
    }

    glfs_set_preopened(gconf->volume, glfs);

    for (server = gconf->server; server; server = server->next) {
        if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
            ret = glfs_set_volfile_server(glfs,
                                   GlusterTransport_lookup[server->value->type],
                                   server->value->u.q_unix.path, 0);
        } else {
            if (parse_uint_full(server->value->u.tcp.port, &port, 10) < 0 ||
                port > 65535) {
                error_setg(errp, "'%s' is not a valid port number",
                           server->value->u.tcp.port);
                errno = EINVAL;
                goto out;
            }
            ret = glfs_set_volfile_server(glfs,
                                   GlusterTransport_lookup[server->value->type],
                                   server->value->u.tcp.host,
                                   (int)port);
        }

        if (ret < 0) {
            goto out;
        }
    }

    ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug_level);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg(errp, "Gluster connection for volume %s, path %s failed"
                         " to connect", gconf->volume, gconf->path);
        for (server = gconf->server; server; server = server->next) {
            if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
                error_append_hint(errp, "hint: failed on socket %s ",
                                  server->value->u.q_unix.path);
            } else {
                error_append_hint(errp, "hint: failed on host %s and port %s ",
                                  server->value->u.tcp.host,
                                  server->value->u.tcp.port);
            }
        }

        error_append_hint(errp, "Please refer to gluster logs for more info\n");

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0) {
            errno = EINVAL;
        }

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_clear_preopened(glfs);
        errno = old_errno;
    }
    return NULL;
}

static int qapi_enum_parse(const char *opt)
{
    int i;

    if (!opt) {
        return GLUSTER_TRANSPORT__MAX;
    }

    for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) {
        if (!strcmp(opt, GlusterTransport_lookup[i])) {
            return i;
        }
    }

    return i;
}

/*
 * Convert the json formatted command line into qapi.
 */
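/*
 * Illustrative example (host, volume and paths are placeholders): a command
 * line such as
 *
 *   -drive driver=qcow2,file.driver=gluster,file.volume=testvol,
 *          file.path=/path/a.qcow2,file.server.0.type=tcp,
 *          file.server.0.host=1.2.3.4,file.server.0.port=24007,
 *          file.server.1.type=unix,file.server.1.socket=/var/run/glusterd.socket
 *
 * reaches this function as a QDict holding the flattened "volume", "path"
 * and "server.N.*" keys, which are converted into a BlockdevOptionsGluster
 * with a GlusterServerList of two entries.
 */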
static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
                                   QDict *options, Error **errp)
{
    QemuOpts *opts;
    GlusterServer *gsconf;
    GlusterServerList *curr = NULL;
    QDict *backing_options = NULL;
    Error *local_err = NULL;
    char *str = NULL;
    const char *ptr;
    size_t num_servers;
    int i;

    /* create opts info from runtime_json_opts list */
    opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        goto out;
    }

    num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN);
    if (num_servers < 1) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, "server");
        goto out;
    }

    ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
    if (!ptr) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME);
        goto out;
    }
    gconf->volume = g_strdup(ptr);

    ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
    if (!ptr) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH);
        goto out;
    }
    gconf->path = g_strdup(ptr);
    qemu_opts_del(opts);

    for (i = 0; i < num_servers; i++) {
        str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
        qdict_extract_subqdict(options, &backing_options, str);

        /* create opts info from runtime_type_opts list */
        opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort);
        qemu_opts_absorb_qdict(opts, backing_options, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE);
        gsconf = g_new0(GlusterServer, 1);
        gsconf->type = qapi_enum_parse(ptr);
        if (!ptr) {
            error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
            error_append_hint(&local_err, GERR_INDEX_HINT, i);
            goto out;

        }
        if (gsconf->type == GLUSTER_TRANSPORT__MAX) {
            error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE,
                       GLUSTER_OPT_TYPE, "tcp or unix");
            error_append_hint(&local_err, GERR_INDEX_HINT, i);
            goto out;
        }
        qemu_opts_del(opts);

        if (gsconf->type == GLUSTER_TRANSPORT_TCP) {
            /* create opts info from runtime_tcp_opts list */
            opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort);
            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
            if (local_err) {
                goto out;
            }

            ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_HOST);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.tcp.host = g_strdup(ptr);
            ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_PORT);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.tcp.port = g_strdup(ptr);

            /* defend for unsupported fields in InetSocketAddress,
             * i.e. @ipv4, @ipv6 and @to
             */
            ptr = qemu_opt_get(opts, GLUSTER_OPT_TO);
            if (ptr) {
                gsconf->u.tcp.has_to = true;
            }
            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4);
            if (ptr) {
                gsconf->u.tcp.has_ipv4 = true;
            }
            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6);
            if (ptr) {
                gsconf->u.tcp.has_ipv6 = true;
            }
            if (gsconf->u.tcp.has_to) {
                error_setg(&local_err, "Parameter 'to' not supported");
                goto out;
            }
            if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) {
                error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported");
                goto out;
            }
            qemu_opts_del(opts);
        } else {
            /* create opts info from runtime_unix_opts list */
            opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort);
            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
            if (local_err) {
                goto out;
            }

            ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_SOCKET);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.q_unix.path = g_strdup(ptr);
            qemu_opts_del(opts);
        }

        if (gconf->server == NULL) {
            gconf->server = g_new0(GlusterServerList, 1);
            gconf->server->value = gsconf;
            curr = gconf->server;
        } else {
            curr->next = g_new0(GlusterServerList, 1);
            curr->next->value = gsconf;
            curr = curr->next;
        }

        qdict_del(backing_options, str);
        g_free(str);
        str = NULL;
    }

    return 0;

out:
    error_propagate(errp, local_err);
    qemu_opts_del(opts);
    if (str) {
        qdict_del(backing_options, str);
        g_free(str);
    }
    errno = EINVAL;
    return -errno;
}

static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
                                      const char *filename,
                                      QDict *options, Error **errp)
{
    int ret;
    if (filename) {
        ret = qemu_gluster_parse_uri(gconf, filename);
        if (ret < 0) {
            error_setg(errp, "invalid URI");
            error_append_hint(errp, "Usage: file=gluster[+transport]://"
                                    "[host[:port]]volume/path[?socket=...]"
                                    "[,file.debug=N]"
                                    "[,file.logfile=/path/filename.log]\n");
            errno = -ret;
            return NULL;
        }
    } else {
        ret = qemu_gluster_parse_json(gconf, options, errp);
        if (ret < 0) {
            error_append_hint(errp, "Usage: "
                             "-drive driver=qcow2,file.driver=gluster,"
                             "file.volume=testvol,file.path=/path/a.qcow2"
                             "[,file.debug=9]"
                             "[,file.logfile=/path/filename.log],"
                             "file.server.0.type=tcp,"
                             "file.server.0.host=1.2.3.4,"
                             "file.server.0.port=24007,"
                             "file.server.1.transport=unix,"
                             "file.server.1.socket=/var/run/glusterd.socket ..."
                             "\n");
            errno = -ret;
            return NULL;
        }

    }

    return qemu_gluster_glfs_init(gconf, errp);
}

static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_coroutine_enter(acb->coroutine);
}

/*
 * AIO callback routine called from GlusterFS thread.
 */
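/*
 * glfs invokes this callback from one of its own threads, so the request is
 * not completed here directly: the result is stored in the GlusterAIOCB and
 * a one-shot BH is scheduled on the request's AioContext, where
 * qemu_gluster_complete_aio() re-enters the waiting coroutine.
 */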
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = -errno; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
}

static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
{
    assert(open_flags != NULL);

    *open_flags |= O_BINARY;

    if (bdrv_flags & BDRV_O_RDWR) {
        *open_flags |= O_RDWR;
    } else {
        *open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        *open_flags |= O_DIRECT;
    }
}

/*
 * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of
 * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used.
 * - Corrected versions return -1 and set errno to EINVAL.
 * - Versions that support SEEK_DATA/HOLE correctly, will return -1 and set
 *   errno to ENXIO when SEEK_DATA is called with a position of EOF.
 */
static bool qemu_gluster_test_seek(struct glfs_fd *fd)
{
    off_t ret = 0;

#if defined SEEK_HOLE && defined SEEK_DATA
    off_t eof;

    eof = glfs_lseek(fd, 0, SEEK_END);
    if (eof < 0) {
        /* this should never occur */
        return false;
    }

    /* this should always fail with ENXIO if SEEK_DATA is supported */
    ret = glfs_lseek(fd, eof, SEEK_DATA);
#endif

    return (ret < 0) && (errno == ENXIO);
}

static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    BlockdevOptionsGluster *gconf = NULL;
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename, *logfile;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);

    s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
                                         GLUSTER_DEBUG_DEFAULT);
    if (s->debug_level < 0) {
        s->debug_level = 0;
    } else if (s->debug_level > GLUSTER_DEBUG_MAX) {
        s->debug_level = GLUSTER_DEBUG_MAX;
    }

    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug_level = s->debug_level;
    gconf->has_debug_level = true;

    logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE);
    s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT);

    gconf->logfile = g_strdup(s->logfile);
    gconf->has_logfile = true;

    s->glfs = qemu_gluster_init(gconf, filename, options, errp);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /* Without this, if fsync fails for a recoverable reason (for instance,
     * ENOSPC), gluster will dump its cache, preventing retries. This means
     * almost certain data loss. Not all gluster versions support the
     * 'resync-failed-syncs-after-fsync' key value, but there is no way to
     * discover during runtime if it is supported (this api returns success for
     * unknown key/value pairs) */
    ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
                                          "resync-failed-syncs-after-fsync",
                                          "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto out;
    }
#endif

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->path, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

    s->supports_seek_data = qemu_gluster_test_seek(s->fd);

out:
    qemu_opts_del(opts);
    qapi_free_BlockdevOptionsGluster(gconf);
    if (!ret) {
        return ret;
    }
    g_free(s->logfile);
    if (s->fd) {
        glfs_close(s->fd);
    }

    glfs_clear_preopened(s->glfs);

    return ret;
}

static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterState *s;
    BDRVGlusterReopenState *reop_s;
    BlockdevOptionsGluster *gconf;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    s = state->bs->opaque;

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug_level = s->debug_level;
    gconf->has_debug_level = true;
    gconf->logfile = g_strdup(s->logfile);
    gconf->has_logfile = true;
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync", "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto exit;
    }
#endif

    reop_s->fd = glfs_open(reop_s->glfs, gconf->path, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qapi_free_BlockdevOptionsGluster(gconf);
    return ret;
}

static void qemu_gluster_reopen_commit(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;
    BDRVGlusterState *s = state->bs->opaque;


    /* close the old */
    if (s->fd) {
        glfs_close(s->fd);
    }

    glfs_clear_preopened(s->glfs);

    /* use the newly opened image / connection */
    s->fd = reop_s->fd;
    s->glfs = reop_s->glfs;

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}


static void qemu_gluster_reopen_abort(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;

    if (reop_s == NULL) {
        return;
    }

    if (reop_s->fd) {
        glfs_close(reop_s->fd);
    }

    glfs_clear_preopened(reop_s->glfs);

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}

#ifdef CONFIG_GLUSTERFS_ZEROFILL
static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
                                                      int64_t offset,
                                                      int size,
                                                      BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
                                        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
                                        int64_t size)
{
    return 0;
}
#endif

static int qemu_gluster_create(const char *filename,
                               QemuOpts *opts, Error **errp)
{
    BlockdevOptionsGluster *gconf;
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    char *tmp = NULL;

    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
                                                 GLUSTER_DEBUG_DEFAULT);
    if (gconf->debug_level < 0) {
        gconf->debug_level = 0;
    } else if (gconf->debug_level > GLUSTER_DEBUG_MAX) {
        gconf->debug_level = GLUSTER_DEBUG_MAX;
    }
    gconf->has_debug_level = true;

    gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE);
    if (!gconf->logfile) {
        gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT);
    }
    gconf->has_logfile = true;

    glfs = qemu_gluster_init(gconf, filename, NULL, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);

    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!tmp || !strcmp(tmp, "off")) {
        prealloc = 0;
    } else if (!strcmp(tmp, "full") && gluster_supports_zerofill()) {
        prealloc = 1;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'"
                         " or GlusterFS doesn't support zerofill API", tmp);
        ret = -EINVAL;
        goto out;
    }

    fd = glfs_creat(glfs, gconf->path,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (!glfs_ftruncate(fd, total_size)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    g_free(tmp);
    qapi_free_BlockdevOptionsGluster(gconf);
    glfs_clear_preopened(glfs);
    return ret;
}

static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                gluster_finish_aiocb, &acb);
    }

    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}

static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors,
                                              QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
                                               int64_t sector_num,
                                               int nb_sectors,
                                               QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}

static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    g_free(s->logfile);
    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_clear_preopened(s->glfs);
}

static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        ret = -errno;
        goto error;
    }

    qemu_coroutine_yield();
    if (acb.ret < 0) {
        ret = acb.ret;
        goto error;
    }

    return acb.ret;

error:
    /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain their cache
     * after a fsync failure, so we have no way of allowing the guest to safely
     * continue.  Gluster versions prior to 3.5.6 don't retain the cache
     * either, but will invalidate the fd on error, so this is again our only
     * option.
     *
     * The 'resync-failed-syncs-after-fsync' xlator option for the
     * write-behind cache will cause later gluster versions to retain their
     * cache after error, so long as the fd remains open.  However, we
     * currently have no way of knowing if this option is supported.
     *
     * TODO: Once gluster provides a way for us to determine if the option
     * is supported, bypass the closure and setting drv to NULL. */
    qemu_gluster_close(bs);
    bs->drv = NULL;
    return ret;
}

#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
                                                 int64_t offset, int size)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
#endif

static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}

/*
 * Find allocation range in @bs around offset @start.
 * May change underlying file descriptor's file offset.
 * If @start is not in a hole, store @start in @data, and the
 * beginning of the next hole in @hole, and return 0.
 * If @start is in a non-trailing hole, store @start in @hole and the
 * beginning of the next non-hole in @data, and return 0.
 * If @start is in a trailing hole or beyond EOF, return -ENXIO.
 * If we can't find out, return a negative errno other than -ENXIO.
 *
 * (Shamefully copied from raw-posix.c, only minuscule adaptations.)
 */
static int find_allocation(BlockDriverState *bs, off_t start,
                           off_t *data, off_t *hole)
{
    BDRVGlusterState *s = bs->opaque;

    if (!s->supports_seek_data) {
        goto exit;
    }

#if defined SEEK_HOLE && defined SEEK_DATA
    off_t offs;

    /*
     * SEEK_DATA cases:
     * D1. offs == start: start is in data
     * D2. offs > start: start is in a hole, next data at offs
     * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
     *                              or start is beyond EOF
     *     If the latter happens, the file has been truncated behind
     *     our back since we opened it.  All bets are off then.
     *     Treating like a trailing hole is simplest.
     * D4. offs < 0, errno != ENXIO: we learned nothing
     */
    offs = glfs_lseek(s->fd, start, SEEK_DATA);
    if (offs < 0) {
        return -errno;          /* D3 or D4 */
    }
    assert(offs >= start);

    if (offs > start) {
        /* D2: in hole, next data at offs */
        *hole = start;
        *data = offs;
        return 0;
    }

    /* D1: in data, end not yet known */

    /*
     * SEEK_HOLE cases:
     * H1. offs == start: start is in a hole
     *     If this happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H2. offs > start: either start is in data, next hole at offs,
     *                   or start is in trailing hole, EOF at offs
     *     Linux treats trailing holes like any other hole: offs ==
     *     start.  Solaris seeks to EOF instead: offs > start (blech).
     *     If that happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H3. offs < 0, errno = ENXIO: start is beyond EOF
     *     If this happens, the file has been truncated behind our
     *     back since we opened it.  Treat it like a trailing hole.
     * H4. offs < 0, errno != ENXIO: we learned nothing
     *     Pretend we know nothing at all, i.e. "forget" about D1.
     */
    offs = glfs_lseek(s->fd, start, SEEK_HOLE);
    if (offs < 0) {
        return -errno;          /* D1 and (H3 or H4) */
    }
    assert(offs >= start);

    if (offs > start) {
        /*
         * D1 and H2: either in data, next hole at offs, or it was in
         * data but is now in a trailing hole.  In the latter case,
         * all bets are off.  Treating it as if it there was data all
         * the way to EOF is safe, so simply do that.
         */
        *data = start;
        *hole = offs;
        return 0;
    }

    /* D1 and H1 */
    return -EBUSY;
#endif

exit:
    return -ENOTSUP;
}

/*
 * Returns the allocation status of the specified sectors.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * (Based on raw_co_get_block_status() from raw-posix.c.)
 */
static int64_t coroutine_fn qemu_gluster_co_get_block_status(
        BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
        BlockDriverState **file)
{
    BDRVGlusterState *s = bs->opaque;
    off_t start, data = 0, hole = 0;
    int64_t total_size;
    int ret = -EINVAL;

    if (!s->fd) {
        return ret;
    }

    start = sector_num * BDRV_SECTOR_SIZE;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        return total_size;
    } else if (start >= total_size) {
        *pnum = 0;
        return 0;
    } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
        nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
    }

    ret = find_allocation(bs, start, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
    } else if (data == start) {
        /* On a data extent, compute sectors to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE));
        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute sectors to the beginning of the next extent. */
        assert(hole == start);
        *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
        ret = BDRV_BLOCK_ZERO;
    }

    *file = bs;

    return ret | BDRV_BLOCK_OFFSET_VALID | start;
}


static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = false,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};

static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = false,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};

static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};

/* rdma is deprecated (actually never supported for volfile fetch).
 * Let's maintain it for protocol compatibility, to make sure things
 * won't break immediately. For now, gluster+rdma will fall back to gluster+tcp
 * protocol with a warning.
 * TODO: remove gluster+rdma interface support
 */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};

static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);