/* block/nbd.c */
/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *     Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/nbd.h"
#include "qemu/uri.h"
#include "block/block_int.h"
#include "qemu/module.h"
#include "qemu/sockets.h"
#include "qapi/qmp/qjson.h"
#include "qapi/qmp/qint.h"

#include <sys/types.h>
#include <unistd.h>

#define EN_OPTSTR ":exportname="

/* #define DEBUG_NBD */

#if defined(DEBUG_NBD)
#define logout(fmt, ...) \
    fprintf(stderr, "nbd\t%-24s" fmt, __func__, ##__VA_ARGS__)
#else
#define logout(fmt, ...) ((void)0)
#endif

#define MAX_NBD_REQUESTS 16
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ ((uint64_t)(intptr_t)bs))
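
/* Request handles are derived from the per-request slot index by XOR-ing it
 * with the BlockDriverState pointer, so a handle is unique per device while
 * still mapping straight back to a small array index (HANDLE_TO_INDEX). */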

typedef struct BDRVNBDState {
    int sock;
    uint32_t nbdflags;
    off_t size;
    size_t blocksize;

    CoMutex send_mutex;
    CoMutex free_sema;
    Coroutine *send_coroutine;
    int in_flight;

    Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
    struct nbd_reply reply;

    bool is_unix;
    QemuOpts *socket_opts;

    char *export_name; /* An NBD server may export several devices */
} BDRVNBDState;

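/* URI forms accepted below:
 *
 *   nbd://host[:port]/[export]
 *   nbd+tcp://host[:port]/[export]
 *   nbd+unix:///[export]?socket=/path/to/socket
 *
 * The pieces are stored in @options under the keys "export", "host",
 * "port" and "path". */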
static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!strcmp(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!strcmp(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "/";
    p += strspn(p, "/");
    if (p[0]) {
        qdict_put(options, "export", qstring_from_str(p));
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put(options, "path", qstring_from_str(qp->p[0].value));
    } else {
        QString *host;
        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 2);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put(options, "host", host);
        if (uri->port) {
            char *port_str = g_strdup_printf("%d", uri->port);
            qdict_put(options, "port", qstring_from_str(port_str));
            g_free(port_str);
        }
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

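/* Besides the URI forms handled by nbd_parse_uri(), the legacy filename
 * syntax is accepted here:
 *
 *   nbd:host[:port][:exportname=name]
 *   nbd:unix:/path/to/socket[:exportname=name]
 *
 * Everything after the first ":exportname=" (EN_OPTSTR) is taken as the
 * export name, so that suffix must come last. */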
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    char *file;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (qdict_haskey(options, "host")
        || qdict_haskey(options, "port")
        || qdict_haskey(options, "path"))
    {
        error_setg(errp, "host/port/path and a file name may not be specified "
                         "at the same time");
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            goto out;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put(options, "export", qstring_from_str(export_name));
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        goto out;
    }

    if (!*host_spec) {
        goto out;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put(options, "path", qstring_from_str(unixpath));
    } else {
        InetSocketAddress *addr = NULL;

        addr = inet_parse(host_spec, errp);
        if (error_is_set(errp)) {
            goto out;
        }

        qdict_put(options, "host", qstring_from_str(addr->host));
        qdict_put(options, "port", qstring_from_str(addr->port));
        qapi_free_InetSocketAddress(addr);
    }

out:
    g_free(file);
}

static int nbd_config(BDRVNBDState *s, QDict *options)
{
    Error *local_err = NULL;

    if (qdict_haskey(options, "path")) {
        if (qdict_haskey(options, "host")) {
            qerror_report(ERROR_CLASS_GENERIC_ERROR, "path and host may not "
                          "be used at the same time.");
            return -EINVAL;
        }
        s->is_unix = true;
    } else if (qdict_haskey(options, "host")) {
        s->is_unix = false;
    } else {
        return -EINVAL;
    }

    s->socket_opts = qemu_opts_create_nofail(&socket_optslist);

    qemu_opts_absorb_qdict(s->socket_opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        return -EINVAL;
    }

    if (!qemu_opt_get(s->socket_opts, "port")) {
        qemu_opt_set_number(s->socket_opts, "port", NBD_DEFAULT_PORT);
    }

    s->export_name = g_strdup(qdict_get_try_str(options, "export"));
    if (s->export_name) {
        qdict_del(options, "export");
    }

    return 0;
}

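/* Up to MAX_NBD_REQUESTS requests can be in flight at once.  Each request
 * claims a free slot in recv_coroutine[] and encodes the slot index into its
 * handle, so that nbd_reply_ready() can wake the right coroutine when the
 * matching reply arrives. */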
static void nbd_coroutine_start(BDRVNBDState *s, struct nbd_request *request)
{
    int i;

    /* Poor man's semaphore.  The free_sema is locked when no further request
     * can be accepted, and unlocked again after a reply has been received. */
    if (s->in_flight >= MAX_NBD_REQUESTS - 1) {
        qemu_co_mutex_lock(&s->free_sema);
        assert(s->in_flight < MAX_NBD_REQUESTS);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);
}

static int nbd_have_request(void *opaque)
{
    BDRVNBDState *s = opaque;

    return s->in_flight > 0;
}

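/* Read handler for the NBD socket: fetch a reply header (if none is already
 * pending) and re-enter the coroutine that owns the matching handle.  If the
 * read fails or the handle is bogus, wake every waiting coroutine so each of
 * them can notice the error and bail out. */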
static void nbd_reply_ready(void *opaque)
{
    BDRVNBDState *s = opaque;
    uint64_t i;
    int ret;

    if (s->reply.handle == 0) {
        /* No reply already in flight.  Fetch a header.  It is possible
         * that another thread has done the same thing in parallel, so
         * the socket is not readable anymore.
         */
        ret = nbd_receive_reply(s->sock, &s->reply);
        if (ret == -EAGAIN) {
            return;
        }
        if (ret < 0) {
            s->reply.handle = 0;
            goto fail;
        }
    }

    /* There's no need for a mutex on the receive side, because the
     * handler acts as a synchronization point and ensures that only
     * one coroutine is called until the reply finishes. */
    i = HANDLE_TO_INDEX(s, s->reply.handle);
    if (i >= MAX_NBD_REQUESTS) {
        goto fail;
    }

    if (s->recv_coroutine[i]) {
        qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        return;
    }

fail:
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i]) {
            qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        }
    }
}

static void nbd_restart_write(void *opaque)
{
    BDRVNBDState *s = opaque;
    qemu_coroutine_enter(s->send_coroutine, NULL);
}

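/* Send one request (and, for writes, its payload) over the socket.  Senders
 * are serialized with send_mutex; while a send is in progress the write
 * handler is installed so the coroutine can be restarted when the socket
 * becomes writable again.  On TCP, the socket is corked so the request
 * header and payload are coalesced into as few segments as possible. */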
static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    int rc, ret;

    qemu_co_mutex_lock(&s->send_mutex);
    s->send_coroutine = qemu_coroutine_self();
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
                            nbd_have_request, s);
    if (qiov) {
        if (!s->is_unix) {
            socket_set_cork(s->sock, 1);
        }
        rc = nbd_send_request(s->sock, request);
        if (rc >= 0) {
            ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        if (!s->is_unix) {
            socket_set_cork(s->sock, 0);
        }
    } else {
        rc = nbd_send_request(s->sock, request);
    }
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
                            nbd_have_request, s);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}

static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
                                 struct nbd_reply *reply,
                                 QEMUIOVector *qiov, int offset)
{
    int ret;

    /* Wait until we're woken up by the read handler.  TODO: perhaps
     * peek at the next reply and avoid yielding if it's ours? */
    qemu_coroutine_yield();
    *reply = s->reply;
    if (reply->handle != request->handle) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            ret = qemu_co_recvv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                reply->error = EIO;
            }
        }

        /* Tell the read handler to read another header. */
        s->reply.handle = 0;
    }
}

static void nbd_coroutine_end(BDRVNBDState *s, struct nbd_request *request)
{
    int i = HANDLE_TO_INDEX(s, request->handle);
    s->recv_coroutine[i] = NULL;
    if (s->in_flight-- == MAX_NBD_REQUESTS) {
        qemu_co_mutex_unlock(&s->free_sema);
    }
}

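/* Connect to the server (TCP or UNIX socket), run the NBD negotiation to
 * learn the export size, block size and feature flags, then switch the
 * socket to non-blocking mode for coroutine-based I/O. */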
static int nbd_establish_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    int sock;
    int ret;
    off_t size;
    size_t blocksize;

    if (s->is_unix) {
        sock = unix_socket_outgoing(qemu_opt_get(s->socket_opts, "path"));
    } else {
        sock = tcp_socket_outgoing_opts(s->socket_opts);
        if (sock >= 0) {
            socket_set_nodelay(sock);
        }
    }

    /* Failed to establish connection */
    if (sock < 0) {
        logout("Failed to establish connection to NBD server\n");
        return -errno;
    }

    /* NBD handshake */
    ret = nbd_receive_negotiate(sock, s->export_name, &s->nbdflags, &size,
                                &blocksize);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        closesocket(sock);
        return ret;
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism. */
    qemu_set_nonblock(sock);
    qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL,
                            nbd_have_request, s);

    s->sock = sock;
    s->size = size;
    s->blocksize = blocksize;

    logout("Established connection with NBD server\n");
    return 0;
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;

    request.type = NBD_CMD_DISC;
    request.from = 0;
    request.len = 0;
    nbd_send_request(s->sock, &request);

    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
    closesocket(s->sock);
}

static int nbd_open(BlockDriverState *bs, QDict *options, int flags)
{
    BDRVNBDState *s = bs->opaque;
    int result;

    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->free_sema);

    /* Pop the config into our state object.  Exit if invalid. */
    result = nbd_config(s, options);
    if (result != 0) {
        return result;
    }

    /* Establish the connection (TCP or UNIX socket) and return an error if
     * it fails.
     * TODO: Configurable retry-until-timeout behaviour.
     */
    result = nbd_establish_connection(bs);

    return result;
}

static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov,
                          int offset)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    request.type = NBD_CMD_READ;
    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, NULL, 0);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, qiov, offset);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
                           int nb_sectors, QEMUIOVector *qiov,
                           int offset)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    request.type = NBD_CMD_WRITE;
    if (!bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
        request.type |= NBD_CMD_FLAG_FUA;
    }

    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, qiov, offset);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
 * remain aligned to 4K. */
#define NBD_MAX_SECTORS 2040
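/* 2040 sectors * 512 bytes = 1,044,480 bytes, i.e. just under the 1M limit
 * mentioned above, and a multiple of 4096 bytes (2040 = 255 * 8 sectors),
 * so split requests keep 4K alignment. */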

static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
                        int nb_sectors, QEMUIOVector *qiov)
{
    int offset = 0;
    int ret;
    while (nb_sectors > NBD_MAX_SECTORS) {
        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
        if (ret < 0) {
            return ret;
        }
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
}

static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
{
    int offset = 0;
    int ret;
    while (nb_sectors > NBD_MAX_SECTORS) {
        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
        if (ret < 0) {
            return ret;
        }
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
}

static int nbd_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    if (!(s->nbdflags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.type = NBD_CMD_FLUSH;
    if (s->nbdflags & NBD_FLAG_SEND_FUA) {
        request.type |= NBD_CMD_FLAG_FUA;
    }

    request.from = 0;
    request.len = 0;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, NULL, 0);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    if (!(s->nbdflags & NBD_FLAG_SEND_TRIM)) {
        return 0;
    }
    request.type = NBD_CMD_TRIM;
    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, NULL, 0);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

static void nbd_close(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    g_free(s->export_name);
    qemu_opts_del(s->socket_opts);

    nbd_teardown_connection(bs);
}

static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->size;
}

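/* The three BlockDriver registrations below expose this driver under the
 * "nbd", "nbd+tcp" and "nbd+unix" protocol names.  Typical invocations
 * (shown for illustration only; "myexport" and the socket path are assumed
 * names for a server, e.g. qemu-nbd, that is already exporting an image):
 *
 *   qemu-system-x86_64 -drive file=nbd://localhost:10809/myexport
 *   qemu-system-x86_64 -drive file=nbd+unix:///myexport?socket=/tmp/nbd.sock
 *   qemu-io -c 'read 0 4k' nbd:localhost:10809:exportname=myexport
 */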
static BlockDriver bdrv_nbd = {
    .format_name         = "nbd",
    .protocol_name       = "nbd",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_parse_filename = nbd_parse_filename,
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name         = "nbd",
    .protocol_name       = "nbd+tcp",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_parse_filename = nbd_parse_filename,
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name         = "nbd",
    .protocol_name       = "nbd+unix",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_parse_filename = nbd_parse_filename,
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);