/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright Red Hat
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
#define INDEX_TO_COOKIE(index)  ((index) + 1)
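
/*
 * Illustrative sketch (editor's note, not part of the driver): cookies
 * are simply 1-based request-slot indexes, so a cookie of 0 can safely
 * mean "no reply pending" (see the use of s->reply.cookie below):
 *
 *     uint64_t cookie = INDEX_TO_COOKIE(3);   // slot 3 -> cookie 4
 *     assert(COOKIE_TO_INDEX(cookie) == 3);   // mapping round-trips
 *     assert(INDEX_TO_COOKIE(0) != 0);        // cookie 0 stays reserved
 */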

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

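/*
 * A rough sketch of the client state machine, as implemented by
 * nbd_channel_error_locked(), reconnect_delay_timer_cb() and
 * nbd_co_do_establish_connection() below (drawn from this file only):
 *
 *     CONNECTED --(-EIO, reconnect_delay set)--> CONNECTING_WAIT
 *     CONNECTED --(-EIO, no reconnect_delay)---> CONNECTING_NOWAIT
 *     CONNECTING_WAIT --(delay timer fires)----> CONNECTING_NOWAIT
 *     CONNECTING_* --(reconnect succeeds)------> CONNECTED
 *     any state --(fatal, non-EIO error)-------> QUIT
 */
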
typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;
    CoQueue free_sema;
    unsigned in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket. */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;

static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}

/* Called with s->receive_mutex taken. */
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
{
    int i;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
            return;
        }
    }
}

/* Called with s->requests_lock held. */
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
{
    QEMU_LOCK_GUARD(&s->requests_lock);
    nbd_channel_error_locked(s, ret);
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    reconnect_delay_timer_del(s);
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}

static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}

static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}

static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
    /*
     * Called only after a socket error, so this is not performance sensitive.
     */
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;
}

/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}

int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                bool blocking, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;
    IO_CODE();

    assert_bdrv_graph_readable();
    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(s->ioc, true);

    /* successfully connected */
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

    return 0;
}

/* Called with s->requests_lock held. */
static bool nbd_client_connecting(BDRVNBDState *s)
{
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}

/* Called with s->requests_lock taken. */
static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
{
    int ret;
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 1);

    trace_nbd_reconnect_attempt(s->bs->in_flight);

    if (blocking && !s->reconnect_delay_timer) {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
            qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
            s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /* Finalize previous connection if any */
    if (s->ioc) {
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
    trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
    qemu_mutex_lock(&s->requests_lock);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}

static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie,
                                            Error **errp)
{
    int ret;
    uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }

        if (s->reply.cookie != 0) {
            /*
             * Some other request is being handled now. It should already
             * have been woken by whoever set s->reply.cookie (or it will
             * never wait in this yield), so we must not wake it here.
             */
            ind2 = COOKIE_TO_INDEX(s->reply.cookie);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for two reasons:
             * 1. From this function, running in a parallel coroutine, when
             *    our cookie is received.
             * 2. From nbd_co_receive_one_chunk(), when the previous request
             *    is finished and s->reply.cookie is set to 0.
             * Either way, it's OK to lock the mutex and go to the next
             * iteration.
             */

            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and cookie is 0. We have to do the dirty work. */
        assert(s->reply.cookie == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, s->info.mode, errp);
        if (ret == 0) {
            ret = -EIO;
            error_setg(errp, "server dropped connection");
        }
        if (ret < 0) {
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) &&
            s->info.mode < NBD_MODE_STRUCTURED) {
            nbd_channel_error(s, -EINVAL);
            error_setg(errp, "unexpected structured reply");
            return -EINVAL;
        }
        ind2 = COOKIE_TO_INDEX(s->reply.cookie);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            error_setg(errp, "unexpected cookie value");
            return -EINVAL;
        }
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}

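/*
 * Sketch of the reply-dispatch scheme implemented above (descriptive
 * comment only): whichever request coroutine reaches nbd_receive_replies()
 * while s->reply.cookie == 0 becomes the reader and pulls the next reply
 * header off the socket under receive_mutex. If that header belongs to a
 * different slot, the reader wakes that slot's coroutine and keeps looping;
 * coroutines whose reply has not yet arrived park themselves with
 * requests[i].receiving = true and yield until woken.
 */
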
static int coroutine_fn GRAPH_RDLOCK
nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                    QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        if (nbd_client_connecting(s)) {
            nbd_reconnect_attempt(s);
            qemu_co_queue_restart_all(&s->free_sema);
        }
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            goto err;
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->cookie = INDEX_TO_COOKIE(i);
    request->mode = s->info.mode;

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                              NULL) < 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);

    if (rc < 0) {
        qemu_mutex_lock(&s->requests_lock);
err:
        nbd_channel_error_locked(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
        }
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
        qemu_mutex_unlock(&s->requests_lock);
    }
    return rc;
}

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}

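/*
 * Usage sketch (editor's note): each helper loads one big-endian field
 * and advances the cursor past it, so consecutive calls walk a packed
 * payload in order, as the parsers below do:
 *
 *     uint64_t offset = payload_advance64(&payload);   // bytes 0..7
 *     uint32_t length = payload_advance32(&payload);   // bytes 8..11
 */
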
static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, bool wide,
                                         uint64_t orig_length,
                                         NBDExtent64 *extent, Error **errp)
{
    uint32_t context_id;
    uint32_t count;
    size_t ext_len = wide ? sizeof(*extent) : sizeof(NBDExtent32);
    size_t pay_len = sizeof(context_id) + wide * sizeof(count) + ext_len;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < pay_len) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   s->info.context_id);
        return -EINVAL;
    }

    if (wide) {
        count = payload_advance32(&payload);
        extent->length = payload_advance64(&payload);
        extent->flags = payload_advance64(&payload);
    } else {
        count = 0;
        extent->length = payload_advance32(&payload);
        extent->flags = payload_advance32(&payload);
    }

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. Furthermore, a wide
     * server should have replied with an accurate count (we left
     * count at 0 for a narrow server). However, it's easy enough to
     * ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (count != wide || chunk->length > pay_len) {
        trace_nbd_parse_blockstatus_compliance("unexpected extent count");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}

/*
 * nbd_parse_error_payload
 * On success, @errp contains a message describing the NBD error reply.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int coroutine_fn
nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
                                   QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect. If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t cookie, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = COOKIE_TO_INDEX(cookie);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    ret = nbd_receive_replies(s, cookie, errp);
    if (ret < 0) {
        error_prepend(errp, "Connection closed: ");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.cookie == cookie);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.mode >= NBD_MODE_STRUCTURED);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read a reply chunk, wake waiting coroutines, and record a channel error
 * if needed. The return value is a fatal error code (or 0); the server's
 * per-request error, if any, is reported via *request_ret.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t cookie, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For assert at loop start in nbd_connection_entry */
        *reply = s->reply;
    }
    s->reply.cookie = 0;

    nbd_recv_coroutines_wake(s);

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)

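/*
 * Usage sketch (mirrors nbd_co_receive_return_code() below): the macro
 * expands to a for-loop whose condition receives the next chunk, so the
 * body runs once per structured-reply chunk and zero times for a simple
 * reply:
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply,
 *                             &payload) {
 *         ... inspect reply.structured, then g_free(payload) ...
 *     }
 */
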
/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                                      NBDReplyChunkIter *iter,
                                                      uint64_t cookie,
                                                      QEMUIOVector *qiov,
                                                      NBDReply *reply,
                                                      void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || iter->ret < 0) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    qemu_mutex_lock(&s->requests_lock);
    s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

    return false;
}

static int coroutine_fn
nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
                           int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn
nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
                             uint64_t offset, QEMUIOVector *qiov,
                             int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
                            s->info.mode >= NBD_MODE_STRUCTURED,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
                                 uint64_t length, NBDExtent64 *extent,
                                 int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;
        bool wide;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            wide = chunk->type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT;
            if ((s->info.mode >= NBD_MODE_EXTENDED) != wide) {
                trace_nbd_extended_headers_compliance("block_status");
            }
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(
                s, &reply.structured, payload, wide,
                length, extent, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn GRAPH_RDLOCK
nbd_co_request(BlockDriverState *bs, NBDRequest *request,
               QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    do {
        ret = nbd_co_send_request(bs, request, write_qiov);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_return_code(s, request->cookie,
                                         &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->cookie, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
                                           &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.cookie,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                            BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    /* rely on max_pwrite_zeroes */
    assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    /* rely on max_pdiscard */
    assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent64 extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(bytes, s->info.size - offset),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }
    if (s->info.mode < NBD_MODE_EXTENDED) {
        request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                          request.len);
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
                                               &extent, &request_ret,
                                               &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.cookie,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}

static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}

static void nbd_yank(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    QEMU_LOCK_GUARD(&s->requests_lock);
    qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    s->state = NBD_CLIENT_QUIT;
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

    nbd_teardown_connection(bs);
}


/*
 * Parse nbd_open options
 */

static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "";
    if (p[0] == '/') {
        p++;
    }
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

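/*
 * Examples of accepted URI forms and the options they map to (derived
 * from the parser above; values are illustrative):
 *
 *     nbd://localhost:10809/export
 *         -> server.type=inet, server.host=localhost, server.port=10809,
 *            export=export
 *     nbd+unix:///export?socket=/tmp/nbd.sock
 *         -> server.type=unix, server.path=/tmp/nbd.sock, export=export
 */
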
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}

static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}

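/*
 * Examples of the legacy (non-URI) filename syntax handled above
 * (illustrative values only):
 *
 *     nbd:localhost:10809:exportname=myexport
 *         -> TCP connection to localhost:10809, export "myexport"
 *     nbd:unix:/tmp/nbd.sock
 *         -> UNIX socket /tmp/nbd.sock, default export
 */
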
static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}

static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}


static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "tls-hostname",
            .type = QEMU_OPT_STRING,
            .help = "Override hostname for validating TLS x509 certificate",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        {
            .name = "open-timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "In seconds. If zero, the nbd driver tries the connection "
                    "only once, and fails to open if the connection fails. "
                    "If non-zero, the nbd driver will repeat connection "
                    "attempts until successful or until @open-timeout seconds "
                    "have elapsed. Default 0",
        },
        { /* end of list */ }
    },
};

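/*
 * Illustrative command line (hypothetical values) showing how these
 * options are typically supplied through -blockdev:
 *
 *     qemu-system-x86_64 \
 *         -blockdev driver=nbd,node-name=drive0,server.type=inet,\
 *     server.host=localhost,server.port=10809,export=myexport,\
 *     reconnect-delay=10
 */
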
1847 static int nbd_process_options(BlockDriverState *bs, QDict *options,
1848 Error **errp)
1849 {
1850 BDRVNBDState *s = bs->opaque;
1851 QemuOpts *opts;
1852 int ret = -EINVAL;
1853
1854 opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
1855 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
1856 goto error;
1857 }
1858
1859 /* Translate @host, @port, and @path to a SocketAddress */
1860 if (!nbd_process_legacy_socket_options(options, opts, errp)) {
1861 goto error;
1862 }
1863
1864 /* Pop the config into our state object. Exit if invalid. */
1865 s->saddr = nbd_config(s, options, errp);
1866 if (!s->saddr) {
1867 goto error;
1868 }
1869
1870 s->export = g_strdup(qemu_opt_get(opts, "export"));
1871 if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
1872 error_setg(errp, "export name too long to send to server");
1873 goto error;
1874 }
1875
1876 s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
1877 if (s->tlscredsid) {
1878 s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
1879 if (!s->tlscreds) {
1880 goto error;
1881 }
1882
1883 s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
1884 if (!s->tlshostname &&
1885 s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
1886 s->tlshostname = g_strdup(s->saddr->u.inet.host);
1887 }
1888 }
1889
1890 s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
1891 if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
1892 error_setg(errp, "x-dirty-bitmap query too long to send to server");
1893 goto error;
1894 }
1895
1896 s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
1897 s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);
1898
1899 ret = 0;
1900
1901 error:
1902 qemu_opts_del(opts);
1903 return ret;
1904 }
1905
1906 static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
1907 Error **errp)
1908 {
1909 int ret;
1910 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1911
1912 s->bs = bs;
1913 qemu_mutex_init(&s->requests_lock);
1914 qemu_co_queue_init(&s->free_sema);
1915 qemu_co_mutex_init(&s->send_mutex);
1916 qemu_co_mutex_init(&s->receive_mutex);
1917
1918 if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
1919 return -EEXIST;
1920 }
1921
1922 ret = nbd_process_options(bs, options, errp);
1923 if (ret < 0) {
1924 goto fail;
1925 }
1926
1927 s->conn = nbd_client_connection_new(s->saddr, true, s->export,
1928 s->x_dirty_bitmap, s->tlscreds,
1929 s->tlshostname);
1930
1931 if (s->open_timeout) {
1932 nbd_client_connection_enable_retry(s->conn);
1933 open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
1934 s->open_timeout * NANOSECONDS_PER_SECOND);
1935 }
1936
1937 s->state = NBD_CLIENT_CONNECTING_WAIT;
1938 ret = nbd_do_establish_connection(bs, true, errp);
1939 if (ret < 0) {
1940 goto fail;
1941 }
1942
1943 /*
1944 * The connect attempt is done, so we no longer need this timer.
1945 * Delete it, because we do not want it to be around when this node
1946 * is drained or closed.
1947 */
1948 open_timer_del(s);
1949
1950 nbd_client_connection_enable_retry(s->conn);
1951
1952 return 0;
1953
1954 fail:
1955 open_timer_del(s);
1956 nbd_clear_bdrvstate(bs);
1957 return ret;
1958 }
1959
1960 static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
1961 {
1962 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1963 uint32_t min = s->info.min_block;
1964 uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);
1965
1966 /*
1967 * If the server did not advertise an alignment:
1968 * - a size that is not sector-aligned implies that an alignment
1969 * of 1 can be used to access those tail bytes
1970 * - advertisement of block status requires an alignment of 1, so
1971 * that we don't violate block layer constraints that block
1972 * status is always aligned (as we can't control whether the
1973 * server will report sub-sector extents, such as a hole at EOF
1974 * on an unaligned POSIX file)
1975 * - otherwise, assume the server is so old that we are safer avoiding
1976 * sub-sector requests
1977 */
1978 if (!min) {
1979 min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
1980 s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
1981 }
1982
1983 bs->bl.request_alignment = min;
1984 bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
1985 bs->bl.max_pwrite_zeroes = max;
1986 bs->bl.max_transfer = max;
1987
1988 /*
1989 * Assume that if the server supports extended headers, it also
1990 * supports unlimited size zero and trim commands.
1991 */
1992 if (s->info.mode >= NBD_MODE_EXTENDED) {
1993 bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
1994 }
1995
1996 if (s->info.opt_block &&
1997 s->info.opt_block > bs->bl.opt_transfer) {
1998 bs->bl.opt_transfer = s->info.opt_block;
1999 }
2000 }
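 /*
  * Worked example for the limits above (numbers made up): a server that
  * advertises no block sizes for a 999-byte export yields
  * request_alignment 1, keeping the unaligned tail byte addressable; an
  * equally silent server exporting 1 MiB without base:allocation gets the
  * conservative 512-byte alignment, and max_pdiscard becomes INT_MAX
  * rounded down to a multiple of 512.
  */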
2001
2002 static void nbd_close(BlockDriverState *bs)
2003 {
2004 nbd_client_close(bs);
2005 nbd_clear_bdrvstate(bs);
2006 }
2007
2008 /*
2009 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
2010 * to a smaller size with exact=false, there is no reason to fail the
2011 * operation.
2012 *
2013 * Preallocation mode is ignored since it does not seem useful to fail when
2014 * we never change anything.
2015 */
2016 static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
2017 bool exact, PreallocMode prealloc,
2018 BdrvRequestFlags flags, Error **errp)
2019 {
2020 BDRVNBDState *s = bs->opaque;
2021
2022 if (offset != s->info.size && exact) {
2023 error_setg(errp, "Cannot resize NBD nodes");
2024 return -ENOTSUP;
2025 }
2026
2027 if (offset > s->info.size) {
2028 error_setg(errp, "Cannot grow NBD nodes");
2029 return -EINVAL;
2030 }
2031
2032 return 0;
2033 }
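 /*
  * In effect: truncating to the current size always succeeds, shrinking
  * with exact=false succeeds as a no-op, any exact resize to a different
  * size returns -ENOTSUP, and growing with exact=false returns -EINVAL.
  */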
2034
2035 static int64_t coroutine_fn nbd_co_getlength(BlockDriverState *bs)
2036 {
2037 BDRVNBDState *s = bs->opaque;
2038
2039 return s->info.size;
2040 }
2041
2042 static void nbd_refresh_filename(BlockDriverState *bs)
2043 {
2044 BDRVNBDState *s = bs->opaque;
2045 const char *host = NULL, *port = NULL, *path = NULL;
2046 size_t len = 0;
2047
2048 if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
2049 const InetSocketAddress *inet = &s->saddr->u.inet;
2050 if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
2051 host = inet->host;
2052 port = inet->port;
2053 }
2054 } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
2055 path = s->saddr->u.q_unix.path;
2056 } /* else can't represent as pseudo-filename */
2057
2058 if (path && s->export) {
2059 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2060 "nbd+unix:///%s?socket=%s", s->export, path);
2061 } else if (path && !s->export) {
2062 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2063 "nbd+unix://?socket=%s", path);
2064 } else if (host && s->export) {
2065 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2066 "nbd://%s:%s/%s", host, port, s->export);
2067 } else if (host && !s->export) {
2068 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2069 "nbd://%s:%s", host, port);
2070 }
2071 if (len >= sizeof(bs->exact_filename)) {
2072 /* Name is too long to represent exactly, so leave it empty. */
2073 bs->exact_filename[0] = '\0';
2074 }
2075 }
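 /*
  * Sample renderings (addresses invented for illustration): a UNIX socket
  * /tmp/nbd.sock with export "disk0" becomes
  * "nbd+unix:///disk0?socket=/tmp/nbd.sock", while a TCP server at
  * 10.0.0.1:10809 without an export name becomes "nbd://10.0.0.1:10809".
  */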
2076
2077 static char *nbd_dirname(BlockDriverState *bs, Error **errp)
2078 {
2079 /* The generic bdrv_dirname() implementation could work out some
2080 * directory name for NBD nodes, but any such name would be wrong: there
2081 * is so far no specification for how "export paths" would work, so NBD
2082 * has no notion of directory names. */
2083 error_setg(errp, "Cannot generate a base directory for NBD nodes");
2084 return NULL;
2085 }
2086
2087 static const char *const nbd_strong_runtime_opts[] = {
2088 "path",
2089 "host",
2090 "port",
2091 "export",
2092 "tls-creds",
2093 "tls-hostname",
2094 "server.",
2095
2096 NULL
2097 };
2098
2099 static void nbd_cancel_in_flight(BlockDriverState *bs)
2100 {
2101 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
2102
2103 reconnect_delay_timer_del(s);
2104
2105 qemu_mutex_lock(&s->requests_lock);
2106 if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
2107 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
2108 }
2109 qemu_mutex_unlock(&s->requests_lock);
2110
2111 nbd_co_establish_connection_cancel(s->conn);
2112 }
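 /*
  * The WAIT -> NOWAIT transition above presumably makes requests that are
  * queued for a reconnect give up immediately instead of sitting out the
  * full reconnect-delay, which is what cancellation wants.
  */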
2113
2114 static void nbd_attach_aio_context(BlockDriverState *bs,
2115 AioContext *new_context)
2116 {
2117 BDRVNBDState *s = bs->opaque;
2118
2119 /* The open_timer is used only during nbd_open() */
2120 assert(!s->open_timer);
2121
2122 /*
2123 * The reconnect_delay_timer is scheduled in I/O paths when the
2124 * connection is lost, to cancel the reconnection attempt after a
2125 * given time. Once this attempt is done (successfully or not),
2126 * nbd_reconnect_attempt() ensures the timer is deleted before the
2127 * respective I/O request is resumed.
2128 * Since the AioContext can only be changed when a node is drained,
2129 * the reconnect_delay_timer cannot be active here.
2130 */
2131 assert(!s->reconnect_delay_timer);
2132 }
2133
2134 static void nbd_detach_aio_context(BlockDriverState *bs)
2135 {
2136 BDRVNBDState *s = bs->opaque;
2137
2138 assert(!s->open_timer);
2139 assert(!s->reconnect_delay_timer);
2140 }
2141
2142 static BlockDriver bdrv_nbd = {
2143 .format_name = "nbd",
2144 .protocol_name = "nbd",
2145 .instance_size = sizeof(BDRVNBDState),
2146 .bdrv_parse_filename = nbd_parse_filename,
2147 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2148 .create_opts = &bdrv_create_opts_simple,
2149 .bdrv_file_open = nbd_open,
2150 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2151 .bdrv_co_preadv = nbd_client_co_preadv,
2152 .bdrv_co_pwritev = nbd_client_co_pwritev,
2153 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2154 .bdrv_close = nbd_close,
2155 .bdrv_co_flush_to_os = nbd_client_co_flush,
2156 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2157 .bdrv_refresh_limits = nbd_refresh_limits,
2158 .bdrv_co_truncate = nbd_co_truncate,
2159 .bdrv_co_getlength = nbd_co_getlength,
2160 .bdrv_refresh_filename = nbd_refresh_filename,
2161 .bdrv_co_block_status = nbd_client_co_block_status,
2162 .bdrv_dirname = nbd_dirname,
2163 .strong_runtime_opts = nbd_strong_runtime_opts,
2164 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2165
2166 .bdrv_attach_aio_context = nbd_attach_aio_context,
2167 .bdrv_detach_aio_context = nbd_detach_aio_context,
2168 };
2169
2170 static BlockDriver bdrv_nbd_tcp = {
2171 .format_name = "nbd",
2172 .protocol_name = "nbd+tcp",
2173 .instance_size = sizeof(BDRVNBDState),
2174 .bdrv_parse_filename = nbd_parse_filename,
2175 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2176 .create_opts = &bdrv_create_opts_simple,
2177 .bdrv_file_open = nbd_open,
2178 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2179 .bdrv_co_preadv = nbd_client_co_preadv,
2180 .bdrv_co_pwritev = nbd_client_co_pwritev,
2181 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2182 .bdrv_close = nbd_close,
2183 .bdrv_co_flush_to_os = nbd_client_co_flush,
2184 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2185 .bdrv_refresh_limits = nbd_refresh_limits,
2186 .bdrv_co_truncate = nbd_co_truncate,
2187 .bdrv_co_getlength = nbd_co_getlength,
2188 .bdrv_refresh_filename = nbd_refresh_filename,
2189 .bdrv_co_block_status = nbd_client_co_block_status,
2190 .bdrv_dirname = nbd_dirname,
2191 .strong_runtime_opts = nbd_strong_runtime_opts,
2192 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2193
2194 .bdrv_attach_aio_context = nbd_attach_aio_context,
2195 .bdrv_detach_aio_context = nbd_detach_aio_context,
2196 };
2197
2198 static BlockDriver bdrv_nbd_unix = {
2199 .format_name = "nbd",
2200 .protocol_name = "nbd+unix",
2201 .instance_size = sizeof(BDRVNBDState),
2202 .bdrv_parse_filename = nbd_parse_filename,
2203 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2204 .create_opts = &bdrv_create_opts_simple,
2205 .bdrv_file_open = nbd_open,
2206 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2207 .bdrv_co_preadv = nbd_client_co_preadv,
2208 .bdrv_co_pwritev = nbd_client_co_pwritev,
2209 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2210 .bdrv_close = nbd_close,
2211 .bdrv_co_flush_to_os = nbd_client_co_flush,
2212 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2213 .bdrv_refresh_limits = nbd_refresh_limits,
2214 .bdrv_co_truncate = nbd_co_truncate,
2215 .bdrv_co_getlength = nbd_co_getlength,
2216 .bdrv_refresh_filename = nbd_refresh_filename,
2217 .bdrv_co_block_status = nbd_client_co_block_status,
2218 .bdrv_dirname = nbd_dirname,
2219 .strong_runtime_opts = nbd_strong_runtime_opts,
2220 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2221
2222 .bdrv_attach_aio_context = nbd_attach_aio_context,
2223 .bdrv_detach_aio_context = nbd_detach_aio_context,
2224 };
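 /*
  * The three driver tables are identical except for protocol_name, so a
  * pseudo-filename such as "nbd+unix://..." selects the same callbacks as
  * "nbd://..."; the scheme only steers filename parsing.
  */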
2225
2226 static void bdrv_nbd_init(void)
2227 {
2228 bdrv_register(&bdrv_nbd);
2229 bdrv_register(&bdrv_nbd_tcp);
2230 bdrv_register(&bdrv_nbd_unix);
2231 }
2232
2233 block_init(bdrv_nbd_init);