block/nbd.c (mirror_qemu.git, at merge tag 'pull-nbd-2023-09-25' of https://repo.or.cz/qemu/ericb)
1 /*
2 * QEMU Block driver for NBD
3 *
4 * Copyright (c) 2019 Virtuozzo International GmbH.
5 * Copyright Red Hat
6 * Copyright (C) 2008 Bull S.A.S.
7 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
8 *
9 * Some parts:
10 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this software and associated documentation files (the "Software"), to deal
14 * in the Software without restriction, including without limitation the rights
15 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 * copies of the Software, and to permit persons to whom the Software is
17 * furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
28 * THE SOFTWARE.
29 */
30
31 #include "qemu/osdep.h"
32
33 #include "trace.h"
34 #include "qemu/uri.h"
35 #include "qemu/option.h"
36 #include "qemu/cutils.h"
37 #include "qemu/main-loop.h"
38
39 #include "qapi/qapi-visit-sockets.h"
40 #include "qapi/qmp/qstring.h"
41 #include "qapi/clone-visitor.h"
42
43 #include "block/qdict.h"
44 #include "block/nbd.h"
45 #include "block/block_int.h"
46 #include "block/coroutines.h"
47
48 #include "qemu/yank.h"
49
50 #define EN_OPTSTR ":exportname="
51 #define MAX_NBD_REQUESTS 16
52
53 #define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
54 #define INDEX_TO_COOKIE(index) ((index) + 1)
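
/*
 * Illustrative note (not part of the upstream file): the cookie/index
 * mapping is a simple off-by-one so that a cookie value of 0 can be
 * reserved to mean "no reply currently being processed" (see
 * nbd_receive_replies below). For example, requests[0] is addressed by
 * cookie 1 and requests[15] by cookie MAX_NBD_REQUESTS.
 */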
55
56 typedef struct {
57 Coroutine *coroutine;
58 uint64_t offset; /* original offset of the request */
59 bool receiving; /* sleeping in the yield in nbd_receive_replies */
60 } NBDClientRequest;
61
62 typedef enum NBDClientState {
63 NBD_CLIENT_CONNECTING_WAIT,
64 NBD_CLIENT_CONNECTING_NOWAIT,
65 NBD_CLIENT_CONNECTED,
66 NBD_CLIENT_QUIT
67 } NBDClientState;
68
69 typedef struct BDRVNBDState {
70 QIOChannel *ioc; /* The current I/O channel */
71 NBDExportInfo info;
72
73 /*
74 * Protects state, free_sema, in_flight, requests[].coroutine,
75 * reconnect_delay_timer.
76 */
77 QemuMutex requests_lock;
78 NBDClientState state;
79 CoQueue free_sema;
80 unsigned in_flight;
81 NBDClientRequest requests[MAX_NBD_REQUESTS];
82 QEMUTimer *reconnect_delay_timer;
83
84 /* Protects sending data on the socket. */
85 CoMutex send_mutex;
86
87 /*
88 * Protects receiving reply headers from the socket, as well as the
89 * fields reply and requests[].receiving
90 */
91 CoMutex receive_mutex;
92 NBDReply reply;
93
94 QEMUTimer *open_timer;
95
96 BlockDriverState *bs;
97
98 /* Connection parameters */
99 uint32_t reconnect_delay;
100 uint32_t open_timeout;
101 SocketAddress *saddr;
102 char *export;
103 char *tlscredsid;
104 QCryptoTLSCreds *tlscreds;
105 char *tlshostname;
106 char *x_dirty_bitmap;
107 bool alloc_depth;
108
109 NBDClientConnection *conn;
110 } BDRVNBDState;
111
112 static void nbd_yank(void *opaque);
113
114 static void nbd_clear_bdrvstate(BlockDriverState *bs)
115 {
116 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
117
118 nbd_client_connection_release(s->conn);
119 s->conn = NULL;
120
121 yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));
122
123 /* Must not leave timers behind that would access freed data */
124 assert(!s->reconnect_delay_timer);
125 assert(!s->open_timer);
126
127 object_unref(OBJECT(s->tlscreds));
128 qapi_free_SocketAddress(s->saddr);
129 s->saddr = NULL;
130 g_free(s->export);
131 s->export = NULL;
132 g_free(s->tlscredsid);
133 s->tlscredsid = NULL;
134 g_free(s->tlshostname);
135 s->tlshostname = NULL;
136 g_free(s->x_dirty_bitmap);
137 s->x_dirty_bitmap = NULL;
138 }
139
140 /* Called with s->receive_mutex taken. */
141 static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
142 {
143 if (req->receiving) {
144 req->receiving = false;
145 aio_co_wake(req->coroutine);
146 return true;
147 }
148
149 return false;
150 }
151
152 static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
153 {
154 int i;
155
156 QEMU_LOCK_GUARD(&s->receive_mutex);
157 for (i = 0; i < MAX_NBD_REQUESTS; i++) {
158 if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
159 return;
160 }
161 }
162 }
163
164 /* Called with s->requests_lock held. */
165 static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
166 {
167 if (s->state == NBD_CLIENT_CONNECTED) {
168 qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
169 }
170
171 if (ret == -EIO) {
172 if (s->state == NBD_CLIENT_CONNECTED) {
173 s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
174 NBD_CLIENT_CONNECTING_NOWAIT;
175 }
176 } else {
177 s->state = NBD_CLIENT_QUIT;
178 }
179 }
180
181 static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
182 {
183 QEMU_LOCK_GUARD(&s->requests_lock);
184 nbd_channel_error_locked(s, ret);
185 }
186
187 static void reconnect_delay_timer_del(BDRVNBDState *s)
188 {
189 if (s->reconnect_delay_timer) {
190 timer_free(s->reconnect_delay_timer);
191 s->reconnect_delay_timer = NULL;
192 }
193 }
194
195 static void reconnect_delay_timer_cb(void *opaque)
196 {
197 BDRVNBDState *s = opaque;
198
199 reconnect_delay_timer_del(s);
200 WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
201 if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
202 return;
203 }
204 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
205 }
206 nbd_co_establish_connection_cancel(s->conn);
207 }
208
209 static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
210 {
211 assert(!s->reconnect_delay_timer);
212 s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
213 QEMU_CLOCK_REALTIME,
214 SCALE_NS,
215 reconnect_delay_timer_cb, s);
216 timer_mod(s->reconnect_delay_timer, expire_time_ns);
217 }
218
219 static void nbd_teardown_connection(BlockDriverState *bs)
220 {
221 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
222
223 assert(!s->in_flight);
224
225 if (s->ioc) {
226 qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
227 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
228 nbd_yank, s->bs);
229 object_unref(OBJECT(s->ioc));
230 s->ioc = NULL;
231 }
232
233 WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
234 s->state = NBD_CLIENT_QUIT;
235 }
236 }
237
238 static void open_timer_del(BDRVNBDState *s)
239 {
240 if (s->open_timer) {
241 timer_free(s->open_timer);
242 s->open_timer = NULL;
243 }
244 }
245
246 static void open_timer_cb(void *opaque)
247 {
248 BDRVNBDState *s = opaque;
249
250 nbd_co_establish_connection_cancel(s->conn);
251 open_timer_del(s);
252 }
253
254 static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
255 {
256 assert(!s->open_timer);
257 s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
258 QEMU_CLOCK_REALTIME,
259 SCALE_NS,
260 open_timer_cb, s);
261 timer_mod(s->open_timer, expire_time_ns);
262 }
263
264 static bool nbd_client_will_reconnect(BDRVNBDState *s)
265 {
266 /*
267 * Called only after a socket error, so this is not performance sensitive.
268 */
269 QEMU_LOCK_GUARD(&s->requests_lock);
270 return s->state == NBD_CLIENT_CONNECTING_WAIT;
271 }
272
273 /*
274 * Update @bs with information learned during a completed negotiation process.
275 * Return failure if the server's advertised options are incompatible with the
276 * client's needs.
277 */
278 static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
279 {
280 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
281 int ret;
282
283 if (s->x_dirty_bitmap) {
284 if (!s->info.base_allocation) {
285 error_setg(errp, "requested x-dirty-bitmap %s not found",
286 s->x_dirty_bitmap);
287 return -EINVAL;
288 }
289 if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
290 s->alloc_depth = true;
291 }
292 }
293
294 if (s->info.flags & NBD_FLAG_READ_ONLY) {
295 ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
296 if (ret < 0) {
297 return ret;
298 }
299 }
300
301 if (s->info.flags & NBD_FLAG_SEND_FUA) {
302 bs->supported_write_flags = BDRV_REQ_FUA;
303 bs->supported_zero_flags |= BDRV_REQ_FUA;
304 }
305
306 if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
307 bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
308 if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
309 bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
310 }
311 }
312
313 trace_nbd_client_handshake_success(s->export);
314
315 return 0;
316 }
317
318 int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
319 bool blocking, Error **errp)
320 {
321 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
322 int ret;
323 IO_CODE();
324
325 assert_bdrv_graph_readable();
326 assert(!s->ioc);
327
328 s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
329 if (!s->ioc) {
330 return -ECONNREFUSED;
331 }
332
333 yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
334 bs);
335
336 ret = nbd_handle_updated_info(s->bs, NULL);
337 if (ret < 0) {
338 /*
339 * We have connected, but must fail for other reasons.
340 * Send NBD_CMD_DISC as a courtesy to the server.
341 */
342 NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
343
344 nbd_send_request(s->ioc, &request);
345
346 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
347 nbd_yank, bs);
348 object_unref(OBJECT(s->ioc));
349 s->ioc = NULL;
350
351 return ret;
352 }
353
354 qio_channel_set_blocking(s->ioc, false, NULL);
355 qio_channel_set_follow_coroutine_ctx(s->ioc, true);
356
357 /* successfully connected */
358 WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
359 s->state = NBD_CLIENT_CONNECTED;
360 }
361
362 return 0;
363 }
364
365 /* Called with s->requests_lock held. */
366 static bool nbd_client_connecting(BDRVNBDState *s)
367 {
368 return s->state == NBD_CLIENT_CONNECTING_WAIT ||
369 s->state == NBD_CLIENT_CONNECTING_NOWAIT;
370 }
371
372 /* Called with s->requests_lock taken. */
373 static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
374 {
375 int ret;
376 bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;
377
378 /*
379 * Now we are sure that nobody is accessing the channel, and no one will
380 * try until we set the state to CONNECTED.
381 */
382 assert(nbd_client_connecting(s));
383 assert(s->in_flight == 1);
384
385 trace_nbd_reconnect_attempt(s->bs->in_flight);
386
387 if (blocking && !s->reconnect_delay_timer) {
388 /*
389 * It's the first reconnect attempt after switching to
390 * NBD_CLIENT_CONNECTING_WAIT
391 */
392 g_assert(s->reconnect_delay);
393 reconnect_delay_timer_init(s,
394 qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
395 s->reconnect_delay * NANOSECONDS_PER_SECOND);
396 }
397
398 /* Finalize previous connection if any */
399 if (s->ioc) {
400 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
401 nbd_yank, s->bs);
402 object_unref(OBJECT(s->ioc));
403 s->ioc = NULL;
404 }
405
406 qemu_mutex_unlock(&s->requests_lock);
407 ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
408 trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
409 qemu_mutex_lock(&s->requests_lock);
410
411 /*
412 * The reconnect attempt is done (maybe successfully, maybe not), so
413 * we no longer need this timer. Delete it so it will not outlive
414 * this I/O request (so draining removes all timers).
415 */
416 reconnect_delay_timer_del(s);
417 }
418
419 static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
420 {
421 int ret;
422 uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
423 QEMU_LOCK_GUARD(&s->receive_mutex);
424
425 while (true) {
426 if (s->reply.cookie == cookie) {
427 /* We are done */
428 return 0;
429 }
430
431 if (s->reply.cookie != 0) {
432 /*
433 * Some other request is being handled now. It should already be
434 * woken by whoever set s->reply.cookie (or never wait in this
435 * yield). So, we should not wake it here.
436 */
437 ind2 = COOKIE_TO_INDEX(s->reply.cookie);
438 assert(!s->requests[ind2].receiving);
439
440 s->requests[ind].receiving = true;
441 qemu_co_mutex_unlock(&s->receive_mutex);
442
443 qemu_coroutine_yield();
444 /*
445 * We may be woken for 2 reasons:
446          * 1. From this function, executing in a parallel coroutine, when our
447          *    cookie is received.
448          * 2. From nbd_co_receive_one_chunk(), when the previous request is
449          *    finished and s->reply.cookie is set to 0.
450 * Anyway, it's OK to lock the mutex and go to the next iteration.
451 */
452
453 qemu_co_mutex_lock(&s->receive_mutex);
454 assert(!s->requests[ind].receiving);
455 continue;
456 }
457
458 /* We are under mutex and cookie is 0. We have to do the dirty work. */
459 assert(s->reply.cookie == 0);
460 ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
461 if (ret <= 0) {
462 ret = ret ? ret : -EIO;
463 nbd_channel_error(s, ret);
464 return ret;
465 }
466 if (nbd_reply_is_structured(&s->reply) &&
467 s->info.mode < NBD_MODE_STRUCTURED) {
468 nbd_channel_error(s, -EINVAL);
469 return -EINVAL;
470 }
471 ind2 = COOKIE_TO_INDEX(s->reply.cookie);
472 if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
473 nbd_channel_error(s, -EINVAL);
474 return -EINVAL;
475 }
476 if (s->reply.cookie == cookie) {
477 /* We are done */
478 return 0;
479 }
480 nbd_recv_coroutine_wake_one(&s->requests[ind2]);
481 }
482 }
483
484 static int coroutine_fn GRAPH_RDLOCK
485 nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
486 QEMUIOVector *qiov)
487 {
488 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
489 int rc, i = -1;
490
491 qemu_mutex_lock(&s->requests_lock);
492 while (s->in_flight == MAX_NBD_REQUESTS ||
493 (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
494 qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
495 }
496
497 s->in_flight++;
498 if (s->state != NBD_CLIENT_CONNECTED) {
499 if (nbd_client_connecting(s)) {
500 nbd_reconnect_attempt(s);
501 qemu_co_queue_restart_all(&s->free_sema);
502 }
503 if (s->state != NBD_CLIENT_CONNECTED) {
504 rc = -EIO;
505 goto err;
506 }
507 }
508
509 for (i = 0; i < MAX_NBD_REQUESTS; i++) {
510 if (s->requests[i].coroutine == NULL) {
511 break;
512 }
513 }
514
515 assert(i < MAX_NBD_REQUESTS);
516 s->requests[i].coroutine = qemu_coroutine_self();
517 s->requests[i].offset = request->from;
518 s->requests[i].receiving = false;
519 qemu_mutex_unlock(&s->requests_lock);
520
521 qemu_co_mutex_lock(&s->send_mutex);
522 request->cookie = INDEX_TO_COOKIE(i);
523 request->mode = s->info.mode;
524
525 assert(s->ioc);
526
527 if (qiov) {
528 qio_channel_set_cork(s->ioc, true);
529 rc = nbd_send_request(s->ioc, request);
530 if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
531 NULL) < 0) {
532 rc = -EIO;
533 }
534 qio_channel_set_cork(s->ioc, false);
535 } else {
536 rc = nbd_send_request(s->ioc, request);
537 }
538 qemu_co_mutex_unlock(&s->send_mutex);
539
540 if (rc < 0) {
541 qemu_mutex_lock(&s->requests_lock);
542 err:
543 nbd_channel_error_locked(s, rc);
544 if (i != -1) {
545 s->requests[i].coroutine = NULL;
546 }
547 s->in_flight--;
548 qemu_co_queue_next(&s->free_sema);
549 qemu_mutex_unlock(&s->requests_lock);
550 }
551 return rc;
552 }
553
554 static inline uint16_t payload_advance16(uint8_t **payload)
555 {
556 *payload += 2;
557 return lduw_be_p(*payload - 2);
558 }
559
560 static inline uint32_t payload_advance32(uint8_t **payload)
561 {
562 *payload += 4;
563 return ldl_be_p(*payload - 4);
564 }
565
566 static inline uint64_t payload_advance64(uint8_t **payload)
567 {
568 *payload += 8;
569 return ldq_be_p(*payload - 8);
570 }
571
572 static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
573 NBDStructuredReplyChunk *chunk,
574 uint8_t *payload, uint64_t orig_offset,
575 QEMUIOVector *qiov, Error **errp)
576 {
577 uint64_t offset;
578 uint32_t hole_size;
579
580 if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
581 error_setg(errp, "Protocol error: invalid payload for "
582 "NBD_REPLY_TYPE_OFFSET_HOLE");
583 return -EINVAL;
584 }
585
586 offset = payload_advance64(&payload);
587 hole_size = payload_advance32(&payload);
588
589 if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
590 offset > orig_offset + qiov->size - hole_size) {
591 error_setg(errp, "Protocol error: server sent chunk exceeding requested"
592 " region");
593 return -EINVAL;
594 }
595 if (s->info.min_block &&
596 !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
597 trace_nbd_structured_read_compliance("hole");
598 }
599
600 qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);
601
602 return 0;
603 }
604
605 /*
606 * nbd_parse_blockstatus_payload
607 * Based on our request, we expect only one extent in reply, for the
608 * base:allocation context.
609 */
610 static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
611 NBDStructuredReplyChunk *chunk,
612 uint8_t *payload, uint64_t orig_length,
613 NBDExtent32 *extent, Error **errp)
614 {
615 uint32_t context_id;
616
617 /* The server succeeded, so it must have sent [at least] one extent */
618 if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
619 error_setg(errp, "Protocol error: invalid payload for "
620 "NBD_REPLY_TYPE_BLOCK_STATUS");
621 return -EINVAL;
622 }
623
624 context_id = payload_advance32(&payload);
625 if (s->info.context_id != context_id) {
626 error_setg(errp, "Protocol error: unexpected context id %d for "
627 "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
628 "id is %d", context_id,
629 s->info.context_id);
630 return -EINVAL;
631 }
632
633 extent->length = payload_advance32(&payload);
634 extent->flags = payload_advance32(&payload);
635
636 if (extent->length == 0) {
637 error_setg(errp, "Protocol error: server sent status chunk with "
638 "zero length");
639 return -EINVAL;
640 }
641
642 /*
643 * A server sending unaligned block status is in violation of the
644 * protocol, but as qemu-nbd 3.1 is such a server (at least for
645 * POSIX files that are not a multiple of 512 bytes, since qemu
646 * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
647 * still sees an implicit hole beyond the real EOF), it's nicer to
648 * work around the misbehaving server. If the request included
649 * more than the final unaligned block, truncate it back to an
650 * aligned result; if the request was only the final block, round
651 * up to the full block and change the status to fully-allocated
652 * (always a safe status, even if it loses information).
653 */
654 if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
655 s->info.min_block)) {
656 trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
657 if (extent->length > s->info.min_block) {
658 extent->length = QEMU_ALIGN_DOWN(extent->length,
659 s->info.min_block);
660 } else {
661 extent->length = s->info.min_block;
662 extent->flags = 0;
663 }
664 }
665
666 /*
667 * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
668 * sent us any more than one extent, nor should it have included
669 * status beyond our request in that extent. However, it's easy
670 * enough to ignore the server's noncompliance without killing the
671 * connection; just ignore trailing extents, and clamp things to
672 * the length of our request.
673 */
674 if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
675 trace_nbd_parse_blockstatus_compliance("more than one extent");
676 }
677 if (extent->length > orig_length) {
678 extent->length = orig_length;
679 trace_nbd_parse_blockstatus_compliance("extent length too large");
680 }
681
682 /*
683 * HACK: if we are using x-dirty-bitmaps to access
684 * qemu:allocation-depth, treat all depths > 2 the same as 2,
685 * since nbd_client_co_block_status is only expecting the low two
686 * bits to be set.
687 */
688 if (s->alloc_depth && extent->flags > 2) {
689 extent->flags = 2;
690 }
691
692 return 0;
693 }
694
695 /*
696 * nbd_parse_error_payload
697 * on success @errp contains message describing nbd error reply
698 */
699 static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
700 uint8_t *payload, int *request_ret,
701 Error **errp)
702 {
703 uint32_t error;
704 uint16_t message_size;
705
706 assert(chunk->type & (1 << 15));
707
708 if (chunk->length < sizeof(error) + sizeof(message_size)) {
709 error_setg(errp,
710 "Protocol error: invalid payload for structured error");
711 return -EINVAL;
712 }
713
714 error = nbd_errno_to_system_errno(payload_advance32(&payload));
715 if (error == 0) {
716 error_setg(errp, "Protocol error: server sent structured error chunk "
717 "with error = 0");
718 return -EINVAL;
719 }
720
721 *request_ret = -error;
722 message_size = payload_advance16(&payload);
723
724 if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
725 error_setg(errp, "Protocol error: server sent structured error chunk "
726 "with incorrect message size");
727 return -EINVAL;
728 }
729
730 /* TODO: Add a trace point to mention the server complaint */
731
732 /* TODO handle ERROR_OFFSET */
733
734 return 0;
735 }
736
737 static int coroutine_fn
738 nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
739 QEMUIOVector *qiov, Error **errp)
740 {
741 QEMUIOVector sub_qiov;
742 uint64_t offset;
743 size_t data_size;
744 int ret;
745 NBDStructuredReplyChunk *chunk = &s->reply.structured;
746
747 assert(nbd_reply_is_structured(&s->reply));
748
749 /* The NBD spec requires at least one byte of payload */
750 if (chunk->length <= sizeof(offset)) {
751 error_setg(errp, "Protocol error: invalid payload for "
752 "NBD_REPLY_TYPE_OFFSET_DATA");
753 return -EINVAL;
754 }
755
756 if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
757 return -EIO;
758 }
759
760 data_size = chunk->length - sizeof(offset);
761 assert(data_size);
762 if (offset < orig_offset || data_size > qiov->size ||
763 offset > orig_offset + qiov->size - data_size) {
764 error_setg(errp, "Protocol error: server sent chunk exceeding requested"
765 " region");
766 return -EINVAL;
767 }
768 if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
769 trace_nbd_structured_read_compliance("data");
770 }
771
772 qemu_iovec_init(&sub_qiov, qiov->niov);
773 qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
774 ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
775 qemu_iovec_destroy(&sub_qiov);
776
777 return ret < 0 ? -EIO : 0;
778 }
779
780 #define NBD_MAX_MALLOC_PAYLOAD 1000
781 static coroutine_fn int nbd_co_receive_structured_payload(
782 BDRVNBDState *s, void **payload, Error **errp)
783 {
784 int ret;
785 uint32_t len;
786
787 assert(nbd_reply_is_structured(&s->reply));
788
789 len = s->reply.structured.length;
790
791 if (len == 0) {
792 return 0;
793 }
794
795 if (payload == NULL) {
796 error_setg(errp, "Unexpected structured payload");
797 return -EINVAL;
798 }
799
800 if (len > NBD_MAX_MALLOC_PAYLOAD) {
801 error_setg(errp, "Payload too large");
802 return -EINVAL;
803 }
804
805 *payload = g_new(char, len);
806 ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
807 if (ret < 0) {
808 g_free(*payload);
809 *payload = NULL;
810 return ret;
811 }
812
813 return 0;
814 }
815
816 /*
817 * nbd_co_do_receive_one_chunk
818 * for simple reply:
819 * set request_ret to received reply error
820 * if qiov is not NULL: read payload to @qiov
821 * for structured reply chunk:
822 * if error chunk: read payload, set @request_ret, do not set @payload
823 * else if offset_data chunk: read payload data to @qiov, do not set @payload
824 * else: read payload to @payload
825 *
826 * If function fails, @errp contains corresponding error message, and the
827 * connection with the server is suspect. If it returns 0, then the
828 * transaction succeeded (although @request_ret may be a negative errno
829 * corresponding to the server's error reply), and errp is unchanged.
830 */
831 static coroutine_fn int nbd_co_do_receive_one_chunk(
832 BDRVNBDState *s, uint64_t cookie, bool only_structured,
833 int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
834 {
835 int ret;
836 int i = COOKIE_TO_INDEX(cookie);
837 void *local_payload = NULL;
838 NBDStructuredReplyChunk *chunk;
839
840 if (payload) {
841 *payload = NULL;
842 }
843 *request_ret = 0;
844
845 ret = nbd_receive_replies(s, cookie);
846 if (ret < 0) {
847 error_setg(errp, "Connection closed");
848 return -EIO;
849 }
850 assert(s->ioc);
851
852 assert(s->reply.cookie == cookie);
853
854 if (nbd_reply_is_simple(&s->reply)) {
855 if (only_structured) {
856 error_setg(errp, "Protocol error: simple reply when structured "
857 "reply chunk was expected");
858 return -EINVAL;
859 }
860
861 *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
862 if (*request_ret < 0 || !qiov) {
863 return 0;
864 }
865
866 return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
867 errp) < 0 ? -EIO : 0;
868 }
869
870 /* handle structured reply chunk */
871 assert(s->info.mode >= NBD_MODE_STRUCTURED);
872 chunk = &s->reply.structured;
873
874 if (chunk->type == NBD_REPLY_TYPE_NONE) {
875 if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
876 error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
877 " NBD_REPLY_FLAG_DONE flag set");
878 return -EINVAL;
879 }
880 if (chunk->length) {
881 error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
882 " nonzero length");
883 return -EINVAL;
884 }
885 return 0;
886 }
887
888 if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
889 if (!qiov) {
890 error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
891 return -EINVAL;
892 }
893
894 return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
895 qiov, errp);
896 }
897
898 if (nbd_reply_type_is_error(chunk->type)) {
899 payload = &local_payload;
900 }
901
902 ret = nbd_co_receive_structured_payload(s, payload, errp);
903 if (ret < 0) {
904 return ret;
905 }
906
907 if (nbd_reply_type_is_error(chunk->type)) {
908 ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
909 g_free(local_payload);
910 return ret;
911 }
912
913 return 0;
914 }
915
916 /*
917 * nbd_co_receive_one_chunk
918 * Read reply, wake up connection_co and set s->quit if needed.
919 * Return value is a fatal error code or normal nbd reply error code
920 */
921 static coroutine_fn int nbd_co_receive_one_chunk(
922 BDRVNBDState *s, uint64_t cookie, bool only_structured,
923 int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
924 Error **errp)
925 {
926 int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
927 request_ret, qiov, payload, errp);
928
929 if (ret < 0) {
930 memset(reply, 0, sizeof(*reply));
931 nbd_channel_error(s, ret);
932 } else {
933 /* For assert at loop start in nbd_connection_entry */
934 *reply = s->reply;
935 }
936 s->reply.cookie = 0;
937
938 nbd_recv_coroutines_wake(s);
939
940 return ret;
941 }
942
943 typedef struct NBDReplyChunkIter {
944 int ret;
945 int request_ret;
946 Error *err;
947 bool done, only_structured;
948 } NBDReplyChunkIter;
949
950 static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
951 int ret, Error **local_err)
952 {
953 assert(local_err && *local_err);
954 assert(ret < 0);
955
956 if (!iter->ret) {
957 iter->ret = ret;
958 error_propagate(&iter->err, *local_err);
959 } else {
960 error_free(*local_err);
961 }
962
963 *local_err = NULL;
964 }
965
966 static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
967 {
968 assert(ret < 0);
969
970 if (!iter->request_ret) {
971 iter->request_ret = ret;
972 }
973 }
974
975 /*
976 * NBD_FOREACH_REPLY_CHUNK
977 * The pointer stored in @payload requires g_free() to free it.
978 */
979 #define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
980 qiov, reply, payload) \
981 for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
982 nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)
983
984 /*
985 * nbd_reply_chunk_iter_receive
986 * The pointer stored in @payload requires g_free() to free it.
987 */
988 static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
989 NBDReplyChunkIter *iter,
990 uint64_t cookie,
991 QEMUIOVector *qiov,
992 NBDReply *reply,
993 void **payload)
994 {
995 int ret, request_ret;
996 NBDReply local_reply;
997 NBDStructuredReplyChunk *chunk;
998 Error *local_err = NULL;
999
1000 if (iter->done) {
1001 /* Previous iteration was last. */
1002 goto break_loop;
1003 }
1004
1005 if (reply == NULL) {
1006 reply = &local_reply;
1007 }
1008
1009 ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
1010 &request_ret, qiov, reply, payload,
1011 &local_err);
1012 if (ret < 0) {
1013 nbd_iter_channel_error(iter, ret, &local_err);
1014 } else if (request_ret < 0) {
1015 nbd_iter_request_error(iter, request_ret);
1016 }
1017
1018 /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
1019 if (nbd_reply_is_simple(reply) || iter->ret < 0) {
1020 goto break_loop;
1021 }
1022
1023 chunk = &reply->structured;
1024 iter->only_structured = true;
1025
1026 if (chunk->type == NBD_REPLY_TYPE_NONE) {
1027 /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
1028 assert(chunk->flags & NBD_REPLY_FLAG_DONE);
1029 goto break_loop;
1030 }
1031
1032 if (chunk->flags & NBD_REPLY_FLAG_DONE) {
1033 /* This iteration is last. */
1034 iter->done = true;
1035 }
1036
1037 /* Execute the loop body */
1038 return true;
1039
1040 break_loop:
1041 qemu_mutex_lock(&s->requests_lock);
1042 s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
1043 s->in_flight--;
1044 qemu_co_queue_next(&s->free_sema);
1045 qemu_mutex_unlock(&s->requests_lock);
1046
1047 return false;
1048 }
1049
1050 static int coroutine_fn
1051 nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
1052 int *request_ret, Error **errp)
1053 {
1054 NBDReplyChunkIter iter;
1055
1056 NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
1057 /* nbd_reply_chunk_iter_receive does all the work */
1058 }
1059
1060 error_propagate(errp, iter.err);
1061 *request_ret = iter.request_ret;
1062 return iter.ret;
1063 }
1064
1065 static int coroutine_fn
1066 nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
1067 uint64_t offset, QEMUIOVector *qiov,
1068 int *request_ret, Error **errp)
1069 {
1070 NBDReplyChunkIter iter;
1071 NBDReply reply;
1072 void *payload = NULL;
1073 Error *local_err = NULL;
1074
1075 NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
1076 s->info.mode >= NBD_MODE_STRUCTURED,
1077 qiov, &reply, &payload)
1078 {
1079 int ret;
1080 NBDStructuredReplyChunk *chunk = &reply.structured;
1081
1082 assert(nbd_reply_is_structured(&reply));
1083
1084 switch (chunk->type) {
1085 case NBD_REPLY_TYPE_OFFSET_DATA:
1086 /*
1087 * special cased in nbd_co_receive_one_chunk, data is already
1088 * in qiov
1089 */
1090 break;
1091 case NBD_REPLY_TYPE_OFFSET_HOLE:
1092 ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
1093 offset, qiov, &local_err);
1094 if (ret < 0) {
1095 nbd_channel_error(s, ret);
1096 nbd_iter_channel_error(&iter, ret, &local_err);
1097 }
1098 break;
1099 default:
1100 if (!nbd_reply_type_is_error(chunk->type)) {
1101 /* not allowed reply type */
1102 nbd_channel_error(s, -EINVAL);
1103 error_setg(&local_err,
1104 "Unexpected reply type: %d (%s) for CMD_READ",
1105 chunk->type, nbd_reply_type_lookup(chunk->type));
1106 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1107 }
1108 }
1109
1110 g_free(payload);
1111 payload = NULL;
1112 }
1113
1114 error_propagate(errp, iter.err);
1115 *request_ret = iter.request_ret;
1116 return iter.ret;
1117 }
1118
1119 static int coroutine_fn
1120 nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
1121 uint64_t length, NBDExtent32 *extent,
1122 int *request_ret, Error **errp)
1123 {
1124 NBDReplyChunkIter iter;
1125 NBDReply reply;
1126 void *payload = NULL;
1127 Error *local_err = NULL;
1128 bool received = false;
1129
1130 assert(!extent->length);
1131 NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
1132 int ret;
1133 NBDStructuredReplyChunk *chunk = &reply.structured;
1134
1135 assert(nbd_reply_is_structured(&reply));
1136
1137 switch (chunk->type) {
1138 case NBD_REPLY_TYPE_BLOCK_STATUS:
1139 if (received) {
1140 nbd_channel_error(s, -EINVAL);
1141 error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
1142 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1143 }
1144 received = true;
1145
1146 ret = nbd_parse_blockstatus_payload(s, &reply.structured,
1147 payload, length, extent,
1148 &local_err);
1149 if (ret < 0) {
1150 nbd_channel_error(s, ret);
1151 nbd_iter_channel_error(&iter, ret, &local_err);
1152 }
1153 break;
1154 default:
1155 if (!nbd_reply_type_is_error(chunk->type)) {
1156 nbd_channel_error(s, -EINVAL);
1157 error_setg(&local_err,
1158 "Unexpected reply type: %d (%s) "
1159 "for CMD_BLOCK_STATUS",
1160 chunk->type, nbd_reply_type_lookup(chunk->type));
1161 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1162 }
1163 }
1164
1165 g_free(payload);
1166 payload = NULL;
1167 }
1168
1169 if (!extent->length && !iter.request_ret) {
1170 error_setg(&local_err, "Server did not reply with any status extents");
1171 nbd_iter_channel_error(&iter, -EIO, &local_err);
1172 }
1173
1174 error_propagate(errp, iter.err);
1175 *request_ret = iter.request_ret;
1176 return iter.ret;
1177 }
1178
1179 static int coroutine_fn GRAPH_RDLOCK
1180 nbd_co_request(BlockDriverState *bs, NBDRequest *request,
1181 QEMUIOVector *write_qiov)
1182 {
1183 int ret, request_ret;
1184 Error *local_err = NULL;
1185 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1186
1187 assert(request->type != NBD_CMD_READ);
1188 if (write_qiov) {
1189 assert(request->type == NBD_CMD_WRITE);
1190 assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
1191 } else {
1192 assert(request->type != NBD_CMD_WRITE);
1193 }
1194
1195 do {
1196 ret = nbd_co_send_request(bs, request, write_qiov);
1197 if (ret < 0) {
1198 continue;
1199 }
1200
1201 ret = nbd_co_receive_return_code(s, request->cookie,
1202 &request_ret, &local_err);
1203 if (local_err) {
1204 trace_nbd_co_request_fail(request->from, request->len,
1205 request->cookie, request->flags,
1206 request->type,
1207 nbd_cmd_lookup(request->type),
1208 ret, error_get_pretty(local_err));
1209 error_free(local_err);
1210 local_err = NULL;
1211 }
1212 } while (ret < 0 && nbd_client_will_reconnect(s));
1213
1214 return ret ? ret : request_ret;
1215 }
1216
1217 static int coroutine_fn GRAPH_RDLOCK
1218 nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
1219 QEMUIOVector *qiov, BdrvRequestFlags flags)
1220 {
1221 int ret, request_ret;
1222 Error *local_err = NULL;
1223 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1224 NBDRequest request = {
1225 .type = NBD_CMD_READ,
1226 .from = offset,
1227 .len = bytes,
1228 };
1229
1230 assert(bytes <= NBD_MAX_BUFFER_SIZE);
1231
1232 if (!bytes) {
1233 return 0;
1234 }
1235 /*
1236 * Work around the fact that the block layer doesn't do
1237 * byte-accurate sizing yet - if the read exceeds the server's
1238 * advertised size because the block layer rounded size up, then
1239 * truncate the request to the server and tail-pad with zero.
1240 */
1241 if (offset >= s->info.size) {
1242 assert(bytes < BDRV_SECTOR_SIZE);
1243 qemu_iovec_memset(qiov, 0, 0, bytes);
1244 return 0;
1245 }
1246 if (offset + bytes > s->info.size) {
1247 uint64_t slop = offset + bytes - s->info.size;
1248
1249 assert(slop < BDRV_SECTOR_SIZE);
1250 qemu_iovec_memset(qiov, bytes - slop, 0, slop);
1251 request.len -= slop;
1252 }
1253
1254 do {
1255 ret = nbd_co_send_request(bs, &request, NULL);
1256 if (ret < 0) {
1257 continue;
1258 }
1259
1260 ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
1261 &request_ret, &local_err);
1262 if (local_err) {
1263 trace_nbd_co_request_fail(request.from, request.len, request.cookie,
1264 request.flags, request.type,
1265 nbd_cmd_lookup(request.type),
1266 ret, error_get_pretty(local_err));
1267 error_free(local_err);
1268 local_err = NULL;
1269 }
1270 } while (ret < 0 && nbd_client_will_reconnect(s));
1271
1272 return ret ? ret : request_ret;
1273 }
1274
1275 static int coroutine_fn GRAPH_RDLOCK
1276 nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1277 QEMUIOVector *qiov, BdrvRequestFlags flags)
1278 {
1279 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1280 NBDRequest request = {
1281 .type = NBD_CMD_WRITE,
1282 .from = offset,
1283 .len = bytes,
1284 };
1285
1286 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1287 if (flags & BDRV_REQ_FUA) {
1288 assert(s->info.flags & NBD_FLAG_SEND_FUA);
1289 request.flags |= NBD_CMD_FLAG_FUA;
1290 }
1291
1292 assert(bytes <= NBD_MAX_BUFFER_SIZE);
1293
1294 if (!bytes) {
1295 return 0;
1296 }
1297 return nbd_co_request(bs, &request, qiov);
1298 }
1299
1300 static int coroutine_fn GRAPH_RDLOCK
1301 nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1302 BdrvRequestFlags flags)
1303 {
1304 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1305 NBDRequest request = {
1306 .type = NBD_CMD_WRITE_ZEROES,
1307 .from = offset,
1308 .len = bytes,
1309 };
1310
1311 /* rely on max_pwrite_zeroes */
1312 assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
1313
1314 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1315 if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
1316 return -ENOTSUP;
1317 }
1318
1319 if (flags & BDRV_REQ_FUA) {
1320 assert(s->info.flags & NBD_FLAG_SEND_FUA);
1321 request.flags |= NBD_CMD_FLAG_FUA;
1322 }
1323 if (!(flags & BDRV_REQ_MAY_UNMAP)) {
1324 request.flags |= NBD_CMD_FLAG_NO_HOLE;
1325 }
1326 if (flags & BDRV_REQ_NO_FALLBACK) {
1327 assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
1328 request.flags |= NBD_CMD_FLAG_FAST_ZERO;
1329 }
1330
1331 if (!bytes) {
1332 return 0;
1333 }
1334 return nbd_co_request(bs, &request, NULL);
1335 }
1336
1337 static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs)
1338 {
1339 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1340 NBDRequest request = { .type = NBD_CMD_FLUSH };
1341
1342 if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
1343 return 0;
1344 }
1345
1346 request.from = 0;
1347 request.len = 0;
1348
1349 return nbd_co_request(bs, &request, NULL);
1350 }
1351
1352 static int coroutine_fn GRAPH_RDLOCK
1353 nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
1354 {
1355 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1356 NBDRequest request = {
1357 .type = NBD_CMD_TRIM,
1358 .from = offset,
1359 .len = bytes,
1360 };
1361
1362 /* rely on max_pdiscard */
1363 assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
1364
1365 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1366 if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
1367 return 0;
1368 }
1369
1370 return nbd_co_request(bs, &request, NULL);
1371 }
1372
1373 static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
1374 BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
1375 int64_t *pnum, int64_t *map, BlockDriverState **file)
1376 {
1377 int ret, request_ret;
1378 NBDExtent32 extent = { 0 };
1379 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1380 Error *local_err = NULL;
1381
1382 NBDRequest request = {
1383 .type = NBD_CMD_BLOCK_STATUS,
1384 .from = offset,
1385 .len = MIN(bytes, s->info.size - offset),
1386 .flags = NBD_CMD_FLAG_REQ_ONE,
1387 };
1388
1389 if (!s->info.base_allocation) {
1390 *pnum = bytes;
1391 *map = offset;
1392 *file = bs;
1393 return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
1394 }
1395 if (s->info.mode < NBD_MODE_EXTENDED) {
1396 request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
1397 request.len);
1398 }
1399
1400 /*
1401 * Work around the fact that the block layer doesn't do
1402 * byte-accurate sizing yet - if the status request exceeds the
1403 * server's advertised size because the block layer rounded size
1404 * up, we truncated the request to the server (above), or are
1405 * called on just the hole.
1406 */
1407 if (offset >= s->info.size) {
1408 *pnum = bytes;
1409 assert(bytes < BDRV_SECTOR_SIZE);
1410 /* Intentionally don't report offset_valid for the hole */
1411 return BDRV_BLOCK_ZERO;
1412 }
1413
1414 if (s->info.min_block) {
1415 assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
1416 }
1417 do {
1418 ret = nbd_co_send_request(bs, &request, NULL);
1419 if (ret < 0) {
1420 continue;
1421 }
1422
1423 ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
1424 &extent, &request_ret,
1425 &local_err);
1426 if (local_err) {
1427 trace_nbd_co_request_fail(request.from, request.len, request.cookie,
1428 request.flags, request.type,
1429 nbd_cmd_lookup(request.type),
1430 ret, error_get_pretty(local_err));
1431 error_free(local_err);
1432 local_err = NULL;
1433 }
1434 } while (ret < 0 && nbd_client_will_reconnect(s));
1435
1436 if (ret < 0 || request_ret < 0) {
1437 return ret ? ret : request_ret;
1438 }
1439
1440 assert(extent.length);
1441 *pnum = extent.length;
1442 *map = offset;
1443 *file = bs;
1444 return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
1445 (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
1446 BDRV_BLOCK_OFFSET_VALID;
1447 }
1448
1449 static int nbd_client_reopen_prepare(BDRVReopenState *state,
1450 BlockReopenQueue *queue, Error **errp)
1451 {
1452 BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;
1453
1454 if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
1455 error_setg(errp, "Can't reopen read-only NBD mount as read/write");
1456 return -EACCES;
1457 }
1458 return 0;
1459 }
1460
1461 static void nbd_yank(void *opaque)
1462 {
1463 BlockDriverState *bs = opaque;
1464 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1465
1466 QEMU_LOCK_GUARD(&s->requests_lock);
1467 qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1468 s->state = NBD_CLIENT_QUIT;
1469 }
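
/*
 * Illustrative usage sketch (not part of the upstream file): the yank
 * function registered for this node can be triggered over QMP roughly as
 * follows (the node name "nbd0" is an assumption for the example):
 *
 *   { "execute": "yank",
 *     "arguments": { "instances": [
 *         { "type": "block-node", "node-name": "nbd0" } ] } }
 *
 * which shuts down the socket and moves the client to NBD_CLIENT_QUIT.
 */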
1470
1471 static void nbd_client_close(BlockDriverState *bs)
1472 {
1473 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1474 NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
1475
1476 if (s->ioc) {
1477 nbd_send_request(s->ioc, &request);
1478 }
1479
1480 nbd_teardown_connection(bs);
1481 }
1482
1483
1484 /*
1485 * Parse nbd_open options
1486 */
1487
1488 static int nbd_parse_uri(const char *filename, QDict *options)
1489 {
1490 URI *uri;
1491 const char *p;
1492 QueryParams *qp = NULL;
1493 int ret = 0;
1494 bool is_unix;
1495
1496 uri = uri_parse(filename);
1497 if (!uri) {
1498 return -EINVAL;
1499 }
1500
1501 /* transport */
1502 if (!g_strcmp0(uri->scheme, "nbd")) {
1503 is_unix = false;
1504 } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
1505 is_unix = false;
1506 } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
1507 is_unix = true;
1508 } else {
1509 ret = -EINVAL;
1510 goto out;
1511 }
1512
1513 p = uri->path ? uri->path : "";
1514 if (p[0] == '/') {
1515 p++;
1516 }
1517 if (p[0]) {
1518 qdict_put_str(options, "export", p);
1519 }
1520
1521 qp = query_params_parse(uri->query);
1522 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
1523 ret = -EINVAL;
1524 goto out;
1525 }
1526
1527 if (is_unix) {
1528 /* nbd+unix:///export?socket=path */
1529 if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
1530 ret = -EINVAL;
1531 goto out;
1532 }
1533 qdict_put_str(options, "server.type", "unix");
1534 qdict_put_str(options, "server.path", qp->p[0].value);
1535 } else {
1536 QString *host;
1537 char *port_str;
1538
1539 /* nbd[+tcp]://host[:port]/export */
1540 if (!uri->server) {
1541 ret = -EINVAL;
1542 goto out;
1543 }
1544
1545 /* strip braces from literal IPv6 address */
1546 if (uri->server[0] == '[') {
1547 host = qstring_from_substr(uri->server, 1,
1548 strlen(uri->server) - 1);
1549 } else {
1550 host = qstring_from_str(uri->server);
1551 }
1552
1553 qdict_put_str(options, "server.type", "inet");
1554 qdict_put(options, "server.host", host);
1555
1556 port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
1557 qdict_put_str(options, "server.port", port_str);
1558 g_free(port_str);
1559 }
1560
1561 out:
1562 if (qp) {
1563 query_params_free(qp);
1564 }
1565 uri_free(uri);
1566 return ret;
1567 }
1568
1569 static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
1570 {
1571 const QDictEntry *e;
1572
1573 for (e = qdict_first(options); e; e = qdict_next(options, e)) {
1574 if (!strcmp(e->key, "host") ||
1575 !strcmp(e->key, "port") ||
1576 !strcmp(e->key, "path") ||
1577 !strcmp(e->key, "export") ||
1578 strstart(e->key, "server.", NULL))
1579 {
1580 error_setg(errp, "Option '%s' cannot be used with a file name",
1581 e->key);
1582 return true;
1583 }
1584 }
1585
1586 return false;
1587 }
1588
1589 static void nbd_parse_filename(const char *filename, QDict *options,
1590 Error **errp)
1591 {
1592 g_autofree char *file = NULL;
1593 char *export_name;
1594 const char *host_spec;
1595 const char *unixpath;
1596
1597 if (nbd_has_filename_options_conflict(options, errp)) {
1598 return;
1599 }
1600
1601 if (strstr(filename, "://")) {
1602 int ret = nbd_parse_uri(filename, options);
1603 if (ret < 0) {
1604 error_setg(errp, "No valid URL specified");
1605 }
1606 return;
1607 }
1608
1609 file = g_strdup(filename);
1610
1611 export_name = strstr(file, EN_OPTSTR);
1612 if (export_name) {
1613 if (export_name[strlen(EN_OPTSTR)] == 0) {
1614 return;
1615 }
1616 export_name[0] = 0; /* truncate 'file' */
1617 export_name += strlen(EN_OPTSTR);
1618
1619 qdict_put_str(options, "export", export_name);
1620 }
1621
1622 /* extract the host_spec - fail if it's not nbd:... */
1623 if (!strstart(file, "nbd:", &host_spec)) {
1624 error_setg(errp, "File name string for NBD must start with 'nbd:'");
1625 return;
1626 }
1627
1628 if (!*host_spec) {
1629 return;
1630 }
1631
1632 /* are we a UNIX or TCP socket? */
1633 if (strstart(host_spec, "unix:", &unixpath)) {
1634 qdict_put_str(options, "server.type", "unix");
1635 qdict_put_str(options, "server.path", unixpath);
1636 } else {
1637 InetSocketAddress *addr = g_new(InetSocketAddress, 1);
1638
1639 if (inet_parse(addr, host_spec, errp)) {
1640 goto out_inet;
1641 }
1642
1643 qdict_put_str(options, "server.type", "inet");
1644 qdict_put_str(options, "server.host", addr->host);
1645 qdict_put_str(options, "server.port", addr->port);
1646 out_inet:
1647 qapi_free_InetSocketAddress(addr);
1648 }
1649 }
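
/*
 * Illustrative examples (not part of the upstream file) of legacy
 * pseudo-filenames accepted by the parser above:
 *
 *   nbd:localhost:10809:exportname=disk
 *   nbd:unix:/tmp/nbd.sock:exportname=disk
 *
 * URI-style filenames ("nbd://..." and "nbd+unix://...") are handled by
 * nbd_parse_uri() instead.
 */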
1650
1651 static bool nbd_process_legacy_socket_options(QDict *output_options,
1652 QemuOpts *legacy_opts,
1653 Error **errp)
1654 {
1655 const char *path = qemu_opt_get(legacy_opts, "path");
1656 const char *host = qemu_opt_get(legacy_opts, "host");
1657 const char *port = qemu_opt_get(legacy_opts, "port");
1658 const QDictEntry *e;
1659
1660 if (!path && !host && !port) {
1661 return true;
1662 }
1663
1664 for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
1665 {
1666 if (strstart(e->key, "server.", NULL)) {
1667 error_setg(errp, "Cannot use 'server' and path/host/port at the "
1668 "same time");
1669 return false;
1670 }
1671 }
1672
1673 if (path && host) {
1674 error_setg(errp, "path and host may not be used at the same time");
1675 return false;
1676 } else if (path) {
1677 if (port) {
1678 error_setg(errp, "port may not be used without host");
1679 return false;
1680 }
1681
1682 qdict_put_str(output_options, "server.type", "unix");
1683 qdict_put_str(output_options, "server.path", path);
1684 } else if (host) {
1685 qdict_put_str(output_options, "server.type", "inet");
1686 qdict_put_str(output_options, "server.host", host);
1687 qdict_put_str(output_options, "server.port",
1688 port ?: stringify(NBD_DEFAULT_PORT));
1689 }
1690
1691 return true;
1692 }
1693
1694 static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
1695 Error **errp)
1696 {
1697 SocketAddress *saddr = NULL;
1698 QDict *addr = NULL;
1699 Visitor *iv = NULL;
1700
1701 qdict_extract_subqdict(options, &addr, "server.");
1702 if (!qdict_size(addr)) {
1703 error_setg(errp, "NBD server address missing");
1704 goto done;
1705 }
1706
1707 iv = qobject_input_visitor_new_flat_confused(addr, errp);
1708 if (!iv) {
1709 goto done;
1710 }
1711
1712 if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
1713 goto done;
1714 }
1715
1716 if (socket_address_parse_named_fd(saddr, errp) < 0) {
1717 qapi_free_SocketAddress(saddr);
1718 saddr = NULL;
1719 goto done;
1720 }
1721
1722 done:
1723 qobject_unref(addr);
1724 visit_free(iv);
1725 return saddr;
1726 }
1727
1728 static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
1729 {
1730 Object *obj;
1731 QCryptoTLSCreds *creds;
1732
1733 obj = object_resolve_path_component(
1734 object_get_objects_root(), id);
1735 if (!obj) {
1736 error_setg(errp, "No TLS credentials with id '%s'",
1737 id);
1738 return NULL;
1739 }
1740 creds = (QCryptoTLSCreds *)
1741 object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
1742 if (!creds) {
1743 error_setg(errp, "Object with id '%s' is not TLS credentials",
1744 id);
1745 return NULL;
1746 }
1747
1748 if (!qcrypto_tls_creds_check_endpoint(creds,
1749 QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
1750 errp)) {
1751 return NULL;
1752 }
1753 object_ref(obj);
1754 return creds;
1755 }
1756
1757
1758 static QemuOptsList nbd_runtime_opts = {
1759 .name = "nbd",
1760 .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
1761 .desc = {
1762 {
1763 .name = "host",
1764 .type = QEMU_OPT_STRING,
1765 .help = "TCP host to connect to",
1766 },
1767 {
1768 .name = "port",
1769 .type = QEMU_OPT_STRING,
1770 .help = "TCP port to connect to",
1771 },
1772 {
1773 .name = "path",
1774 .type = QEMU_OPT_STRING,
1775 .help = "Unix socket path to connect to",
1776 },
1777 {
1778 .name = "export",
1779 .type = QEMU_OPT_STRING,
1780 .help = "Name of the NBD export to open",
1781 },
1782 {
1783 .name = "tls-creds",
1784 .type = QEMU_OPT_STRING,
1785 .help = "ID of the TLS credentials to use",
1786 },
1787 {
1788 .name = "tls-hostname",
1789 .type = QEMU_OPT_STRING,
1790 .help = "Override hostname for validating TLS x509 certificate",
1791 },
1792 {
1793 .name = "x-dirty-bitmap",
1794 .type = QEMU_OPT_STRING,
1795 .help = "experimental: expose named dirty bitmap in place of "
1796 "block status",
1797 },
1798 {
1799 .name = "reconnect-delay",
1800 .type = QEMU_OPT_NUMBER,
1801 .help = "On an unexpected disconnect, the nbd client tries to "
1802 "connect again until succeeding or encountering a serious "
1803 "error. During the first @reconnect-delay seconds, all "
1804 "requests are paused and will be rerun on a successful "
1805 "reconnect. After that time, any delayed requests and all "
1806 "future requests before a successful reconnect will "
1807 "immediately fail. Default 0",
1808 },
1809 {
1810 .name = "open-timeout",
1811 .type = QEMU_OPT_NUMBER,
1812 .help = "In seconds. If zero, the nbd driver tries the connection "
1813 "only once, and fails to open if the connection fails. "
1814 "If non-zero, the nbd driver will repeat connection "
1815 "attempts until successful or until @open-timeout seconds "
1816 "have elapsed. Default 0",
1817 },
1818 { /* end of list */ }
1819 },
1820 };
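
/*
 * Illustrative example (not part of the upstream file): these runtime
 * options correspond to -blockdev / blockdev-add parameters. A plausible
 * invocation (node name, host and export name are assumptions) is:
 *
 *   -blockdev driver=nbd,node-name=nbd0,server.type=inet,
 *             server.host=localhost,server.port=10809,
 *             export=disk,reconnect-delay=5
 *
 * (given on one line, without the spaces after the commas).
 */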
1821
1822 static int nbd_process_options(BlockDriverState *bs, QDict *options,
1823 Error **errp)
1824 {
1825 BDRVNBDState *s = bs->opaque;
1826 QemuOpts *opts;
1827 int ret = -EINVAL;
1828
1829 opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
1830 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
1831 goto error;
1832 }
1833
1834 /* Translate @host, @port, and @path to a SocketAddress */
1835 if (!nbd_process_legacy_socket_options(options, opts, errp)) {
1836 goto error;
1837 }
1838
1839 /* Pop the config into our state object. Exit if invalid. */
1840 s->saddr = nbd_config(s, options, errp);
1841 if (!s->saddr) {
1842 goto error;
1843 }
1844
1845 s->export = g_strdup(qemu_opt_get(opts, "export"));
1846 if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
1847 error_setg(errp, "export name too long to send to server");
1848 goto error;
1849 }
1850
1851 s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
1852 if (s->tlscredsid) {
1853 s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
1854 if (!s->tlscreds) {
1855 goto error;
1856 }
1857
1858 s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
1859 if (!s->tlshostname &&
1860 s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
1861 s->tlshostname = g_strdup(s->saddr->u.inet.host);
1862 }
1863 }
1864
1865 s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
1866 if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
1867 error_setg(errp, "x-dirty-bitmap query too long to send to server");
1868 goto error;
1869 }
1870
1871 s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
1872 s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);
1873
1874 ret = 0;
1875
1876 error:
1877 qemu_opts_del(opts);
1878 return ret;
1879 }
1880
1881 static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
1882 Error **errp)
1883 {
1884 int ret;
1885 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1886
1887 s->bs = bs;
1888 qemu_mutex_init(&s->requests_lock);
1889 qemu_co_queue_init(&s->free_sema);
1890 qemu_co_mutex_init(&s->send_mutex);
1891 qemu_co_mutex_init(&s->receive_mutex);
1892
1893 if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
1894 return -EEXIST;
1895 }
1896
1897 ret = nbd_process_options(bs, options, errp);
1898 if (ret < 0) {
1899 goto fail;
1900 }
1901
1902 s->conn = nbd_client_connection_new(s->saddr, true, s->export,
1903 s->x_dirty_bitmap, s->tlscreds,
1904 s->tlshostname);
1905
1906 if (s->open_timeout) {
1907 nbd_client_connection_enable_retry(s->conn);
1908 open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
1909 s->open_timeout * NANOSECONDS_PER_SECOND);
1910 }
1911
1912 s->state = NBD_CLIENT_CONNECTING_WAIT;
1913 ret = nbd_do_establish_connection(bs, true, errp);
1914 if (ret < 0) {
1915 goto fail;
1916 }
1917
1918 /*
1919 * The connect attempt is done, so we no longer need this timer.
1920 * Delete it, because we do not want it to be around when this node
1921 * is drained or closed.
1922 */
1923 open_timer_del(s);
1924
1925 nbd_client_connection_enable_retry(s->conn);
1926
1927 return 0;
1928
1929 fail:
1930 open_timer_del(s);
1931 nbd_clear_bdrvstate(bs);
1932 return ret;
1933 }
1934
1935 static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
1936 {
1937 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1938 uint32_t min = s->info.min_block;
1939 uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);
1940
1941 /*
1942 * If the server did not advertise an alignment:
1943 * - a size that is not sector-aligned implies that an alignment
1944 * of 1 can be used to access those tail bytes
1945 * - advertisement of block status requires an alignment of 1, so
1946 * that we don't violate block layer constraints that block
1947 * status is always aligned (as we can't control whether the
1948 * server will report sub-sector extents, such as a hole at EOF
1949 * on an unaligned POSIX file)
1950 * - otherwise, assume the server is so old that we are safer avoiding
1951 * sub-sector requests
1952 */
1953 if (!min) {
1954 min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
1955 s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
1956 }
1957
1958 bs->bl.request_alignment = min;
1959 bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
1960 bs->bl.max_pwrite_zeroes = max;
1961 bs->bl.max_transfer = max;
1962
1963 /*
1964 * Assume that if the server supports extended headers, it also
1965 * supports unlimited size zero and trim commands.
1966 */
1967 if (s->info.mode >= NBD_MODE_EXTENDED) {
1968 bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
1969 }
1970
1971 if (s->info.opt_block &&
1972 s->info.opt_block > bs->bl.opt_transfer) {
1973 bs->bl.opt_transfer = s->info.opt_block;
1974 }
1975 }
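
/*
 * Illustrative example (not part of the upstream file), assuming a
 * structured-reply server that advertises min_block=512, opt_block=4096
 * and max_block=1M: the code above yields request_alignment=512,
 * opt_transfer=4096, max_transfer=max_pwrite_zeroes=1M (1M is below
 * NBD_MAX_BUFFER_SIZE), and max_pdiscard=INT_MAX rounded down to a
 * 512-byte multiple.
 */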
1976
1977 static void nbd_close(BlockDriverState *bs)
1978 {
1979 nbd_client_close(bs);
1980 nbd_clear_bdrvstate(bs);
1981 }
1982
1983 /*
1984 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
1985 * to a smaller size with exact=false, there is no reason to fail the
1986 * operation.
1987 *
1988  * Preallocation mode is ignored since it does not seem useful to fail when
1989 * we never change anything.
1990 */
1991 static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
1992 bool exact, PreallocMode prealloc,
1993 BdrvRequestFlags flags, Error **errp)
1994 {
1995 BDRVNBDState *s = bs->opaque;
1996
1997 if (offset != s->info.size && exact) {
1998 error_setg(errp, "Cannot resize NBD nodes");
1999 return -ENOTSUP;
2000 }
2001
2002 if (offset > s->info.size) {
2003 error_setg(errp, "Cannot grow NBD nodes");
2004 return -EINVAL;
2005 }
2006
2007 return 0;
2008 }
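/*
 * Resulting behaviour, for example with a 1 GiB export (illustrative):
 * truncating to 1 GiB succeeds regardless of 'exact'; shrinking to 512 MiB
 * succeeds only with exact=false (with exact=true it fails with -ENOTSUP);
 * growing to 2 GiB fails with -EINVAL (or -ENOTSUP if exact=true).
 */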
2009
2010 static int64_t coroutine_fn nbd_co_getlength(BlockDriverState *bs)
2011 {
2012 BDRVNBDState *s = bs->opaque;
2013
2014 return s->info.size;
2015 }
2016
2017 static void nbd_refresh_filename(BlockDriverState *bs)
2018 {
2019 BDRVNBDState *s = bs->opaque;
2020 const char *host = NULL, *port = NULL, *path = NULL;
2021 size_t len = 0;
2022
2023 if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
2024 const InetSocketAddress *inet = &s->saddr->u.inet;
2025 if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
2026 host = inet->host;
2027 port = inet->port;
2028 }
2029 } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
2030 path = s->saddr->u.q_unix.path;
2031 } /* else can't represent as pseudo-filename */
2032
2033 if (path && s->export) {
2034 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2035 "nbd+unix:///%s?socket=%s", s->export, path);
2036 } else if (path && !s->export) {
2037 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2038 "nbd+unix://?socket=%s", path);
2039 } else if (host && s->export) {
2040 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2041 "nbd://%s:%s/%s", host, port, s->export);
2042 } else if (host && !s->export) {
2043 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2044 "nbd://%s:%s", host, port);
2045 }
2046 if (len >= sizeof(bs->exact_filename)) {
2047 /* Name is too long to represent exactly, so leave it empty. */
2048 bs->exact_filename[0] = '\0';
2049 }
2050 }
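/*
 * Examples of the pseudo-filenames generated above (placeholder values):
 *   nbd+unix:///myexport?socket=/tmp/nbd.sock   UNIX socket, named export
 *   nbd+unix://?socket=/tmp/nbd.sock            UNIX socket, default export
 *   nbd://localhost:10809/myexport              TCP, named export
 *   nbd://localhost:10809                       TCP, default export
 */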
2051
2052 static char *nbd_dirname(BlockDriverState *bs, Error **errp)
2053 {
2054 /* The generic bdrv_dirname() implementation is able to work out some
2055 * directory name for NBD nodes, but that would be wrong. So far there is no
2056 * specification for how "export paths" would work, so NBD does not have
2057 * directory names. */
2058 error_setg(errp, "Cannot generate a base directory for NBD nodes");
2059 return NULL;
2060 }
2061
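/*
 * Options that select which data the node gives access to; a name ending
 * in '.' (such as "server.") is treated as a prefix covering all of its
 * sub-options.
 */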
2062 static const char *const nbd_strong_runtime_opts[] = {
2063 "path",
2064 "host",
2065 "port",
2066 "export",
2067 "tls-creds",
2068 "tls-hostname",
2069 "server.",
2070
2071 NULL
2072 };
2073
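/*
 * Called via the .bdrv_cancel_in_flight callback when pending requests
 * should no longer be retried: demoting CONNECTING_WAIT to
 * CONNECTING_NOWAIT makes queued requests fail promptly instead of waiting
 * out reconnect-delay, and any connection attempt in progress is cancelled.
 */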
2074 static void nbd_cancel_in_flight(BlockDriverState *bs)
2075 {
2076 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
2077
2078 reconnect_delay_timer_del(s);
2079
2080 qemu_mutex_lock(&s->requests_lock);
2081 if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
2082 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
2083 }
2084 qemu_mutex_unlock(&s->requests_lock);
2085
2086 nbd_co_establish_connection_cancel(s->conn);
2087 }
2088
2089 static void nbd_attach_aio_context(BlockDriverState *bs,
2090 AioContext *new_context)
2091 {
2092 BDRVNBDState *s = bs->opaque;
2093
2094 /* The open_timer is used only during nbd_open() */
2095 assert(!s->open_timer);
2096
2097 /*
2098 * The reconnect_delay_timer is scheduled in I/O paths when the
2099 * connection is lost, to cancel the reconnection attempt after a
2100 * given time. Once this attempt is done (successfully or not),
2101 * nbd_reconnect_attempt() ensures the timer is deleted before the
2102 * respective I/O request is resumed.
2103 * Since the AioContext can only be changed when a node is drained,
2104 * the reconnect_delay_timer cannot be active here.
2105 */
2106 assert(!s->reconnect_delay_timer);
2107 }
2108
2109 static void nbd_detach_aio_context(BlockDriverState *bs)
2110 {
2111 BDRVNBDState *s = bs->opaque;
2112
2113 assert(!s->open_timer);
2114 assert(!s->reconnect_delay_timer);
2115 }
2116
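/*
 * Three BlockDriver definitions follow; they differ only in protocol_name
 * so that the nbd://, nbd+tcp:// and nbd+unix:// URI schemes all map to
 * this driver.
 */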
2117 static BlockDriver bdrv_nbd = {
2118 .format_name = "nbd",
2119 .protocol_name = "nbd",
2120 .instance_size = sizeof(BDRVNBDState),
2121 .bdrv_parse_filename = nbd_parse_filename,
2122 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2123 .create_opts = &bdrv_create_opts_simple,
2124 .bdrv_file_open = nbd_open,
2125 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2126 .bdrv_co_preadv = nbd_client_co_preadv,
2127 .bdrv_co_pwritev = nbd_client_co_pwritev,
2128 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2129 .bdrv_close = nbd_close,
2130 .bdrv_co_flush_to_os = nbd_client_co_flush,
2131 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2132 .bdrv_refresh_limits = nbd_refresh_limits,
2133 .bdrv_co_truncate = nbd_co_truncate,
2134 .bdrv_co_getlength = nbd_co_getlength,
2135 .bdrv_refresh_filename = nbd_refresh_filename,
2136 .bdrv_co_block_status = nbd_client_co_block_status,
2137 .bdrv_dirname = nbd_dirname,
2138 .strong_runtime_opts = nbd_strong_runtime_opts,
2139 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2140
2141 .bdrv_attach_aio_context = nbd_attach_aio_context,
2142 .bdrv_detach_aio_context = nbd_detach_aio_context,
2143 };
2144
2145 static BlockDriver bdrv_nbd_tcp = {
2146 .format_name = "nbd",
2147 .protocol_name = "nbd+tcp",
2148 .instance_size = sizeof(BDRVNBDState),
2149 .bdrv_parse_filename = nbd_parse_filename,
2150 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2151 .create_opts = &bdrv_create_opts_simple,
2152 .bdrv_file_open = nbd_open,
2153 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2154 .bdrv_co_preadv = nbd_client_co_preadv,
2155 .bdrv_co_pwritev = nbd_client_co_pwritev,
2156 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2157 .bdrv_close = nbd_close,
2158 .bdrv_co_flush_to_os = nbd_client_co_flush,
2159 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2160 .bdrv_refresh_limits = nbd_refresh_limits,
2161 .bdrv_co_truncate = nbd_co_truncate,
2162 .bdrv_co_getlength = nbd_co_getlength,
2163 .bdrv_refresh_filename = nbd_refresh_filename,
2164 .bdrv_co_block_status = nbd_client_co_block_status,
2165 .bdrv_dirname = nbd_dirname,
2166 .strong_runtime_opts = nbd_strong_runtime_opts,
2167 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2168
2169 .bdrv_attach_aio_context = nbd_attach_aio_context,
2170 .bdrv_detach_aio_context = nbd_detach_aio_context,
2171 };
2172
2173 static BlockDriver bdrv_nbd_unix = {
2174 .format_name = "nbd",
2175 .protocol_name = "nbd+unix",
2176 .instance_size = sizeof(BDRVNBDState),
2177 .bdrv_parse_filename = nbd_parse_filename,
2178 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2179 .create_opts = &bdrv_create_opts_simple,
2180 .bdrv_file_open = nbd_open,
2181 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2182 .bdrv_co_preadv = nbd_client_co_preadv,
2183 .bdrv_co_pwritev = nbd_client_co_pwritev,
2184 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2185 .bdrv_close = nbd_close,
2186 .bdrv_co_flush_to_os = nbd_client_co_flush,
2187 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2188 .bdrv_refresh_limits = nbd_refresh_limits,
2189 .bdrv_co_truncate = nbd_co_truncate,
2190 .bdrv_co_getlength = nbd_co_getlength,
2191 .bdrv_refresh_filename = nbd_refresh_filename,
2192 .bdrv_co_block_status = nbd_client_co_block_status,
2193 .bdrv_dirname = nbd_dirname,
2194 .strong_runtime_opts = nbd_strong_runtime_opts,
2195 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2196
2197 .bdrv_attach_aio_context = nbd_attach_aio_context,
2198 .bdrv_detach_aio_context = nbd_detach_aio_context,
2199 };
2200
2201 static void bdrv_nbd_init(void)
2202 {
2203 bdrv_register(&bdrv_nbd);
2204 bdrv_register(&bdrv_nbd_tcp);
2205 bdrv_register(&bdrv_nbd_unix);
2206 }
2207
2208 block_init(bdrv_nbd_init);