/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"
#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
#define INDEX_TO_COOKIE(index)  ((index) + 1)
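/*
 * Request slots in requests[] are indexed 0..MAX_NBD_REQUESTS-1; on the wire
 * each request carries cookie = index + 1, so cookie 0 never names a valid
 * slot and is used below to mean "no reply header currently claimed".
 */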
typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
} NBDClientRequest;
typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;
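/*
 * Client state machine, assembled from how the states are used below:
 * CONNECTING_WAIT   - connection lost; requests are parked and wait for a
 *                     reconnect for up to reconnect-delay seconds.
 * CONNECTING_NOWAIT - still trying to reconnect, but new and delayed requests
 *                     fail immediately instead of waiting.
 * CONNECTED         - handshake finished; requests flow normally.
 * QUIT              - fatal error or teardown; no further I/O is attempted.
 */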
typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;
    CoQueue free_sema;
    unsigned in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket. */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;
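/*
 * Three independent synchronization domains are used below (see the field
 * comments above): requests_lock guards connection state and request-slot
 * bookkeeping, send_mutex serializes writes to the socket, and receive_mutex
 * serializes reading reply headers.  Comments of the form "Called with ...
 * held/taken" indicate which lock a caller is expected to own.
 */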
static void nbd_yank(void *opaque);
static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}
/* Called with s->receive_mutex taken.  */
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
{
    int i;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
            return;
        }
    }
}
/* Called with s->requests_lock held.  */
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
{
    QEMU_LOCK_GUARD(&s->requests_lock);
    nbd_channel_error_locked(s, ret);
}
static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    reconnect_delay_timer_del(s);
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);
}
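/*
 * The reconnect delay timer bounds how long requests may stay parked in
 * CONNECTING_WAIT: when it fires, the state is downgraded to
 * CONNECTING_NOWAIT and any in-progress connection attempt is cancelled, so
 * delayed and future requests fail instead of waiting indefinitely.
 */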
static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}
static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }
}
static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}

static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}

static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}
static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
    /*
     * Called only after a socket error, so this is not performance sensitive.
     */
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;
}
/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}
int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                bool blocking, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    assert_bdrv_graph_readable();

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(s->ioc, true);

    /* successfully connected */
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

    return 0;
}
/* Called with s->requests_lock held.  */
static bool nbd_client_connecting(BDRVNBDState *s)
{
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}
/* Called with s->requests_lock taken.  */
static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
{
    int ret;
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 1);

    trace_nbd_reconnect_attempt(s->bs->in_flight);

    if (blocking && !s->reconnect_delay_timer) {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
                                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                                   s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /* Finalize previous connection if any */
    if (s->ioc) {
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
    trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
    qemu_mutex_lock(&s->requests_lock);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}
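/*
 * Receive-side dispatch: there is no dedicated reader coroutine.  Whichever
 * request coroutine holds receive_mutex reads the next reply header from the
 * socket; if the header belongs to a different in-flight request, that
 * request's coroutine is woken and this one yields until its own cookie shows
 * up (see the comments inside nbd_receive_replies() below).
 */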
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie,
                                            Error **errp)
{
    int ret;
    uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }

        if (s->reply.cookie != 0) {
            /*
             * Some other request is being handled now. It should already be
             * woken by whoever set s->reply.cookie (or never wait in this
             * yield). So, we should not wake it here.
             */
            ind2 = COOKIE_TO_INDEX(s->reply.cookie);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for 2 reasons:
             * 1. From this function, executing in parallel coroutine, when our
             *    cookie is received.
             * 2. From nbd_co_receive_one_chunk(), when previous request is
             *    finished and s->reply.cookie set to 0.
             * Anyway, it's OK to lock the mutex and go to the next iteration.
             */
            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and cookie is 0. We have to do the dirty work. */
        assert(s->reply.cookie == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, s->info.mode, errp);
        if (ret == 0) {
            ret = -EIO;
            error_setg(errp, "server dropped connection");
        }
        if (ret < 0) {
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) &&
            s->info.mode < NBD_MODE_STRUCTURED) {
            nbd_channel_error(s, -EINVAL);
            error_setg(errp, "unexpected structured reply");
            return -EINVAL;
        }
        ind2 = COOKIE_TO_INDEX(s->reply.cookie);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            error_setg(errp, "unexpected cookie value");
            return -EINVAL;
        }
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}
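/*
 * Send side: pick a free slot in s->requests[] (bounded by MAX_NBD_REQUESTS)
 * and send the request header, plus an optional write payload, under
 * send_mutex.  When no slot is free, or the connection is down while other
 * requests are still in flight, the caller waits on free_sema.
 */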
static int coroutine_fn GRAPH_RDLOCK
nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                    QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        if (nbd_client_connecting(s)) {
            nbd_reconnect_attempt(s);
            qemu_co_queue_restart_all(&s->free_sema);
        }
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            goto err;
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->cookie = INDEX_TO_COOKIE(i);
    request->mode = s->info.mode;

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                              NULL) < 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);

    if (rc < 0) {
        qemu_mutex_lock(&s->requests_lock);
err:
        nbd_channel_error_locked(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
        }
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
        qemu_mutex_unlock(&s->requests_lock);
    }
    return rc;
}
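/*
 * Helpers for decoding structured-reply payloads: each reads one big-endian
 * field at the current cursor and advances *payload past it.
 */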
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}
/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent32 *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}
/*
 * nbd_parse_error_payload
 * on success @errp contains message describing nbd error reply
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                   "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                   "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}
static int coroutine_fn
nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
                                   QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}
/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t cookie, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = COOKIE_TO_INDEX(cookie);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    ret = nbd_receive_replies(s, cookie, errp);
    if (ret < 0) {
        error_prepend(errp, "Connection closed: ");
        return -EIO;
    }

    assert(s->reply.cookie == cookie);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.mode >= NBD_MODE_STRUCTURED);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}
/*
 * nbd_co_receive_one_chunk
 * Read reply, wake up connection_co and set s->quit if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t cookie, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For assert at loop start in nbd_connection_entry */
        *reply = s->reply;
    }
    s->reply.cookie = 0;

    nbd_recv_coroutines_wake(s);

    return ret;
}
typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}
/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)
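/*
 * The loop above runs once per reply chunk; nbd_reply_chunk_iter_receive()
 * below returns false, ending the iteration, after a simple reply, after the
 * chunk carrying NBD_REPLY_FLAG_DONE, or once a fatal error has been recorded
 * in the iterator.
 */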
/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                                      NBDReplyChunkIter *iter,
                                                      uint64_t cookie,
                                                      QEMUIOVector *qiov,
                                                      NBDReply *reply,
                                                      void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || iter->ret < 0) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    qemu_mutex_lock(&s->requests_lock);
    s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

    return false;
}
static int coroutine_fn
nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
                           int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int coroutine_fn
nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
                             uint64_t offset, QEMUIOVector *qiov,
                             int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
                            s->info.mode >= NBD_MODE_STRUCTURED,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
                                 uint64_t length, NBDExtent32 *extent,
                                 int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
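/*
 * Common send/receive wrapper for requests that carry no read payload: send
 * the request (optionally with a write payload), collect the reply, and retry
 * the whole transaction while the client remains in the CONNECTING_WAIT state
 * (see nbd_client_will_reconnect()).
 */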
static int coroutine_fn GRAPH_RDLOCK
nbd_co_request(BlockDriverState *bs, NBDRequest *request,
               QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    do {
        ret = nbd_co_send_request(bs, request, write_qiov);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_return_code(s, request->cookie,
                                         &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->cookie, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}
static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
                                           &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.cookie,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}
static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    return nbd_co_request(bs, &request, qiov);
}
static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                            BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    /* rely on max_pwrite_zeroes */
    assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    return nbd_co_request(bs, &request, NULL);
}
static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}
static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    /* rely on max_pdiscard */
    assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}
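/*
 * Issue NBD_CMD_BLOCK_STATUS with NBD_CMD_FLAG_REQ_ONE for the base:allocation
 * context and translate the single returned extent's NBD_STATE_HOLE /
 * NBD_STATE_ZERO bits into BDRV_BLOCK_* flags.
 */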
static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent32 extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(bytes, s->info.size - offset),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }
    if (s->info.mode < NBD_MODE_EXTENDED) {
        request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                          request.len);
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
                                               &extent, &request_ret,
                                               &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.cookie,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}
static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}
static void nbd_yank(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    QEMU_LOCK_GUARD(&s->requests_lock);
    qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    s->state = NBD_CLIENT_QUIT;
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

    nbd_teardown_connection(bs);
}
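/*
 * Filename/URI forms handled by the parsing helpers below, for example:
 *   nbd://host[:port]/export
 *   nbd+tcp://host[:port]/export
 *   nbd+unix:///export?socket=/path/to/socket
 *   nbd:unix:/path/to/socket:exportname=name
 *   nbd:host:port:exportname=name
 */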
/*
 * Parse nbd_open options
 */

static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "";
    if (p[0] == '/') {
        p++;
    }
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}
static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }
        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}
static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);

    return saddr;
}
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}
static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "tls-hostname",
            .type = QEMU_OPT_STRING,
            .help = "Override hostname for validating TLS x509 certificate",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        {
            .name = "open-timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "In seconds. If zero, the nbd driver tries the connection "
                    "only once, and fails to open if the connection fails. "
                    "If non-zero, the nbd driver will repeat connection "
                    "attempts until successful or until @open-timeout seconds "
                    "have elapsed. Default 0",
        },
        { /* end of list */ }
    },
};
static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name too long to send to server");
        goto error;
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
        if (!s->tlshostname &&
            s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
            s->tlshostname = g_strdup(s->saddr->u.inet.host);
        }
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        goto error;
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
    s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);

    ret = 0;

error:
    qemu_opts_del(opts);
    return ret;
}
static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_mutex_init(&s->requests_lock);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        goto fail;
    }

    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds,
                                        s->tlshostname);

    if (s->open_timeout) {
        nbd_client_connection_enable_retry(s->conn);
        open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                        s->open_timeout * NANOSECONDS_PER_SECOND);
    }

    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ret = nbd_do_establish_connection(bs, true, errp);
    if (ret < 0) {
        goto fail;
    }

    /*
     * The connect attempt is done, so we no longer need this timer.
     * Delete it, because we do not want it to be around when this node
     * is drained or closed.
     */
    open_timer_del(s);

    nbd_client_connection_enable_retry(s->conn);

    return 0;

fail:
    open_timer_del(s);
    nbd_clear_bdrvstate(bs);
    return ret;
}
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    /*
     * Assume that if the server supports extended headers, it also
     * supports unlimited size zero and trim commands.
     */
    if (s->info.mode >= NBD_MODE_EXTENDED) {
        bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
    }

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}
static void nbd_close(BlockDriverState *bs)
{
    nbd_client_close(bs);
    nbd_clear_bdrvstate(bs);
}
/*
 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
 * to a smaller size with exact=false, there is no reason to fail the
 * operation.
 *
 * Preallocation mode is ignored since it does not seem useful to fail when
 * we never change anything.
 */
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        error_setg(errp, "Cannot resize NBD nodes");
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        error_setg(errp, "Cannot grow NBD nodes");
        return -EINVAL;
    }

    return 0;
}

static int64_t coroutine_fn nbd_co_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}
static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;
    size_t len = 0;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s", host, port);
    }
    if (len >= sizeof(bs->exact_filename)) {
        /* Name is too long to represent exactly, so leave it empty. */
        bs->exact_filename[0] = '\0';
    }
}
static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is no
     * specification for how "export paths" would work, so NBD does not have
     * directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}

static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "tls-hostname",
    "server.",

    NULL
};
static void nbd_cancel_in_flight(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    reconnect_delay_timer_del(s);

    qemu_mutex_lock(&s->requests_lock);
    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    qemu_mutex_unlock(&s->requests_lock);

    nbd_co_establish_connection_cancel(s->conn);
}
static void nbd_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    BDRVNBDState *s = bs->opaque;

    /* The open_timer is used only during nbd_open() */
    assert(!s->open_timer);

    /*
     * The reconnect_delay_timer is scheduled in I/O paths when the
     * connection is lost, to cancel the reconnection attempt after a
     * given time.  Once this attempt is done (successfully or not),
     * nbd_reconnect_attempt() ensures the timer is deleted before the
     * respective I/O request is resumed.
     * Since the AioContext can only be changed when a node is drained,
     * the reconnect_delay_timer cannot be active here.
     */
    assert(!s->reconnect_delay_timer);
}

static void nbd_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    assert(!s->reconnect_delay_timer);
}
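/*
 * Three BlockDriver definitions follow; they differ only in .protocol_name
 * ("nbd", "nbd+tcp", "nbd+unix") so that each URI scheme resolves to this
 * driver, and otherwise share the same callbacks.
 */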
static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};
static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};
static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};
static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);