/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))

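/*
 * Illustrative note (not part of the driver; the slot variable i is
 * hypothetical): XOR is its own inverse, so a request-slot index round-trips
 * through the on-wire handle:
 *
 *     uint64_t handle = INDEX_TO_HANDLE(bs, i);
 *     assert(HANDLE_TO_INDEX(bs, handle) == i);
 *
 * Mixing in the BlockDriverState pointer makes handles that were not issued
 * by this client instance map to an out-of-range index, which the reply
 * path below rejects.
 */
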
typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

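/*
 * Informal sketch of the transitions implemented below (informative only):
 *
 *   CONNECTING_WAIT  --connect succeeds-->           CONNECTED
 *   CONNECTING_WAIT  --reconnect-delay timer fires--> CONNECTING_NOWAIT
 *   CONNECTED        --channel error (-EIO)-->        CONNECTING_WAIT if
 *                      reconnect_delay is set, else CONNECTING_NOWAIT
 *   any state        --fatal error, teardown, yank--> QUIT
 *
 * While in CONNECTING_WAIT, in-flight requests are paused and retried after
 * a successful reconnect; in CONNECTING_NOWAIT they fail immediately.
 */
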
typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;
    CoQueue free_sema;
    unsigned in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;

static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}

/* Called with s->receive_mutex taken.  */
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
{
    int i;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
            return;
        }
    }
}

/* Called with s->requests_lock held.  */
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
{
    QEMU_LOCK_GUARD(&s->requests_lock);
    nbd_channel_error_locked(s, ret);
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    reconnect_delay_timer_del(s);
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}

static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}

static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}

static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
    /*
     * Called only after a socket error, so this is not performance sensitive.
     */
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;
}

/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}

int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                bool blocking, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));

    /* successfully connected */
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

    return 0;
}

/* Called with s->requests_lock held.  */
static bool nbd_client_connecting(BDRVNBDState *s)
{
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}

/* Called with s->requests_lock taken.  */
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
{
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 1);

    if (blocking && !s->reconnect_delay_timer) {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
            qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
            s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /* Finalize previous connection if any */
    if (s->ioc) {
        qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    nbd_co_do_establish_connection(s->bs, blocking, NULL);
    qemu_mutex_lock(&s->requests_lock);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}

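/*
 * Informative note: there is no background reconnect task.  Reconnection is
 * driven by the request coroutines themselves: nbd_co_send_request() below
 * lets exactly one in-flight request (hence the assert(s->in_flight == 1)
 * above) run nbd_reconnect_attempt() while every other request waits on
 * free_sema, which is also why requests_lock is dropped temporarily around
 * the possibly blocking connection attempt.
 */
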
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
{
    int ret;
    uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }

        if (s->reply.handle != 0) {
            /*
             * Some other request is being handled now. It should already be
             * woken by whoever set s->reply.handle (or never wait in this
             * yield). So, we should not wake it here.
             */
            ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for 2 reasons:
             * 1. From this function, executing in parallel coroutine, when our
             *    handle is received.
             * 2. From nbd_co_receive_one_chunk(), when previous request is
             *    finished and s->reply.handle set to 0.
             * Anyway, it's OK to lock the mutex and go to the next iteration.
             */

            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and handle is 0. We have to do the dirty work. */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
        if (ret <= 0) {
            ret = ret ? ret : -EIO;
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}

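/*
 * Informative note on the scheme above: there is no dedicated reader
 * coroutine.  Whichever request coroutine takes receive_mutex while
 * s->reply.handle == 0 reads the next reply header off the socket; if that
 * reply belongs to some other request, it wakes that request's coroutine
 * and loops.  s->reply.handle is reset to 0 in nbd_co_receive_one_chunk()
 * once the current chunk has been consumed.
 */
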
static int coroutine_fn nbd_co_send_request(BlockDriverState *bs,
                                            NBDRequest *request,
                                            QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        if (nbd_client_connecting(s)) {
            nbd_reconnect_attempt(s);
            qemu_co_queue_restart_all(&s->free_sema);
        }
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            goto err;
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->handle = INDEX_TO_HANDLE(s, i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && s->state == NBD_CLIENT_CONNECTED) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);

    if (rc < 0) {
        qemu_mutex_lock(&s->requests_lock);
err:
        nbd_channel_error_locked(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
        }
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
        qemu_mutex_unlock(&s->requests_lock);
    }
    return rc;
}

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}

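/*
 * Usage sketch (hypothetical local buffer): the helpers consume big-endian
 * fields in wire order and advance the cursor as a side effect, e.g. for an
 * OFFSET_HOLE payload as parsed below:
 *
 *     uint8_t *p = payload;
 *     uint64_t offset = payload_advance64(&p);
 *     uint32_t hole_size = payload_advance32(&p);
 */
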
static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id, s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}

/*
 * nbd_parse_error_payload
 * on success @errp contains message describing nbd error reply
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int coroutine_fn
nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
                                   QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    ret = nbd_receive_replies(s, handle);
    if (ret < 0) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                       " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                       " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read reply, wake up connection_co and set s->quit if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For assert at loop start in nbd_connection_entry */
        *reply = s->reply;
    }
    s->reply.handle = 0;

    nbd_recv_coroutines_wake(s);

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

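/*
 * Typical use (compare the receive helpers further below); the loop body
 * runs once per structured reply chunk and is skipped for simple replies:
 *
 *     NBDReplyChunkIter iter;
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
 *         ...inspect reply.structured and payload...
 *     }
 *     error_propagate(errp, iter.err);
 *     *request_ret = iter.request_ret;
 */
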
/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || iter->ret < 0) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    qemu_mutex_lock(&s->requests_lock);
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

    return false;
}

static int coroutine_fn
nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
                           int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn
nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
                             uint64_t offset, QEMUIOVector *qiov,
                             int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
                                 uint64_t handle, uint64_t length,
                                 NBDExtent *extent,
                                 int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn nbd_co_request(BlockDriverState *bs,
                                       NBDRequest *request,
                                       QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    do {
        ret = nbd_co_send_request(bs, request, write_qiov);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_return_code(s, request->handle,
                                         &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->handle, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}

static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
                                             int64_t bytes, QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
                                           &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}

static int coroutine_fn nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
                                              int64_t bytes, QEMUIOVector *qiov,
                                              BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

static int coroutine_fn nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                                    int64_t bytes, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,  /* .len is uint32_t actually */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                               int64_t bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes, /* len is uint32_t */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                   MIN(bytes, s->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
                                               &extent, &request_ret,
                                               &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}

static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}

static void nbd_yank(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    QEMU_LOCK_GUARD(&s->requests_lock);
    qio_channel_shutdown(QIO_CHANNEL(s->ioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    s->state = NBD_CLIENT_QUIT;
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

    nbd_teardown_connection(bs);
}

/*
 * Parse nbd_open options
 */

static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "";
    if (p[0] == '/') {
        p++;
    }
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

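/*
 * Examples of the @options produced above (informative; paths and host
 * names are hypothetical):
 *
 *   nbd+unix:///my-export?socket=/tmp/nbd.sock
 *     -> export=my-export, server.type=unix, server.path=/tmp/nbd.sock
 *
 *   nbd://localhost:10809/my-export
 *     -> export=my-export, server.type=inet, server.host=localhost,
 *        server.port=10809
 */
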
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}

static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}

static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}

static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}

static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "tls-hostname",
            .type = QEMU_OPT_STRING,
            .help = "Override hostname for validating TLS x509 certificate",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        {
            .name = "open-timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "In seconds. If zero, the nbd driver tries the connection "
                    "only once, and fails to open if the connection fails. "
                    "If non-zero, the nbd driver will repeat connection "
                    "attempts until successful or until @open-timeout seconds "
                    "have elapsed. Default 0",
        },
        { /* end of list */ }
    },
};

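/*
 * Illustrative command line wiring these options together (values are
 * hypothetical):
 *
 *   -blockdev driver=nbd,node-name=nbd0,server.type=inet,\
 *      server.host=localhost,server.port=10809,export=my-export,\
 *      reconnect-delay=5
 */
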
static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name too long to send to server");
        goto error;
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
        if (!s->tlshostname &&
            s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
            s->tlshostname = g_strdup(s->saddr->u.inet.host);
        }
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        goto error;
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
    s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);

    ret = 0;

error:
    qemu_opts_del(opts);
    return ret;
}

static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_mutex_init(&s->requests_lock);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        goto fail;
    }

    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds,
                                        s->tlshostname);

    if (s->open_timeout) {
        nbd_client_connection_enable_retry(s->conn);
        open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                        s->open_timeout * NANOSECONDS_PER_SECOND);
    }

    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ret = nbd_do_establish_connection(bs, true, errp);
    if (ret < 0) {
        goto fail;
    }

    /*
     * The connect attempt is done, so we no longer need this timer.
     * Delete it, because we do not want it to be around when this node
     * is drained or closed.
     */
    open_timer_del(s);

    nbd_client_connection_enable_retry(s->conn);

    return 0;

fail:
    open_timer_del(s);
    nbd_clear_bdrvstate(bs);
    return ret;
}

static int coroutine_fn nbd_co_flush(BlockDriverState *bs)
{
    return nbd_client_co_flush(bs);
}

static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}

static void nbd_close(BlockDriverState *bs)
{
    nbd_client_close(bs);
    nbd_clear_bdrvstate(bs);
}

/*
 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
 * to a smaller size with exact=false, there is no reason to fail the
 * operation.
 *
 * Preallocation mode is ignored since it does not seem useful to fail when
 * we never change anything.
 */
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        error_setg(errp, "Cannot resize NBD nodes");
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        error_setg(errp, "Cannot grow NBD nodes");
        return -EINVAL;
    }

    return 0;
}

static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}

static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;
    size_t len = 0;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s", host, port);
    }
    if (len >= sizeof(bs->exact_filename)) {
        /* Name is too long to represent exactly, so leave it empty. */
        bs->exact_filename[0] = '\0';
    }
}

static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is
     * no specification for how "export paths" would work, so NBD does not
     * have directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}

static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "tls-hostname",
    "server.",

    NULL
};

static void nbd_cancel_in_flight(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    reconnect_delay_timer_del(s);

    qemu_mutex_lock(&s->requests_lock);
    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    qemu_mutex_unlock(&s->requests_lock);

    nbd_co_establish_connection_cancel(s->conn);
}

static void nbd_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    BDRVNBDState *s = bs->opaque;

    /* The open_timer is used only during nbd_open() */
    assert(!s->open_timer);

    /*
     * The reconnect_delay_timer is scheduled in I/O paths when the
     * connection is lost, to cancel the reconnection attempt after a
     * given time.  Once this attempt is done (successfully or not),
     * nbd_reconnect_attempt() ensures the timer is deleted before the
     * respective I/O request is resumed.
     * Since the AioContext can only be changed when a node is drained,
     * the reconnect_delay_timer cannot be active here.
     */
    assert(!s->reconnect_delay_timer);

    if (s->ioc) {
        qio_channel_attach_aio_context(s->ioc, new_context);
    }
}

static void nbd_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    assert(!s->reconnect_delay_timer);

    if (s->ioc) {
        qio_channel_detach_aio_context(s->ioc);
    }
}

static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);