/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS 16

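/*
 * A request handle must be unique on the wire.  XOR-ing a request slot index
 * with the BlockDriverState pointer makes handles distinct across block
 * devices while remaining trivially invertible, so the same XOR maps a
 * handle back to its slot index.
 */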
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;
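
/*
 * On a recoverable (-EIO) channel error, a CONNECTED client moves to
 * CONNECTING_WAIT when a reconnect delay is configured (in-flight requests
 * are retried until the delay expires) or to CONNECTING_NOWAIT otherwise;
 * the reconnect-delay timer degrades CONNECTING_WAIT to CONNECTING_NOWAIT.
 * Any other error is fatal and moves the client to QUIT.
 */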

typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;
    CoQueue free_sema;
    unsigned in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket. */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;

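/*
 * Three separate locks let sending, receiving, and request bookkeeping
 * proceed independently: one coroutine can transmit a request under
 * send_mutex while another reads a reply header under receive_mutex,
 * with requests_lock taken only briefly to update shared state.
 */
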
static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}

/* Called with s->receive_mutex taken. */
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
{
    int i;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
            return;
        }
    }
}

/* Called with s->requests_lock held. */
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
{
    QEMU_LOCK_GUARD(&s->requests_lock);
    nbd_channel_error_locked(s, ret);
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    reconnect_delay_timer_del(s);
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}

static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}

static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}

static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
    /*
     * Called only after a socket error, so this is not performance sensitive.
     */
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;
}

/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}

int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                bool blocking, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;
    IO_CODE();

    assert_bdrv_graph_readable();
    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));

    /* successfully connected */
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

    return 0;
}

/* Called with s->requests_lock held. */
static bool nbd_client_connecting(BDRVNBDState *s)
{
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}

/* Called with s->requests_lock taken. */
static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
{
    int ret;
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 1);

    trace_nbd_reconnect_attempt(s->bs->in_flight);

    if (blocking && !s->reconnect_delay_timer) {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
                                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                                   s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /* Finalize previous connection if any */
    if (s->ioc) {
        qio_channel_detach_aio_context(s->ioc);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
    trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
    qemu_mutex_lock(&s->requests_lock);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}

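/*
 * Wait until the reply for @handle has been received.  At most one coroutine
 * at a time actually reads reply headers from the socket; every other
 * coroutine parks itself in requests[].receiving and is woken either when its
 * own reply header arrives or when the current reader finishes a reply and
 * clears s->reply.handle.
 */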
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
{
    int ret;
    uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }

        if (s->reply.handle != 0) {
            /*
             * Some other request is being handled now.  It should already
             * have been woken by whoever set s->reply.handle (or it will
             * never wait in this yield), so we should not wake it here.
             */
            ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for two reasons:
             * 1. From this function, executing in a parallel coroutine,
             *    when our handle is received.
             * 2. From nbd_co_receive_one_chunk(), when the previous request
             *    is finished and s->reply.handle is set to 0.
             * Either way, it's OK to lock the mutex and go to the next
             * iteration.
             */

            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We hold the mutex and the handle is 0: do the dirty work. */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
        if (ret <= 0) {
            ret = ret ? ret : -EIO;
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}

static int coroutine_fn GRAPH_RDLOCK
nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                    QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        if (nbd_client_connecting(s)) {
            nbd_reconnect_attempt(s);
            qemu_co_queue_restart_all(&s->free_sema);
        }
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            goto err;
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->handle = INDEX_TO_HANDLE(s, i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                              NULL) < 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);

    if (rc < 0) {
        qemu_mutex_lock(&s->requests_lock);
err:
        nbd_channel_error_locked(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
        }
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
        qemu_mutex_unlock(&s->requests_lock);
    }
    return rc;
}

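/*
 * Helpers for parsing structured-reply payloads: each loads one big-endian
 * field from *@payload and advances the cursor past it.
 */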
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}

static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}

/*
 * nbd_parse_error_payload
 * On success, sets @request_ret to the (negative) errno carried by the
 * server's error reply; @errp is set only when the chunk itself is
 * malformed.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int coroutine_fn
nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
                                   QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If the function fails, @errp contains the corresponding error message, and
 * the connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    ret = nbd_receive_replies(s, handle);
    if (ret < 0) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read a reply chunk, wake coroutines waiting on the reply, and mark the
 * channel as broken if needed.
 * Return value is a fatal error code or a normal nbd reply error code.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For assert at loop start in nbd_connection_entry */
        *reply = s->reply;
    }
    s->reply.handle = 0;

    nbd_recv_coroutines_wake(s);

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

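/*
 * Iteration ends after a chunk carrying NBD_REPLY_FLAG_DONE, after a simple
 * reply, or after a fatal channel error; on exit the request slot has been
 * released and iter.ret / iter.request_ret hold the final results.
 */
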
/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                                      NBDReplyChunkIter *iter,
                                                      uint64_t handle,
                                                      QEMUIOVector *qiov,
                                                      NBDReply *reply,
                                                      void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || iter->ret < 0) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    qemu_mutex_lock(&s->requests_lock);
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

    return false;
}

static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
                                                   int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
                                                     uint64_t offset, QEMUIOVector *qiov,
                                                     int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
                                                         uint64_t handle, uint64_t length,
                                                         NBDExtent *extent,
                                                         int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

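/*
 * Send @request (with an optional write payload) and wait for the server's
 * answer.  On a channel error the request is retried for as long as the
 * client stays in NBD_CLIENT_CONNECTING_WAIT, i.e. while a reconnect may
 * still succeed within the configured reconnect delay.
 */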
static int coroutine_fn GRAPH_RDLOCK
nbd_co_request(BlockDriverState *bs, NBDRequest *request,
               QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    do {
        ret = nbd_co_send_request(bs, request, write_qiov);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_return_code(s, request->handle,
                                         &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->handle, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
                                           &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    return ret ? ret : request_ret;
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                            BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,  /* .len is actually a uint32_t */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn GRAPH_RDLOCK
nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes, /* len is uint32_t */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                   MIN(bytes, s->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
                                               &extent, &request_ret,
                                               &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_will_reconnect(s));

    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}

static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}

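/*
 * Yank callback registered per block device: forcibly shut down the socket
 * so that hung requests fail instead of blocking forever, and mark the
 * client as permanently quit.
 */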
static void nbd_yank(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    QEMU_LOCK_GUARD(&s->requests_lock);
    qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    s->state = NBD_CLIENT_QUIT;
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

    nbd_teardown_connection(bs);
}


/*
 * Parse nbd_open options
 */

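/*
 * Accepted URI forms:
 *   nbd://host[:port]/[export]
 *   nbd+tcp://host[:port]/[export]
 *   nbd+unix:///[export]?socket=path
 * The pieces are stored back into @options as "server.*" and "export"
 * entries.
 */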
static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "";
    if (p[0] == '/') {
        p++;
    }
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}

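/*
 * Legacy filename syntax, kept alongside the URI form:
 *   nbd:unix:/socket/path[:exportname=name]
 *   nbd:host[:port][:exportname=name]
 * Anything containing "://" is handed to nbd_parse_uri() instead.
 */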
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}

static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}
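
/*
 * A sketch of the translation performed above, with placeholder values:
 *
 *   in:  host=localhost,port=10810,export=blk
 *   out: server.type=inet,server.host=localhost,server.port=10810
 *
 * and for a UNIX socket:
 *
 *   in:  path=/tmp/nbd.sock
 *   out: server.type=unix,server.path=/tmp/nbd.sock
 *
 * When "port" is omitted, NBD_DEFAULT_PORT (10809) is used.
 */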

static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}
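
/*
 * For illustration: after legacy translation, nbd_config() sees a flat
 * sub-dict such as (placeholder values)
 *
 *   server.type=inet, server.host=localhost, server.port=10809
 *
 * which the input visitor turns into the QAPI SocketAddress union that the
 * connection code consumes.
 */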

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}
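
/*
 * The credentials object resolved above is created elsewhere, e.g. via an
 * illustrative command line (paths and ids are placeholders; nothing here
 * executes it):
 *
 *   -object tls-creds-x509,id=tls0,dir=/etc/pki/qemu,endpoint=client \
 *   -blockdev driver=nbd,node-name=nbd0,server.type=inet,\
 *             server.host=localhost,server.port=10809,tls-creds=tls0
 *
 * qcrypto_tls_creds_check_endpoint() then rejects objects that were created
 * with endpoint=server.
 */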


static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "tls-hostname",
            .type = QEMU_OPT_STRING,
            .help = "Override hostname for validating TLS x509 certificate",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        {
            .name = "open-timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "In seconds. If zero, the nbd driver tries the connection "
                    "only once, and fails to open if the connection fails. "
                    "If non-zero, the nbd driver will repeat connection "
                    "attempts until successful or until @open-timeout seconds "
                    "have elapsed. Default 0",
        },
        { /* end of list */ }
    },
};
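
/*
 * An illustrative -blockdev invocation exercising some of these runtime
 * options (node name, host and export are placeholders):
 *
 *   -blockdev driver=nbd,node-name=nbd0,server.type=inet,\
 *             server.host=localhost,server.port=10809,export=blk,\
 *             reconnect-delay=30,open-timeout=60
 *
 * The server.* keys are not listed above because they are left in @options
 * for nbd_config() rather than absorbed into the QemuOpts.
 */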

static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name too long to send to server");
        goto error;
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
        if (!s->tlshostname &&
            s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
            s->tlshostname = g_strdup(s->saddr->u.inet.host);
        }
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        goto error;
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
    s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);

    ret = 0;

error:
    qemu_opts_del(opts);
    return ret;
}

static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_mutex_init(&s->requests_lock);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        goto fail;
    }

    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds,
                                        s->tlshostname);

    if (s->open_timeout) {
        nbd_client_connection_enable_retry(s->conn);
        open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                        s->open_timeout * NANOSECONDS_PER_SECOND);
    }

    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ret = nbd_do_establish_connection(bs, true, errp);
    if (ret < 0) {
        goto fail;
    }

    /*
     * The connect attempt is done, so we no longer need this timer.
     * Delete it, because we do not want it to be around when this node
     * is drained or closed.
     */
    open_timer_del(s);

    nbd_client_connection_enable_retry(s->conn);

    return 0;

fail:
    open_timer_del(s);
    nbd_clear_bdrvstate(bs);
    return ret;
}

static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}
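
/*
 * Worked example of the fallback above: a server exporting a 1000-byte image
 * without advertising a minimum block size gets request_alignment 1, because
 * 1000 is not a multiple of BDRV_SECTOR_SIZE (512); a 4096-byte image from a
 * server without base:allocation support would instead fall back to 512-byte
 * alignment.
 */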

static void nbd_close(BlockDriverState *bs)
{
    nbd_client_close(bs);
    nbd_clear_bdrvstate(bs);
}

/*
 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
 * to a smaller size with exact=false, there is no reason to fail the
 * operation.
 *
 * Preallocation mode is ignored since it does not seem useful to fail when
 * we never change anything.
 */
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        error_setg(errp, "Cannot resize NBD nodes");
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        error_setg(errp, "Cannot grow NBD nodes");
        return -EINVAL;
    }

    return 0;
}

static int64_t coroutine_fn nbd_co_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}

static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;
    size_t len = 0;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s", host, port);
    }
    if (len >= sizeof(bs->exact_filename)) {
        /* Name is too long to represent exactly, so leave it empty. */
        bs->exact_filename[0] = '\0';
    }
}
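
/*
 * Pseudo-filenames produced above, with placeholder socket path, host and
 * export name:
 *
 *   nbd+unix:///blk?socket=/tmp/nbd.sock    UNIX socket, export "blk"
 *   nbd+unix://?socket=/tmp/nbd.sock        UNIX socket, default export
 *   nbd://localhost:10809/blk               TCP, export "blk"
 *   nbd://localhost:10809                   TCP, default export
 */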

static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is
     * no specification for how "export paths" would work, so NBD does not
     * have directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}

static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "tls-hostname",
    "server.",

    NULL
};

static void nbd_cancel_in_flight(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    reconnect_delay_timer_del(s);

    qemu_mutex_lock(&s->requests_lock);
    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    qemu_mutex_unlock(&s->requests_lock);

    nbd_co_establish_connection_cancel(s->conn);
}

static void nbd_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    BDRVNBDState *s = bs->opaque;

    /* The open_timer is used only during nbd_open() */
    assert(!s->open_timer);

    /*
     * The reconnect_delay_timer is scheduled in I/O paths when the
     * connection is lost, to cancel the reconnection attempt after a
     * given time. Once this attempt is done (successfully or not),
     * nbd_reconnect_attempt() ensures the timer is deleted before the
     * respective I/O request is resumed.
     * Since the AioContext can only be changed when a node is drained,
     * the reconnect_delay_timer cannot be active here.
     */
    assert(!s->reconnect_delay_timer);

    if (s->ioc) {
        qio_channel_attach_aio_context(s->ioc, new_context);
    }
}

static void nbd_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    assert(!s->reconnect_delay_timer);

    if (s->ioc) {
        qio_channel_detach_aio_context(s->ioc);
    }
}

static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);