1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Paolo Bonzini <pbonzini@redhat.com>
3 Date: Thu, 1 Jun 2017 12:44:56 +0200
4 Subject: [PATCH] nbd: make it thread-safe, fix qcow2 over nbd
6 NBD is not thread safe, because it accesses s->in_flight without
7 a CoMutex. Fixing this will be required for multiqueue.
8 CoQueue doesn't have spurious wakeups but, when another coroutine can
9 run between qemu_co_queue_next's wakeup and qemu_co_queue_wait's
10 re-locking of the mutex, the wait condition can become false and a loop is necessary.
13 In fact, it turns out that the loop is necessary even without this
14 multi-threaded scenario. A particular sequence of coroutine wakeups
15 is happening ~80% of the time when starting a guest with qcow2 image
16 served over NBD (i.e. qemu-nbd --format=raw, and QEMU's -drive option
17 has -format=qcow2). This patch fixes that issue too.
19 Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
21 block/nbd-client.c | 30 +++++++++---------------------
22 1 file changed, 9 insertions(+), 21 deletions(-)
24 diff --git a/block/nbd-client.c b/block/nbd-client.c
25 index 56eb0e2e16..282679b4f8 100644
26 --- a/block/nbd-client.c
27 +++ b/block/nbd-client.c
28 @@ -114,6 +114,10 @@ static int nbd_co_send_request(BlockDriverState *bs,
31 qemu_co_mutex_lock(&s->send_mutex);
32 + while (s->in_flight == MAX_NBD_REQUESTS) {
33 + qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
37 for (i = 0; i < MAX_NBD_REQUESTS; i++) {
38 if (s->recv_coroutine[i] == NULL) {
39 @@ -176,20 +180,6 @@ static void nbd_co_receive_reply(NBDClientSession *s,
43 -static void nbd_coroutine_start(NBDClientSession *s,
44 - NBDRequest *request)
46 - /* Poor man semaphore. The free_sema is locked when no other request
47 - * can be accepted, and unlocked after receiving one reply. */
48 - if (s->in_flight == MAX_NBD_REQUESTS) {
49 - qemu_co_queue_wait(&s->free_sema, NULL);
50 - assert(s->in_flight < MAX_NBD_REQUESTS);
54 - /* s->recv_coroutine[i] is set as soon as we get the send_lock. */
57 static void nbd_coroutine_end(BlockDriverState *bs,
60 @@ -197,13 +187,16 @@ static void nbd_coroutine_end(BlockDriverState *bs,
61 int i = HANDLE_TO_INDEX(s, request->handle);
63 s->recv_coroutine[i] = NULL;
65 - qemu_co_queue_next(&s->free_sema);
67 /* Kick the read_reply_co to get the next reply. */
68 if (s->read_reply_co) {
69 aio_co_wake(s->read_reply_co);
72 + qemu_co_mutex_lock(&s->send_mutex);
74 + qemu_co_queue_next(&s->free_sema);
75 + qemu_co_mutex_unlock(&s->send_mutex);
78 int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
79 @@ -221,7 +214,6 @@ int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
80 assert(bytes <= NBD_MAX_BUFFER_SIZE);
83 - nbd_coroutine_start(client, &request);
84 ret = nbd_co_send_request(bs, &request, NULL);
87 @@ -251,7 +243,6 @@ int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
89 assert(bytes <= NBD_MAX_BUFFER_SIZE);
91 - nbd_coroutine_start(client, &request);
92 ret = nbd_co_send_request(bs, &request, qiov);
95 @@ -286,7 +277,6 @@ int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
96 request.flags |= NBD_CMD_FLAG_NO_HOLE;
99 - nbd_coroutine_start(client, &request);
100 ret = nbd_co_send_request(bs, &request, NULL);
103 @@ -311,7 +301,6 @@ int nbd_client_co_flush(BlockDriverState *bs)
107 - nbd_coroutine_start(client, &request);
108 ret = nbd_co_send_request(bs, &request, NULL);
111 @@ -337,7 +326,6 @@ int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
115 - nbd_coroutine_start(client, &request);
116 ret = nbd_co_send_request(bs, &request, NULL);