From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Fiona Ebner <f.ebner@proxmox.com>
Date: Thu, 28 Sep 2023 10:07:03 +0200
Subject: [PATCH] Revert "Revert "graph-lock: Disable locking for now""

This reverts commit 3cce22defb4b0e47cf135444e30cc673cff5ebad.

There are still some issues with graph locking, e.g. deadlocks during
backup canceling [0]. Because the AioContext locks still exist, it
should be safe to disable locking again.

From the original 80fc5d2600 ("graph-lock: Disable locking for now"):

> We don't currently rely on graph locking yet. It is supposed to replace
> the AioContext lock eventually to enable multiqueue support, but as long
> as we still have the AioContext lock, it is sufficient without the graph
> lock. Once the AioContext lock goes away, the deadlock doesn't exist any
> more either and this commit can be reverted. (Of course, it can also be
> reverted while the AioContext lock still exists if the callers have been
> fixed.)

[0]: https://lists.nongnu.org/archive/html/qemu-devel/2023-09/msg00729.html
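
To illustrate the deadlock pattern named in the TODO comment added
below (a simplified sketch for illustration only; ctx and bs stand in
for whatever AioContext and node a real caller uses):

    aio_context_acquire(ctx);  /* caller already holds an AioContext lock */
    ...
    bdrv_graph_wrlock(bs);     /* polls until reader_count() == 0, but a
                                * reader coroutine in ctx can only make
                                * progress once ctx is released again */

Until such callers are fixed, taking the write lock can hang, so the
locking stays compiled out for now.
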
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
---
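With the #if 0 blocks reinstated below, the graph-lock primitives are
effectively reduced to no-ops; roughly sketched (illustrative, not
literal preprocessed output):

    void coroutine_fn bdrv_graph_co_rdlock(void)   { /* no-op */ }
    void coroutine_fn bdrv_graph_co_rdunlock(void) { /* no-op */ }
    void bdrv_graph_wrlock(BlockDriverState *bs)   { /* no-op */ }
    void bdrv_graph_wrunlock(void) { GLOBAL_STATE_CODE(); }
    void assert_bdrv_graph_readable(void)          { /* always passes */ }
    void assert_bdrv_graph_writable(void) { assert(qemu_in_main_thread()); }
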
 block/graph-lock.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/block/graph-lock.c b/block/graph-lock.c
index 5e66f01ae8..5c2873262a 100644
--- a/block/graph-lock.c
+++ b/block/graph-lock.c
@@ -30,8 +30,10 @@ BdrvGraphLock graph_lock;
 /* Protects the list of aiocontext and orphaned_reader_count */
 static QemuMutex aio_context_list_lock;
 
+#if 0
 /* Written and read with atomic operations. */
 static int has_writer;
+#endif
 
 /*
  * A reader coroutine could move from an AioContext to another.
@@ -88,6 +90,7 @@ void unregister_aiocontext(AioContext *ctx)
     g_free(ctx->bdrv_graph);
 }
 
+#if 0
 static uint32_t reader_count(void)
 {
     BdrvGraphRWlock *brdv_graph;
@@ -105,12 +108,19 @@ static uint32_t reader_count(void)
     assert((int32_t)rd >= 0);
     return rd;
 }
+#endif
 
 void bdrv_graph_wrlock(BlockDriverState *bs)
 {
+#if 0
     AioContext *ctx = NULL;
 
     GLOBAL_STATE_CODE();
+    /*
+     * TODO Some callers hold an AioContext lock when this is called, which
+     * causes deadlocks. Reenable once the AioContext locking is cleaned up (or
+     * AioContext locks are gone).
+     */
     assert(!qatomic_read(&has_writer));
 
     /*
@@ -158,11 +168,13 @@ void bdrv_graph_wrlock(BlockDriverState *bs)
     if (ctx) {
         aio_context_acquire(bdrv_get_aio_context(bs));
     }
+#endif
 }
 
 void bdrv_graph_wrunlock(void)
 {
     GLOBAL_STATE_CODE();
+#if 0
     QEMU_LOCK_GUARD(&aio_context_list_lock);
     assert(qatomic_read(&has_writer));
 
@@ -174,10 +186,13 @@ void bdrv_graph_wrunlock(void)
 
     /* Wake up all coroutine that are waiting to read the graph */
     qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
+#endif
 }
 
 void coroutine_fn bdrv_graph_co_rdlock(void)
 {
+    /* TODO Reenable when wrlock is reenabled */
+#if 0
     BdrvGraphRWlock *bdrv_graph;
     bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
 
@@ -237,10 +252,12 @@ void coroutine_fn bdrv_graph_co_rdlock(void)
             qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
         }
     }
+#endif
 }
 
 void coroutine_fn bdrv_graph_co_rdunlock(void)
 {
+#if 0
     BdrvGraphRWlock *bdrv_graph;
     bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
 
@@ -258,6 +275,7 @@ void coroutine_fn bdrv_graph_co_rdunlock(void)
     if (qatomic_read(&has_writer)) {
         aio_wait_kick();
     }
+#endif
 }
 
 void bdrv_graph_rdlock_main_loop(void)
@@ -275,13 +293,19 @@ void bdrv_graph_rdunlock_main_loop(void)
 void assert_bdrv_graph_readable(void)
 {
     /* reader_count() is slow due to aio_context_list_lock lock contention */
+    /* TODO Reenable when wrlock is reenabled */
+#if 0
 #ifdef CONFIG_DEBUG_GRAPH_LOCK
     assert(qemu_in_main_thread() || reader_count());
 #endif
+#endif
 }
 
 void assert_bdrv_graph_writable(void)
 {
     assert(qemu_in_main_thread());
+    /* TODO Reenable when wrlock is reenabled */
+#if 0
     assert(qatomic_read(&has_writer));
+#endif
 }