From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Mon, 7 Mar 2022 15:38:52 +0000
Subject: [PATCH] coroutine: use QEMU_DEFINE_STATIC_CO_TLS()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Thread-Local Storage variables cannot be used directly from coroutine
code because the compiler may optimize TLS variable accesses across
qemu_coroutine_yield() calls. When the coroutine is re-entered from
another thread the TLS variables from the old thread must no longer be
used.
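
A minimal sketch of the failure mode (illustrative only, not part of this
patch; the TLS variable and the coroutine function are made-up names):

    static __thread unsigned int counter;

    static void coroutine_fn example_co(void *opaque)
    {
        counter++;                /* TLS address resolved on thread A */
        qemu_coroutine_yield();   /* coroutine may be resumed on thread B */
        counter++;                /* the compiler may reuse the address it
                                   * computed before the yield, so this can
                                   * still touch thread A's counter */
    }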

Use QEMU_DEFINE_STATIC_CO_TLS() for the current and leader variables.
The alloc_pool QSLIST needs a typedef so the return value of
get_ptr_alloc_pool() can be stored in a local variable.
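
A sketch of how the macro-generated accessors are used, based on the calls
in the diff below (the wrapper function is made up; see
include/qemu/coroutine-tls.h for the real macro definition):

    typedef QSLIST_HEAD(, Coroutine) CoroutineQSList;
    QEMU_DEFINE_STATIC_CO_TLS(CoroutineQSList, alloc_pool);
    QEMU_DEFINE_STATIC_CO_TLS(unsigned int, alloc_pool_size);

    static void pool_insert_example(Coroutine *co)
    {
        /* get_ptr_<name>() returns a pointer to the current thread's copy */
        CoroutineQSList *alloc_pool = get_ptr_alloc_pool();

        QSLIST_INSERT_HEAD(alloc_pool, co, pool_next);
        /* plain values go through get_<name>()/set_<name>() */
        set_alloc_pool_size(get_alloc_pool_size() + 1);
    }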

One example of why this code is necessary: a coroutine that yields
before calling qemu_coroutine_create() to create another coroutine is
affected by the TLS issue.
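
Sketched out (hypothetical caller; parent_co and child_entry are made-up
names):

    static void coroutine_fn child_entry(void *opaque);

    static void coroutine_fn parent_co(void *opaque)
    {
        qemu_coroutine_yield();   /* may be re-entered from another thread */

        /* With the old __thread variables, the pool bookkeeping reached
         * from qemu_coroutine_create() could still resolve to the previous
         * thread's alloc_pool / alloc_pool_size. */
        Coroutine *child = qemu_coroutine_create(child_entry, NULL);
        qemu_coroutine_enter(child);
    }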

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20220307153853.602859-3-stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry-picked from commit ac387a08a9c9f6b36757da912f0339c25f421f90)
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
---
 util/qemu-coroutine.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index c03b2422ff..f3e8300c8d 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -18,6 +18,7 @@
 #include "qemu/atomic.h"
 #include "qemu/coroutine.h"
 #include "qemu/coroutine_int.h"
+#include "qemu/coroutine-tls.h"
 #include "block/aio.h"
 
 /** Initial batch size is 64, and is increased on demand */
@@ -29,17 +30,20 @@ enum {
 static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
 static unsigned int pool_batch_size = POOL_INITIAL_BATCH_SIZE;
 static unsigned int release_pool_size;
-static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(pool);
-static __thread unsigned int alloc_pool_size;
-static __thread Notifier coroutine_pool_cleanup_notifier;
+
+typedef QSLIST_HEAD(, Coroutine) CoroutineQSList;
+QEMU_DEFINE_STATIC_CO_TLS(CoroutineQSList, alloc_pool);
+QEMU_DEFINE_STATIC_CO_TLS(unsigned int, alloc_pool_size);
+QEMU_DEFINE_STATIC_CO_TLS(Notifier, coroutine_pool_cleanup_notifier);
 
 static void coroutine_pool_cleanup(Notifier *n, void *value)
 {
     Coroutine *co;
     Coroutine *tmp;
+    CoroutineQSList *alloc_pool = get_ptr_alloc_pool();
 
-    QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) {
-        QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
+    QSLIST_FOREACH_SAFE(co, alloc_pool, pool_next, tmp) {
+        QSLIST_REMOVE_HEAD(alloc_pool, pool_next);
         qemu_coroutine_delete(co);
     }
 }
@@ -49,27 +53,30 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
     Coroutine *co = NULL;
 
     if (CONFIG_COROUTINE_POOL) {
-        co = QSLIST_FIRST(&alloc_pool);
+        CoroutineQSList *alloc_pool = get_ptr_alloc_pool();
+
+        co = QSLIST_FIRST(alloc_pool);
         if (!co) {
             if (release_pool_size > qatomic_read(&pool_batch_size)) {
                 /* Slow path; a good place to register the destructor, too. */
-                if (!coroutine_pool_cleanup_notifier.notify) {
-                    coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup;
-                    qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier);
+                Notifier *notifier = get_ptr_coroutine_pool_cleanup_notifier();
+                if (!notifier->notify) {
+                    notifier->notify = coroutine_pool_cleanup;
+                    qemu_thread_atexit_add(notifier);
                 }
 
                 /* This is not exact; there could be a little skew between
                  * release_pool_size and the actual size of release_pool. But
                  * it is just a heuristic, it does not need to be perfect.
                  */
-                alloc_pool_size = qatomic_xchg(&release_pool_size, 0);
-                QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
-                co = QSLIST_FIRST(&alloc_pool);
+                set_alloc_pool_size(qatomic_xchg(&release_pool_size, 0));
+                QSLIST_MOVE_ATOMIC(alloc_pool, &release_pool);
+                co = QSLIST_FIRST(alloc_pool);
             }
         }
         if (co) {
-            QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
-            alloc_pool_size--;
+            QSLIST_REMOVE_HEAD(alloc_pool, pool_next);
+            set_alloc_pool_size(get_alloc_pool_size() - 1);
        }
     }
 
@@ -93,9 +100,9 @@ static void coroutine_delete(Coroutine *co)
             qatomic_inc(&release_pool_size);
             return;
         }
-        if (alloc_pool_size < qatomic_read(&pool_batch_size)) {
-            QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next);
-            alloc_pool_size++;
+        if (get_alloc_pool_size() < qatomic_read(&pool_batch_size)) {
+            QSLIST_INSERT_HEAD(get_ptr_alloc_pool(), co, pool_next);
+            set_alloc_pool_size(get_alloc_pool_size() + 1);
             return;
         }
     }