]> git.proxmox.com Git - pve-qemu.git/blob - debian/patches/pve/0042-PVE-Backup-use-QemuMutex-instead-of-QemuRecMutex.patch
b18762de42d8b5a05dbd0d6ab6900bfe324ac2fa
[pve-qemu.git] / debian / patches / pve / 0042-PVE-Backup-use-QemuMutex-instead-of-QemuRecMutex.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Dietmar Maurer <dietmar@proxmox.com>
3 Date: Fri, 17 Apr 2020 08:57:48 +0200
4 Subject: [PATCH] PVE Backup: use QemuMutex instead of QemuRecMutex
5
6 We acquire/release all mutexes outside coroutines now, so we can
7 correctly use a normal mutex.
8 ---
9 pve-backup.c | 58 ++++++++++++++++++++++++++--------------------------
10 1 file changed, 29 insertions(+), 29 deletions(-)
11
12 diff --git a/pve-backup.c b/pve-backup.c
13 index dddf430399..bb917ee972 100644
14 --- a/pve-backup.c
15 +++ b/pve-backup.c
16 @@ -31,7 +31,7 @@
17 static struct PVEBackupState {
18 struct {
19 // Everithing accessed from qmp_backup_query command is protected using lock
20 - QemuRecMutex lock;
21 + QemuMutex lock;
22 Error *error;
23 time_t start_time;
24 time_t end_time;
25 @@ -46,14 +46,14 @@ static struct PVEBackupState {
26 VmaWriter *vmaw;
27 ProxmoxBackupHandle *pbs;
28 GList *di_list;
29 - QemuRecMutex backup_mutex;
30 + QemuMutex backup_mutex;
31 CoMutex dump_callback_mutex;
32 } backup_state;
33
34 static void pvebackup_init(void)
35 {
36 - qemu_rec_mutex_init(&backup_state.stat.lock);
37 - qemu_rec_mutex_init(&backup_state.backup_mutex);
38 + qemu_mutex_init(&backup_state.stat.lock);
39 + qemu_mutex_init(&backup_state.backup_mutex);
40 qemu_co_mutex_init(&backup_state.dump_callback_mutex);
41 }
42
43 @@ -91,26 +91,26 @@ lookup_active_block_job(PVEBackupDevInfo *di)
44
45 static void pvebackup_propagate_error(Error *err)
46 {
47 - qemu_rec_mutex_lock(&backup_state.stat.lock);
48 + qemu_mutex_lock(&backup_state.stat.lock);
49 error_propagate(&backup_state.stat.error, err);
50 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
51 + qemu_mutex_unlock(&backup_state.stat.lock);
52 }
53
54 static bool pvebackup_error_or_canceled(void)
55 {
56 - qemu_rec_mutex_lock(&backup_state.stat.lock);
57 + qemu_mutex_lock(&backup_state.stat.lock);
58 bool error_or_canceled = !!backup_state.stat.error;
59 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
60 + qemu_mutex_unlock(&backup_state.stat.lock);
61
62 return error_or_canceled;
63 }
64
65 static void pvebackup_add_transfered_bytes(size_t transferred, size_t zero_bytes)
66 {
67 - qemu_rec_mutex_lock(&backup_state.stat.lock);
68 + qemu_mutex_lock(&backup_state.stat.lock);
69 backup_state.stat.zero_bytes += zero_bytes;
70 backup_state.stat.transferred += transferred;
71 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
72 + qemu_mutex_unlock(&backup_state.stat.lock);
73 }
74
75 // This may get called from multiple coroutines in multiple io-threads
76 @@ -226,9 +226,9 @@ static void coroutine_fn pvebackup_co_cleanup(void *unused)
77 {
78 assert(qemu_in_coroutine());
79
80 - qemu_rec_mutex_lock(&backup_state.stat.lock);
81 + qemu_mutex_lock(&backup_state.stat.lock);
82 backup_state.stat.end_time = time(NULL);
83 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
84 + qemu_mutex_unlock(&backup_state.stat.lock);
85
86 if (backup_state.vmaw) {
87 Error *local_err = NULL;
88 @@ -284,7 +284,7 @@ static void pvebackup_complete_cb(void *opaque, int ret)
89
90 PVEBackupDevInfo *di = opaque;
91
92 - qemu_rec_mutex_lock(&backup_state.backup_mutex);
93 + qemu_mutex_lock(&backup_state.backup_mutex);
94
95 di->completed = true;
96
97 @@ -305,7 +305,7 @@ static void pvebackup_complete_cb(void *opaque, int ret)
98
99 g_free(di);
100
101 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
102 + qemu_mutex_unlock(&backup_state.backup_mutex);
103
104 pvebackup_run_next_job();
105 }
106 @@ -318,7 +318,7 @@ static void pvebackup_cancel(void)
107 error_setg(&cancel_err, "backup canceled");
108 pvebackup_propagate_error(cancel_err);
109
110 - qemu_rec_mutex_lock(&backup_state.backup_mutex);
111 + qemu_mutex_lock(&backup_state.backup_mutex);
112
113 if (backup_state.vmaw) {
114 /* make sure vma writer does not block anymore */
115 @@ -329,13 +329,13 @@ static void pvebackup_cancel(void)
116 proxmox_backup_abort(backup_state.pbs, "backup canceled");
117 }
118
119 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
120 + qemu_mutex_unlock(&backup_state.backup_mutex);
121
122 for(;;) {
123
124 BlockJob *next_job = NULL;
125
126 - qemu_rec_mutex_lock(&backup_state.backup_mutex);
127 + qemu_mutex_lock(&backup_state.backup_mutex);
128
129 GList *l = backup_state.di_list;
130 while (l) {
131 @@ -349,7 +349,7 @@ static void pvebackup_cancel(void)
132 }
133 }
134
135 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
136 + qemu_mutex_unlock(&backup_state.backup_mutex);
137
138 if (next_job) {
139 AioContext *aio_context = next_job->job.aio_context;
140 @@ -423,7 +423,7 @@ static void pvebackup_run_next_job(void)
141 {
142 assert(!qemu_in_coroutine());
143
144 - qemu_rec_mutex_lock(&backup_state.backup_mutex);
145 + qemu_mutex_lock(&backup_state.backup_mutex);
146
147 GList *l = backup_state.di_list;
148 while (l) {
149 @@ -433,7 +433,7 @@ static void pvebackup_run_next_job(void)
150 BlockJob *job = lookup_active_block_job(di);
151
152 if (job) {
153 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
154 + qemu_mutex_unlock(&backup_state.backup_mutex);
155
156 AioContext *aio_context = job->job.aio_context;
157 aio_context_acquire(aio_context);
158 @@ -453,7 +453,7 @@ static void pvebackup_run_next_job(void)
159
160 block_on_coroutine_fn(pvebackup_co_cleanup, NULL); // no more jobs, run cleanup
161
162 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
163 + qemu_mutex_unlock(&backup_state.backup_mutex);
164 }
165
166 static bool create_backup_jobs(void) {
167 @@ -778,7 +778,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
168 }
169 /* initialize global backup_state now */
170
171 - qemu_rec_mutex_lock(&backup_state.stat.lock);
172 + qemu_mutex_lock(&backup_state.stat.lock);
173
174 if (backup_state.stat.error) {
175 error_free(backup_state.stat.error);
176 @@ -801,7 +801,7 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
177 backup_state.stat.transferred = 0;
178 backup_state.stat.zero_bytes = 0;
179
180 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
181 + qemu_mutex_unlock(&backup_state.stat.lock);
182
183 backup_state.speed = (task->has_speed && task->speed > 0) ? task->speed : 0;
184
185 @@ -895,16 +895,16 @@ UuidInfo *qmp_backup(
186 .errp = errp,
187 };
188
189 - qemu_rec_mutex_lock(&backup_state.backup_mutex);
190 + qemu_mutex_lock(&backup_state.backup_mutex);
191
192 block_on_coroutine_fn(pvebackup_co_prepare, &task);
193
194 if (*errp == NULL) {
195 create_backup_jobs();
196 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
197 + qemu_mutex_unlock(&backup_state.backup_mutex);
198 pvebackup_run_next_job();
199 } else {
200 - qemu_rec_mutex_unlock(&backup_state.backup_mutex);
201 + qemu_mutex_unlock(&backup_state.backup_mutex);
202 }
203
204 return task.result;
205 @@ -914,11 +914,11 @@ BackupStatus *qmp_query_backup(Error **errp)
206 {
207 BackupStatus *info = g_malloc0(sizeof(*info));
208
209 - qemu_rec_mutex_lock(&backup_state.stat.lock);
210 + qemu_mutex_lock(&backup_state.stat.lock);
211
212 if (!backup_state.stat.start_time) {
213 /* not started, return {} */
214 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
215 + qemu_mutex_unlock(&backup_state.stat.lock);
216 return info;
217 }
218
219 @@ -955,7 +955,7 @@ BackupStatus *qmp_query_backup(Error **errp)
220 info->has_transferred = true;
221 info->transferred = backup_state.stat.transferred;
222
223 - qemu_rec_mutex_unlock(&backup_state.stat.lock);
224 + qemu_mutex_unlock(&backup_state.stat.lock);
225
226 return info;
227 }