]> git.proxmox.com Git - pve-qemu-kvm.git/blob - debian/patches/old/backup-vma-remove-async-queue.patch
bump version to 2.9.0-1~rc2+5
[pve-qemu-kvm.git] / debian / patches / old / backup-vma-remove-async-queue.patch
1 We do not gain much speed here, so I removed the whole queue code
2 to make things simpler.
3
4 Also, the previous code produced segmentation faults in qemu_co_mutex_lock().
5
6 Index: new/vma-writer.c
7 ===================================================================
8 --- new.orig/vma-writer.c 2014-11-20 09:08:33.000000000 +0100
9 +++ new/vma-writer.c 2014-11-20 09:10:14.000000000 +0100
10 @@ -34,14 +34,8 @@
11 do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
12
13 #define WRITE_BUFFERS 5
14 -
15 -typedef struct VmaAIOCB VmaAIOCB;
16 -struct VmaAIOCB {
17 - unsigned char buffer[VMA_MAX_EXTENT_SIZE];
18 - VmaWriter *vmaw;
19 - size_t bytes;
20 - Coroutine *co;
21 -};
22 +#define HEADER_CLUSTERS 8
23 +#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
24
25 struct VmaWriter {
26 int fd;
27 @@ -53,16 +47,14 @@
28 bool closed;
29
30 /* we always write extents */
31 - unsigned char outbuf[VMA_MAX_EXTENT_SIZE];
32 + unsigned char *outbuf;
33 int outbuf_pos; /* in bytes */
34 int outbuf_count; /* in VMA_BLOCKS */
35 uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
36
37 - VmaAIOCB *aiocbs[WRITE_BUFFERS];
38 - CoQueue wqueue;
39 + unsigned char *headerbuf;
40
41 GChecksum *md5csum;
42 - CoMutex writer_lock;
43 CoMutex flush_lock;
44 Coroutine *co_writer;
45
46 @@ -223,38 +215,39 @@
47 }
48
49 static ssize_t coroutine_fn
50 -vma_co_write(VmaWriter *vmaw, const void *buf, size_t bytes)
51 +vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
52 {
53 - size_t done = 0;
54 - ssize_t ret;
55 + DPRINTF("vma_queue_write enter %zd\n", bytes);
56
57 - /* atomic writes (we cannot interleave writes) */
58 - qemu_co_mutex_lock(&vmaw->writer_lock);
59 + assert(vmaw);
60 + assert(buf);
61 + assert(bytes <= VMA_MAX_EXTENT_SIZE);
62
63 - DPRINTF("vma_co_write enter %zd\n", bytes);
64 + size_t done = 0;
65 + ssize_t ret;
66
67 assert(vmaw->co_writer == NULL);
68
69 vmaw->co_writer = qemu_coroutine_self();
70
71 - aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, NULL, vma_co_continue_write, vmaw);
72 -
73 - DPRINTF("vma_co_write wait until writable\n");
74 - qemu_coroutine_yield();
75 - DPRINTF("vma_co_write starting %zd\n", bytes);
76 -
77 while (done < bytes) {
78 + aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, NULL, vma_co_continue_write, vmaw);
79 + qemu_coroutine_yield();
80 + aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, NULL, NULL, NULL);
81 + if (vmaw->status < 0) {
82 + DPRINTF("vma_queue_write detected canceled backup\n");
83 + done = -1;
84 + break;
85 + }
86 ret = write(vmaw->fd, buf + done, bytes - done);
87 if (ret > 0) {
88 done += ret;
89 - DPRINTF("vma_co_write written %zd %zd\n", done, ret);
90 + DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
91 } else if (ret < 0) {
92 if (errno == EAGAIN || errno == EWOULDBLOCK) {
93 - DPRINTF("vma_co_write yield %zd\n", done);
94 - qemu_coroutine_yield();
95 - DPRINTF("vma_co_write restart %zd\n", done);
96 - } else {
97 - vma_writer_set_error(vmaw, "vma_co_write write error - %s",
98 + /* try again */
99 + } else {
100 + vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
101 g_strerror(errno));
102 done = -1; /* always return failure for partial writes */
103 break;
104 @@ -264,102 +257,9 @@
105 }
106 }
107
108 - aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, NULL, NULL, NULL);
109 -
110 vmaw->co_writer = NULL;
111 -
112 - qemu_co_mutex_unlock(&vmaw->writer_lock);
113 -
114 - DPRINTF("vma_co_write leave %zd\n", done);
115 - return done;
116 -}
117 -
118 -static void coroutine_fn vma_co_writer_task(void *opaque)
119 -{
120 - VmaAIOCB *cb = opaque;
121 -
122 - DPRINTF("vma_co_writer_task start\n");
123 -
124 - int64_t done = vma_co_write(cb->vmaw, cb->buffer, cb->bytes);
125 - DPRINTF("vma_co_writer_task write done %zd\n", done);
126 -
127 - if (done != cb->bytes) {
128 - DPRINTF("vma_co_writer_task failed write %zd %zd", cb->bytes, done);
129 - vma_writer_set_error(cb->vmaw, "vma_co_writer_task failed write %zd",
130 - done);
131 - }
132 -
133 - cb->bytes = 0;
134 -
135 - qemu_co_queue_next(&cb->vmaw->wqueue);
136 -
137 - DPRINTF("vma_co_writer_task end\n");
138 -}
139 -
140 -static void coroutine_fn vma_queue_flush(VmaWriter *vmaw)
141 -{
142 - DPRINTF("vma_queue_flush enter\n");
143 -
144 - assert(vmaw);
145 -
146 - while (1) {
147 - int i;
148 - VmaAIOCB *cb = NULL;
149 - for (i = 0; i < WRITE_BUFFERS; i++) {
150 - if (vmaw->aiocbs[i]->bytes) {
151 - cb = vmaw->aiocbs[i];
152 - DPRINTF("FOUND USED AIO BUFFER %d %zd\n", i,
153 - vmaw->aiocbs[i]->bytes);
154 - break;
155 - }
156 - }
157 - if (!cb) {
158 - break;
159 - }
160 - qemu_co_queue_wait(&vmaw->wqueue);
161 - }
162 -
163 - DPRINTF("vma_queue_flush leave\n");
164 -}
165 -
166 -/**
167 - * NOTE: pipe buffer size in only 4096 bytes on linux (see 'ulimit -a')
168 - * So we need to create a coroutione to allow 'parallel' execution.
169 - */
170 -static ssize_t coroutine_fn
171 -vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
172 -{
173 - DPRINTF("vma_queue_write enter %zd\n", bytes);
174 -
175 - assert(vmaw);
176 - assert(buf);
177 - assert(bytes <= VMA_MAX_EXTENT_SIZE);
178 -
179 - VmaAIOCB *cb = NULL;
180 - while (!cb) {
181 - int i;
182 - for (i = 0; i < WRITE_BUFFERS; i++) {
183 - if (!vmaw->aiocbs[i]->bytes) {
184 - cb = vmaw->aiocbs[i];
185 - break;
186 - }
187 - }
188 - if (!cb) {
189 - qemu_co_queue_wait(&vmaw->wqueue);
190 - }
191 - }
192 -
193 - memcpy(cb->buffer, buf, bytes);
194 - cb->bytes = bytes;
195 - cb->vmaw = vmaw;
196 -
197 - DPRINTF("vma_queue_write start %zd\n", bytes);
198 - cb->co = qemu_coroutine_create(vma_co_writer_task);
199 - qemu_coroutine_enter(cb->co, cb);
200 -
201 - DPRINTF("vma_queue_write leave\n");
202 -
203 - return bytes;
204 +
205 + return (done == bytes) ? bytes : -1;
206 }
207
208 VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
209 @@ -426,20 +326,16 @@
210 }
211
212 /* we use O_DIRECT, so we need to align IO buffers */
213 - int i;
214 - for (i = 0; i < WRITE_BUFFERS; i++) {
215 - vmaw->aiocbs[i] = qemu_memalign(512, sizeof(VmaAIOCB));
216 - memset(vmaw->aiocbs[i], 0, sizeof(VmaAIOCB));
217 - }
218 +
219 + vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
220 + vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
221
222 vmaw->outbuf_count = 0;
223 vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
224
225 vmaw->header_blob_table_pos = 1; /* start at pos 1 */
226
227 - qemu_co_mutex_init(&vmaw->writer_lock);
228 qemu_co_mutex_init(&vmaw->flush_lock);
229 - qemu_co_queue_init(&vmaw->wqueue);
230
231 uuid_copy(vmaw->uuid, uuid);
232
233 @@ -466,8 +362,7 @@
234 static int coroutine_fn vma_write_header(VmaWriter *vmaw)
235 {
236 assert(vmaw);
237 - int header_clusters = 8;
238 - char buf[65536*header_clusters];
239 + unsigned char *buf = vmaw->headerbuf;
240 VmaHeader *head = (VmaHeader *)buf;
241
242 int i;
243 @@ -478,7 +373,7 @@
244 return vmaw->status;
245 }
246
247 - memset(buf, 0, sizeof(buf));
248 + memset(buf, 0, HEADERBUF_SIZE);
249
250 head->magic = VMA_MAGIC;
251 head->version = GUINT32_TO_BE(1); /* v1 */
252 @@ -513,7 +408,7 @@
253 uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
254 head->header_size = GUINT32_TO_BE(header_size);
255
256 - if (header_size > sizeof(buf)) {
257 + if (header_size > HEADERBUF_SIZE) {
258 return -1; /* just to be sure */
259 }
260
261 @@ -811,13 +706,7 @@
262
263 int i;
264
265 - vma_queue_flush(vmaw);
266 -
267 - /* this should not happen - just to be sure */
268 - while (!qemu_co_queue_empty(&vmaw->wqueue)) {
269 - DPRINTF("vma_writer_close wait\n");
270 - co_aio_sleep_ns(qemu_get_aio_context(), QEMU_CLOCK_REALTIME, 1000000);
271 - }
272 + assert(vmaw->co_writer == NULL);
273
274 if (vmaw->cmd) {
275 if (pclose(vmaw->cmd) < 0) {
276 @@ -875,9 +764,5 @@
277 g_checksum_free(vmaw->md5csum);
278 }
279
280 - for (i = 0; i < WRITE_BUFFERS; i++) {
281 - free(vmaw->aiocbs[i]);
282 - }
283 -
284 g_free(vmaw);
285 }
286 Index: new/blockdev.c
287 ===================================================================
288 --- new.orig/blockdev.c 2014-11-20 09:08:33.000000000 +0100
289 +++ new/blockdev.c 2014-11-20 09:08:49.000000000 +0100
290 @@ -2094,6 +2094,11 @@
291 error_setg(&backup_state.error, "backup cancelled");
292 }
293
294 + if (backup_state.vmaw) {
295 + /* make sure vma writer does not block anymore */
296 + vma_writer_set_error(backup_state.vmaw, "backup cancelled");
297 + }
298 +
299 /* drain all i/o (awake jobs waiting for aio) */
300 bdrv_drain_all();
301
302 @@ -2106,6 +2111,7 @@
303 if (job) {
304 if (!di->completed) {
305 block_job_cancel_sync(job);
306 + bdrv_drain_all(); /* drain all i/o (awake jobs waiting for aio) */
307 }
308 }
309 }