From 2b93cabb41628355ef87e478ea4616e58019a85d Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Wed, 30 Nov 2016 10:27:47 +0100
Subject: [PATCH 47/47] glusterfs: allow partial reads

This should deal with QEMU bug #1644754 until upstream
decides which way to go. The general direction seems to be
away from sector-based block APIs; with that in mind, and
in comparison with other network block backends (e.g. NFS),
treating partial reads as errors doesn't make much sense.
---
 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index e712dc7..daf6cec 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -42,6 +42,7 @@ typedef struct GlusterAIOCB {
     int ret;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -705,8 +706,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     aio_co_schedule(acb->aio_context, acb->coroutine);
@@ -954,6 +957,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1076,9 +1080,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1142,6 +1148,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1188,6 +1195,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-- 
2.1.4

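For reference, a condensed sketch of how the completion callback decides the
result once this patch is applied. It is simplified from gluster_finish_aiocb()
in block/gluster.c: the struct is reduced to the fields involved, the leading
success check is abbreviated, the helper name gluster_set_result() is made up
for the sketch, and the real callback finishes by re-scheduling acb->coroutine
on acb->aio_context via aio_co_schedule().

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/types.h>

    /* Reduced stand-in for QEMU's GlusterAIOCB; only the fields used below. */
    typedef struct GlusterAIOCB {
        int64_t size;   /* number of bytes requested */
        int ret;        /* result reported back to the waiting coroutine */
        bool is_write;  /* set by the submit paths touched in this patch */
    } GlusterAIOCB;

    /* Decide the request result from the byte count glfs reported back. */
    static void gluster_set_result(GlusterAIOCB *acb, ssize_t ret)
    {
        if (ret == acb->size) {
            acb->ret = 0;           /* fully completed - success */
        } else if (ret < 0) {
            acb->ret = -errno;      /* read/write failed */
        } else if (acb->is_write) {
            acb->ret = -EIO;        /* partial write - still an error */
        } else {
            acb->ret = 0;           /* partial read - now treated as success */
        }
    }

Because the zerofill, fsync and discard submitters also set is_write = true, a
short completion on any of those paths still surfaces as -EIO; only
glfs_preadv_async() completions are allowed to return fewer bytes than
requested.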