From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Mon, 6 Apr 2020 12:16:38 +0200
Subject: [PATCH] PVE: [Up] glusterfs: allow partial reads

This should deal with QEMU bug #1644754 until upstream
decides which way to go. The general direction seems to be
away from sector-based block APIs; with that in mind, and
compared to other network block backends (e.g. NFS), treating
partial reads as errors doesn't make much sense. Partial
writes still fail with -EIO, since reporting a short write
as success could silently lose data.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

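For quick review, the net effect on the completion callback can be
read as the standalone C sketch below. (Text placed here, before the
first diff header, is ignored by git-apply/git-am.) The wrapper name
settle_ret and the struct name sketch_aiocb are made up for
illustration; the size field is paraphrased from the surrounding
GlusterAIOCB definition rather than shown in the hunks.

    #include <errno.h>
    #include <stdbool.h>
    #include <sys/types.h>

    /* Illustrative standalone rendering of the patched logic in
     * gluster_finish_aiocb(): ret is the byte count (or a negative
     * value, with errno set) reported by the glfs_*_async callback. */
    struct sketch_aiocb {
        ssize_t size;   /* bytes requested */
        bool is_write;  /* set by the submission paths in this patch */
    };

    static int settle_ret(const struct sketch_aiocb *acb, ssize_t ret)
    {
        if (!ret || ret == acb->size) {
            return 0;      /* complete transfer (or zero-length op) */
        } else if (ret < 0) {
            return -errno; /* the operation itself failed */
        } else if (acb->is_write) {
            return -EIO;   /* short write: still an error */
        } else {
            return 0;      /* short read: now reported as success */
        }
    }

The submission paths below set is_write before calling the respective
glfs_*_async function; zerofill, fsync, and discard all count as
writes, so a short completion there is never silently accepted.
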
diff --git a/block/gluster.c b/block/gluster.c
index 2e03102f00..7886c5fe8c 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -57,6 +57,7 @@ typedef struct GlusterAIOCB {
     int ret;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -752,8 +753,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret,
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     aio_co_schedule(acb->aio_context, acb->coroutine);
@@ -1022,6 +1025,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1203,9 +1207,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1268,6 +1274,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1316,6 +1323,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
     if (ret < 0) {