From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Wed, 30 Nov 2016 10:27:47 +0100
Subject: [PATCH] glusterfs: allow partial reads

This should deal with QEMU bug #1644754 until upstream
decides which way to go. The general direction seems to be
away from sector-based block APIs; with that in mind, and
comparing with other network block backends (e.g. NFS),
treating partial reads as errors doesn't seem to make much
sense.
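
For clarity, the completion logic in gluster_finish_aiocb
ends up looking roughly like this (a sketch; the initial
check against acb->size is the pre-existing code above the
hunk, not part of this change):

    if (!ret || ret == acb->size) {
        acb->ret = 0;      /* full (or zero-length) transfer */
    } else if (ret < 0) {
        acb->ret = -errno; /* read/write failed */
    } else if (acb->is_write) {
        acb->ret = -EIO;   /* partial write - still an error */
    } else {
        acb->ret = 0;      /* partial read - tolerated */
    }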
---
 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index 4e398af5c1..453c5824ce 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -41,6 +41,7 @@ typedef struct GlusterAIOCB {
     int ret;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -722,8 +723,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     aio_co_schedule(acb->aio_context, acb->coroutine);
@@ -971,6 +974,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1096,9 +1100,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1171,6 +1177,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1217,6 +1224,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-- 
2.11.0