From e9a50006a7f86adacff211fbd98d5b3ad79f22ef Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Wed, 30 Nov 2016 10:27:47 +0100
Subject: [PATCH 47/47] glusterfs: allow partial reads

This should deal with qemu bug #1644754 until upstream
decides which way to go. The general direction seems to be
away from sector based block APIs and with that in mind, and
when comparing to other network block backends (eg. nfs)
treating partial reads as errors doesn't seem to make much
sense.
---
 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index 6dcf926..17c51ed 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -39,6 +39,7 @@ typedef struct GlusterAIOCB {
     QEMUBH *bh;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -623,8 +624,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
@@ -861,6 +864,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -979,9 +983,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1044,6 +1050,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1090,6 +1097,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-- 
2.1.4