[Qemu-block] [RFC PATCH] glusterfs: allow partial reads


From: Wolfgang Bumiller
Subject: [Qemu-block] [RFC PATCH] glusterfs: allow partial reads
Date: Thu, 1 Dec 2016 11:59:24 +0100

Fixes #1644754.

Signed-off-by: Wolfgang Bumiller <address@hidden>
---
I'm not sure what the original rationale was for treating partial
reads as well as partial writes as I/O errors. (It seems to have
happened between the original glusterfs v1 and v2 series, with a note
but no reasoning for the read side as far as I could see.)
The general direction lately seems to be to move away from
sector-based block APIs. Also, e.g. the NFS code allows partial reads.
(It does, however, have an old patch (c2eb918e3) dedicated to aligning
sizes to 512-byte boundaries on file creation, for compatibility with
other parts of qemu such as qcow2. This already happens in glusterfs,
too, but if you move a file from a different storage over to
glusterfs you may end up with a qcow2 file where, e.g., the L1 table
occupies the last 80 bytes of the file and is aligned to _begin_ at a
512-byte boundary, but not to _end_ at one.)
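
To make that EOF scenario concrete, here is a minimal standalone
sketch (not part of the patch; the file name and sizes are made up)
showing how a 512-byte-aligned read whose range crosses EOF comes back
short at the POSIX level rather than failing -- the kind of short
count that gluster_finish_aiocb previously turned into -EIO on the
read path:

/* Hypothetical example: a file that ends 80 bytes past a 512-byte
 * boundary, read with a 512-byte request starting at that boundary. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
    const char *path = "l1-at-eof.img";   /* made-up file name */
    const off_t file_size = 4096 + 80;    /* last 80 bytes hold the "L1 table" */
    char buf[512];

    int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (fd < 0 || ftruncate(fd, file_size) < 0) {
        perror("setup");
        return EXIT_FAILURE;
    }

    /* The request begins at offset 4096 (512-aligned) but the file ends
     * 80 bytes later, so pread() returns 80, not 512. */
    ssize_t n = pread(fd, buf, sizeof(buf), 4096);
    printf("requested %zu bytes, got %zd\n", sizeof(buf), n);

    close(fd);
    return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}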

 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index 891c13b..3db0bf8 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -41,6 +41,7 @@ typedef struct GlusterAIOCB {
     int ret;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -716,8 +717,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
@@ -965,6 +968,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1087,9 +1091,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1153,6 +1159,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1199,6 +1206,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-- 
2.1.4




