
From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 02/10] dma-helpers: track position in the QEMUSGList
Date: Thu, 4 Aug 2011 19:14:40 +0200

The DMA helpers infrastructure cannot currently track how many bytes
have actually been transferred, so users cannot detect short transfers.
Adding an accessor to the DMAAIOCB does not fix this, however, because
the callback may have no access to the AIOCB at all if the transfer
completes synchronously.  In that case, the operation finishes before
the caller of bdrv_aio_{read,write}v has had any opportunity to store
the AIOCB anywhere.
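
For illustration only (MyState, my_cb and my_submit are made-up names,
not in the tree; dma_bdrv_read as currently declared in dma.h), the
hazard looks roughly like this:

    typedef struct MyState {
        BlockDriverState *bs;
        QEMUSGList sg;
        BlockDriverAIOCB *acb;
    } MyState;

    static void my_cb(void *opaque, int ret)
    {
        /* On synchronous completion this runs from inside
         * dma_bdrv_read(), before s->acb below is assigned, so there
         * is no AIOCB through which a byte count could be fetched. */
        MyState *s = opaque;
    }

    static void my_submit(MyState *s, uint64_t sector)
    {
        s->acb = dma_bdrv_read(s->bs, &s->sg, sector, my_cb, s);
    }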

So, augment the SGList API with functions that walk the QEMUSGList and
map its segments along the way.  Track the number of residual bytes,
and add a function to retrieve it as an int64_t (so that it can be
used even from code that has no access to target_phys_addr_t).
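
As a usage sketch (hypothetical device code, built only from the
functions this patch adds):

    /* Completion callback: a nonzero residual count means a short
     * transfer.  Here the device stored its QEMUSGList in opaque. */
    static void my_dma_complete(void *opaque, int ret)
    {
        QEMUSGList *qsg = opaque;

        if (ret < 0 || qemu_sglist_get_resid(qsg) > 0) {
            /* error, or fewer than qsg->size bytes were transferred */
        }
    }

The walking primitives compose the same way dma_bdrv_cb uses them
below (qsg and is_write being the device's own state):

    target_phys_addr_t len;
    void *mem;

    qemu_sglist_rewind(qsg);
    while ((mem = qemu_sglist_map_segment(qsg, &len, is_write)) != NULL) {
        /* append (mem, len) to an iovec, then submit the I/O */
    }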

An alternative would have been to add the AIOCB as a third parameter
to BlockDriverCompletionFunc (a sketch of the changed prototype
follows).  That would be a much bigger patch, but I can do it if
requested.
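
For comparison only, not implemented here, the prototype would become
something like:

    typedef void BlockDriverCompletionFunc(void *opaque, int ret,
                                           BlockDriverAIOCB *acb);

and every completion callback in the tree would have to grow the extra
parameter, hence the much bigger patch.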

Signed-off-by: Paolo Bonzini <address@hidden>
---
 dma-helpers.c |   64 +++++++++++++++++++++++++++++++++++++++++---------------
 dma.h         |   12 +++++++++-
 2 files changed, 57 insertions(+), 19 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index ba7f897..6a59f59 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -28,9 +28,51 @@ void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
     qsg->sg[qsg->nsg].base = base;
     qsg->sg[qsg->nsg].len = len;
     qsg->size += len;
+    qsg->resid += len;
     ++qsg->nsg;
 }
 
+void qemu_sglist_rewind(QEMUSGList *qsg)
+{
+    qsg->resid = qsg->size;
+    qsg->cur_index = 0;
+    qsg->cur_byte = 0;
+}
+
+int64_t qemu_sglist_get_resid(QEMUSGList *qsg)
+{
+    return qsg->resid;
+}
+
+void qemu_sglist_advance(QEMUSGList *qsg, target_phys_addr_t bytes)
+{
+    assert(qsg->cur_index < qsg->nsg);
+    assert(bytes <= qsg->sg[qsg->cur_index].len - qsg->cur_byte);
+    qsg->cur_byte += bytes;
+    qsg->resid -= bytes;
+    if (qsg->cur_byte == qsg->sg[qsg->cur_index].len) {
+        qsg->cur_index++;
+        qsg->cur_byte = 0;
+    }
+}
+
+void *qemu_sglist_map_segment(QEMUSGList *qsg, target_phys_addr_t *cur_len, bool is_write)
+{
+    target_phys_addr_t cur_addr;
+    void *mem;
+
+    if (qsg->cur_index == qsg->nsg) {
+        return NULL;
+    }
+    cur_addr = qsg->sg[qsg->cur_index].base + qsg->cur_byte;
+    *cur_len = qsg->sg[qsg->cur_index].len - qsg->cur_byte;
+    mem = cpu_physical_memory_map(cur_addr, cur_len, is_write);
+    if (mem) {
+        qemu_sglist_advance(qsg, *cur_len);
+    }
+    return mem;
+}
+
 void qemu_sglist_destroy(QEMUSGList *qsg)
 {
     qemu_free(qsg->sg);
@@ -43,8 +85,6 @@ typedef struct {
     QEMUSGList *sg;
     uint64_t sector_num;
     int is_write;
-    int sg_cur_index;
-    target_phys_addr_t sg_cur_byte;
     QEMUIOVector iov;
     QEMUBH *bh;
     DMAIOFunc *io_func;
@@ -83,33 +123,24 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
 static void dma_bdrv_cb(void *opaque, int ret)
 {
     DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-    target_phys_addr_t cur_addr, cur_len;
     void *mem;
+    target_phys_addr_t cur_len;
 
     dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     dma_bdrv_unmap(dbs);
     qemu_iovec_reset(&dbs->iov);
 
-    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
+    if (dbs->sg->cur_index == dbs->sg->nsg || ret < 0) {
         dbs->common.cb(dbs->common.opaque, ret);
         qemu_iovec_destroy(&dbs->iov);
         qemu_aio_release(dbs);
         return;
     }
 
-    while (dbs->sg_cur_index < dbs->sg->nsg) {
-        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
-        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
-        if (!mem)
-            break;
+    while ((mem = qemu_sglist_map_segment(dbs->sg, &cur_len,
+                                          !dbs->is_write)) != NULL) {
         qemu_iovec_add(&dbs->iov, mem, cur_len);
-        dbs->sg_cur_byte += cur_len;
-        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
-            dbs->sg_cur_byte = 0;
-            ++dbs->sg_cur_index;
-        }
     }
 
     if (dbs->iov.size == 0) {
@@ -151,12 +182,11 @@ BlockDriverAIOCB *dma_bdrv_io(
     dbs->bs = bs;
     dbs->sg = sg;
     dbs->sector_num = sector_num;
-    dbs->sg_cur_index = 0;
-    dbs->sg_cur_byte = 0;
     dbs->is_write = is_write;
     dbs->io_func = io_func;
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
+    qemu_sglist_rewind(sg);
     dma_bdrv_cb(dbs, 0);
     if (!dbs->acb) {
         qemu_aio_release(dbs);
diff --git a/dma.h b/dma.h
index f7e0142..363e932 100644
--- a/dma.h
+++ b/dma.h
@@ -27,15 +27,23 @@ struct QEMUSGList {
     ScatterGatherEntry *sg;
     int nsg;
     int nalloc;
+    int cur_index;
+    target_phys_addr_t cur_byte;
     target_phys_addr_t size;
+    target_phys_addr_t resid;
 };
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                      target_phys_addr_t len);
-void qemu_sglist_destroy(QEMUSGList *qsg);
+void qemu_sglist_advance(QEMUSGList *qsg, target_phys_addr_t bytes);
+void *qemu_sglist_map_segment(QEMUSGList *qsg, target_phys_addr_t *cur_len, bool is_write);
 #endif
 
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_rewind(QEMUSGList *qsg);
+void qemu_sglist_destroy(QEMUSGList *qsg);
+int64_t qemu_sglist_get_resid(QEMUSGList *qsg);
+
 typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *iov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque);
-- 
1.7.6