From: David Gibson
Subject: [Qemu-devel] [PATCH 04/14] ide: use the DMA memory access interface for PCI IDE controllers
Date: Fri, 3 Jun 2011 01:12:32 +1000

Emulated PCI IDE controllers now use the DMA memory access interface
instead of reading and writing guest memory directly. This also allows an
emulated IOMMU to translate and check their accesses.
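
For reference, these are the operations the code below relies on, declared
in the hw/dma_rw.h header this patch includes. The prototypes here are
reconstructed from the call sites in this patch rather than quoted from
the header, so treat them as a sketch; without an IOMMU they presumably
degrade to the plain cpu_physical_memory_*() calls they replace:

  void dma_memory_read(DeviceState *dev, dma_addr_t addr,
                       uint8_t *buf, dma_addr_t len);
  void dma_memory_write(DeviceState *dev, dma_addr_t addr,
                        const uint8_t *buf, dma_addr_t len);
  void *dma_memory_map(DeviceState *dev, void (*invalidate_cb)(void *),
                       void *opaque, dma_addr_t addr, dma_addr_t *len,
                       int is_write);
  void dma_memory_unmap(DeviceState *dev, void *buffer, dma_addr_t len,
                        int is_write, dma_addr_t access_len);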

Map invalidation now cancels any DMA transfer that still uses the affected
mapping. Since the guest OS cannot reliably recover the results of a DMA
operation once its mapping has changed, cancellation is a fair
approximation of real hardware behaviour.
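
Concretely, each dma_memory_map() call in the block-DMA helpers registers
dma_bdrv_cancel() (added below) as its invalidation callback. When the
IOMMU tears down a mapping the request holds, the callback unwinds the
whole request; annotated for reference:

  static void dma_bdrv_cancel(void *opaque)
  {
      DMAAIOCB *dbs = opaque;

      bdrv_aio_cancel(dbs->acb);       /* stop the in-flight block I/O */
      dma_bdrv_unmap(dbs);             /* drop the now-stale guest mappings */
      qemu_iovec_destroy(&dbs->iov);
      qemu_aio_release(dbs);           /* release the request itself */
  }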

Note this doesn't handle AHCI emulation yet!
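
Until then, the AHCI and macio code below passes a NULL device to
qemu_sglist_init(), which presumably keeps those paths on untranslated
physical-memory access. Converting them should amount to threading the
controller's DeviceState through, along the lines of the BMDMA wiring at
the end of this patch. A purely hypothetical sketch, with pci_dev standing
in for whatever PCIDevice the AHCI state hangs off:

  /* hypothetical follow-up, mirroring the BMDMA case */
  qemu_sglist_init(sglist, sglist_alloc_hint, &pci_dev->qdev);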

Signed-off-by: Eduard - Gabriel Munteanu <address@hidden>
Signed-off-by: David Gibson <address@hidden>
---
 dma-helpers.c     |   25 +++++++++++++++++++------
 dma.h             |   10 ++++++----
 hw/ide/ahci.c     |    3 ++-
 hw/ide/internal.h |    1 +
 hw/ide/macio.c    |    4 ++--
 hw/ide/pci.c      |   18 +++++++++++-------
 6 files changed, 41 insertions(+), 20 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index 712ed89..6a00df4 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -10,12 +10,13 @@
 #include "dma.h"
 #include "block_int.h"
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DeviceState *dev)
 {
     qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
     qsg->nsg = 0;
     qsg->nalloc = alloc_hint;
     qsg->size = 0;
+    qsg->dev = dev;
 }
 
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
@@ -73,16 +74,27 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
     int i;
 
     for (i = 0; i < dbs->iov.niov; ++i) {
-        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
-                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
-                                  dbs->iov.iov[i].iov_len);
+        dma_memory_unmap(dbs->sg->dev,
+                         dbs->iov.iov[i].iov_base,
+                         dbs->iov.iov[i].iov_len, !dbs->is_write,
+                         dbs->iov.iov[i].iov_len);
     }
 }
 
+static void dma_bdrv_cancel(void *opaque)
+{
+    DMAAIOCB *dbs = opaque;
+
+    bdrv_aio_cancel(dbs->acb);
+    dma_bdrv_unmap(dbs);
+    qemu_iovec_destroy(&dbs->iov);
+    qemu_aio_release(dbs);
+}
+
 static void dma_bdrv_cb(void *opaque, int ret)
 {
     DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-    target_phys_addr_t cur_addr, cur_len;
+    dma_addr_t cur_addr, cur_len;
     void *mem;
 
     dbs->acb = NULL;
@@ -100,7 +112,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        mem = dma_memory_map(dbs->sg->dev, dma_bdrv_cancel, dbs,
+                             cur_addr, &cur_len, !dbs->is_write);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
diff --git a/dma.h b/dma.h
index f3bb275..a2cd649 100644
--- a/dma.h
+++ b/dma.h
@@ -14,20 +14,22 @@
 //#include "cpu.h"
 #include "hw/hw.h"
 #include "block.h"
+#include "hw/dma_rw.h"
 
 typedef struct {
-    target_phys_addr_t base;
-    target_phys_addr_t len;
+    dma_addr_t base;
+    dma_addr_t len;
 } ScatterGatherEntry;
 
 typedef struct {
     ScatterGatherEntry *sg;
     int nsg;
     int nalloc;
-    target_phys_addr_t size;
+    dma_addr_t size;
+    DeviceState *dev;
 } QEMUSGList;
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DeviceState *dev);
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                      target_phys_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 1f008a3..5bc3f4a 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -680,7 +680,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
     if (sglist_alloc_hint > 0) {
         AHCI_SG *tbl = (AHCI_SG *)prdt;
 
-        qemu_sglist_init(sglist, sglist_alloc_hint);
+        /* FIXME: pass a proper DMADevice. */
+        qemu_sglist_init(sglist, sglist_alloc_hint, NULL);
         for (i = 0; i < sglist_alloc_hint; i++) {
             /* flags_size is zero-based */
             qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index c2b35ec..fd7e04f 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -474,6 +474,7 @@ struct IDEDMA {
     struct iovec iov;
     QEMUIOVector qiov;
     BlockDriverAIOCB *aiocb;
+    DeviceState *dev;
 };
 
 struct IDEBus {
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 7107f6b..a111481 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -78,7 +78,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
 
     s->io_buffer_size = io->len;
 
-    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
+    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
     qemu_sglist_add(&s->sg, io->addr, io->len);
     io->addr += io->len;
     io->len = 0;
@@ -140,7 +140,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
     s->io_buffer_index = 0;
     s->io_buffer_size = io->len;
 
-    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
+    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
     qemu_sglist_add(&s->sg, io->addr, io->len);
     io->addr += io->len;
     io->len = 0;
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index a4726ad..6ee2018 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -63,7 +63,8 @@ static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
     } prd;
     int l, len;
 
-    qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
+    qemu_sglist_init(&s->sg,
+                     s->nsector / (BMDMA_PAGE_SIZE / 512) + 1, dma->dev);
     s->io_buffer_size = 0;
     for(;;) {
         if (bm->cur_prd_len == 0) {
@@ -71,7 +72,7 @@ static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                 return s->io_buffer_size != 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            dma_memory_read(dma->dev, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -113,7 +114,7 @@ static int bmdma_rw_buf(IDEDMA *dma, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                 return 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            dma_memory_read(dma->dev, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -128,11 +129,11 @@ static int bmdma_rw_buf(IDEDMA *dma, int is_write)
             l = bm->cur_prd_len;
         if (l > 0) {
             if (is_write) {
-                cpu_physical_memory_write(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                dma_memory_write(dma->dev, bm->cur_prd_addr,
+                                 s->io_buffer + s->io_buffer_index, l);
             } else {
-                cpu_physical_memory_read(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                dma_memory_read(dma->dev, bm->cur_prd_addr,
+                                s->io_buffer + s->io_buffer_index, l);
             }
             bm->cur_prd_addr += l;
             bm->cur_prd_len -= l;
@@ -432,6 +433,9 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
             continue;
         ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
     }
+
+    d->bmdma[0].dma.dev = &dev->qdev;
+    d->bmdma[1].dma.dev = &dev->qdev;
 }
 
 static const struct IDEDMAOps bmdma_ops = {
-- 
1.7.4.4



