[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 10/13] iommu: Add facility to cancel in-use dma memory maps
From: |
David Gibson |
Subject: |
[Qemu-devel] [PATCH 10/13] iommu: Add facility to cancel in-use dma memory maps |
Date: |
Fri, 20 Apr 2012 14:16:54 +1000 |
One new complication raised by IOMMU support over only handling DMA
directly to physical addresses is handling the dma_memory_map() case
(replacing cpu_physical_memory_map()) when the IOMMU translations for the
IOVAs covered by such a map are invalidated or changed while the map
is active. This should never happen with correct guest software, but
we do need to handle buggy guests. This case might also occur during
handovers between different guest software stages if the handover
protocols aren't fully seamless.
Presently, we handle this by having the IOMMU driver use a helper to
wait (blocking the initiating CPU thread) for any such mappings to go
away before completing the IOMMU update operation. This is correct,
because maps are transient in all existing cases, but it's possible
that delay could be quite long.
This patch adds an infrastructure to reduce such delays, by
(optionally) signalling drivers holding maps with a callback when
their map is invalidated from under them. From this callback they
should cancel in-progress DMAs using the map. They should then unmap
the buffer ASAP, although this need not be synchronous with the
callback. This will allow the invalidated maps to disappear faster,
unblocking the CPU thread triggering the invalidation.
In addition, this adds a user of the infrastructure in the bdrv code -
the most common and potentially longest-lived user of maps.
Signed-off-by: David Gibson <address@hidden>
---
dma-helpers.c | 52 ++++++++++++++++++++++++++++++++--------------------
dma.h | 20 ++++++++++++++++----
2 files changed, 48 insertions(+), 24 deletions(-)
diff --git a/dma-helpers.c b/dma-helpers.c
index 09591ef..36fa963 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -107,6 +107,28 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
}
}
+static void dma_aio_cancel(BlockDriverAIOCB *acb)
+{
+ DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
+
+ trace_dma_aio_cancel(dbs);
+
+ if (dbs->acb) {
+ BlockDriverAIOCB *acb = dbs->acb;
+ dbs->acb = NULL;
+ dbs->in_cancel = true;
+ bdrv_aio_cancel(acb);
+ dbs->in_cancel = false;
+ }
+ dbs->common.cb = NULL;
+ dma_complete(dbs, 0);
+}
+
+static void dma_bdrv_cancel_cb(void *opaque)
+{
+ dma_aio_cancel(&((DMAAIOCB *)opaque)->common);
+}
+
static void dma_bdrv_cb(void *opaque, int ret)
{
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -127,7 +149,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
while (dbs->sg_cur_index < dbs->sg->nsg) {
cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
- mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
+ mem = dma_memory_map_with_cancel(dbs->sg->dma, dma_bdrv_cancel_cb, dbs,
+ cur_addr, &cur_len, dbs->dir);
if (!mem)
break;
qemu_iovec_add(&dbs->iov, mem, cur_len);
@@ -149,23 +172,6 @@ static void dma_bdrv_cb(void *opaque, int ret)
assert(dbs->acb);
}
-static void dma_aio_cancel(BlockDriverAIOCB *acb)
-{
- DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
-
- trace_dma_aio_cancel(dbs);
-
- if (dbs->acb) {
- BlockDriverAIOCB *acb = dbs->acb;
- dbs->acb = NULL;
- dbs->in_cancel = true;
- bdrv_aio_cancel(acb);
- dbs->in_cancel = false;
- }
- dbs->common.cb = NULL;
- dma_complete(dbs, 0);
-}
-
static AIOPool dma_aio_pool = {
.aiocb_size = sizeof(DMAAIOCB),
.cancel = dma_aio_cancel,
@@ -350,6 +356,8 @@ struct DMAMemoryMap {
dma_addr_t addr;
size_t len;
void *buf;
+ DMACancelMapFunc *cancel;
+ void *cancel_opaque;
DMAInvalidationState *invalidate;
QLIST_ENTRY(DMAMemoryMap) list;
@@ -364,7 +372,9 @@ void dma_context_init(DMAContext *dma, DMATranslateFunc fn)
QLIST_INIT(&dma->memory_maps);
}
-void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
+void *iommu_dma_memory_map(DMAContext *dma,
+ DMACancelMapFunc cb, void *cb_opaque,
+ dma_addr_t addr, dma_addr_t *len,
DMADirection dir)
{
int err;
@@ -397,6 +407,8 @@ void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t
addr, dma_addr_t *len,
map->len = *len;
map->buf = buf;
map->invalidate = NULL;
+ map->cancel = cb;
+ map->cancel_opaque = cb_opaque;
QLIST_INSERT_HEAD(&dma->memory_maps, map, list);
@@ -430,7 +442,6 @@ void iommu_dma_memory_unmap(DMAContext *dma, void *buffer,
dma_addr_t len,
}
}
-
/* unmap called on a buffer that wasn't mapped */
assert(false);
}
@@ -450,6 +461,7 @@ void iommu_wait_for_invalidated_maps(DMAContext *dma,
if (ranges_overlap(addr, len, map->addr, map->len)) {
is.count++;
map->invalidate = &is;
+ map->cancel(map->cancel_opaque);
}
}
diff --git a/dma.h b/dma.h
index b57d72f..51914c6 100644
--- a/dma.h
+++ b/dma.h
@@ -60,6 +60,8 @@ static inline bool dma_has_iommu(DMAContext *dma)
return !!dma;
}
+typedef void DMACancelMapFunc(void *);
+
/* Checks that the given range of addresses is valid for DMA. This is
* useful for certain cases, but usually you should just use
* dma_memory_{read,write}() and check for errors */
@@ -118,11 +120,15 @@ static inline int dma_memory_zero(DMAContext *dma,
dma_addr_t addr,
}
void *iommu_dma_memory_map(DMAContext *dma,
+ DMACancelMapFunc *cb, void *opaque,
dma_addr_t addr, dma_addr_t *len,
DMADirection dir);
-static inline void *dma_memory_map(DMAContext *dma,
- dma_addr_t addr, dma_addr_t *len,
- DMADirection dir)
+static inline void *dma_memory_map_with_cancel(DMAContext *dma,
+ DMACancelMapFunc *cb,
+ void *opaque,
+ dma_addr_t addr,
+ dma_addr_t *len,
+ DMADirection dir)
{
if (!dma_has_iommu(dma)) {
target_phys_addr_t xlen = *len;
@@ -133,9 +139,15 @@ static inline void *dma_memory_map(DMAContext *dma,
*len = xlen;
return p;
} else {
- return iommu_dma_memory_map(dma, addr, len, dir);
+ return iommu_dma_memory_map(dma, cb, opaque, addr, len, dir);
}
}
+static inline void *dma_memory_map(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir)
+{
+ return dma_memory_map_with_cancel(dma, NULL, NULL, addr, len, dir);
+}
void iommu_dma_memory_unmap(DMAContext *dma,
void *buffer, dma_addr_t len,
--
1.7.9.5
- [Qemu-devel] [PATCH 03/13] Implement cpu_physical_memory_zero(), (continued)
- [Qemu-devel] [PATCH 03/13] Implement cpu_physical_memory_zero(), David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 07/13] ide/ahci: Use universal DMA helper functions, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 04/13] iommu: Add universal DMA helper functions, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 01/13] Better support for dma_addr_t variables, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 12/13] iommu: Allow PCI to use IOMMU infrastructure, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 08/13] usb: Convert usb_packet_{map, unmap} to universal DMA helpers, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 02/13] usb-xhci: Use PCI DMA helper functions, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 13/13] pseries: Implement IOMMU and DMA for PAPR PCI devices, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 06/13] iommu: Make sglists and dma_bdrv helpers use new universal DMA helpers, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 09/13] iommu: Introduce IOMMU emulation infrastructure, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 10/13] iommu: Add facility to cancel in-use dma memory maps,
David Gibson <=
- [Qemu-devel] [PATCH 05/13] usb-ohci: Use universal DMA helper functions, David Gibson, 2012/04/20
- [Qemu-devel] [PATCH 11/13] pseries: Convert sPAPR TCEs to use generic IOMMU infrastructure, David Gibson, 2012/04/20
- Re: [Qemu-devel] [RFC 0/13] Support for guest visible IOMMUs, Kevin Wolf, 2012/04/20