From: Alexey Kardashevskiy
Subject: [Qemu-devel] [PATCH qemu v15 14/17] spapr_iommu, vfio, memory: Notify IOMMU about starting/stopping being used by VFIO
Date: Mon, 4 Apr 2016 19:33:43 +1000

The sPAPR TCE tables maintain two copies of the table when VFIO uses an IOMMU:
a guest view of the table and a hardware TCE table. If there is no VFIO
presence in the address space, only the guest view is used, and in that case
it is allocated in KVM. However, since there is no support yet for VFIO in
the KVM TCE hypercalls, when we start using VFIO we need to move the guest
view from KVM to userspace; and we need to do this for every IOMMU on a bus
with VFIO devices.

This adds vfio_start/vfio_stop callbacks to MemoryRegionIOMMUOps to notify
the IOMMU about the changing environment so it can reallocate the table
to/from KVM or (when available) hook the IOMMU groups up to the logical
bus (LIOBN) in KVM.
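
For quick reference, the new hooks amount to roughly the following (condensed
from the memory.h and common.c hunks below; not a standalone compilable unit):

    /* include/exec/memory.h (condensed): two new optional callbacks */
    struct MemoryRegionIOMMUOps {
        IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
        uint64_t (*get_page_sizes)(MemoryRegion *iommu);
        void (*vfio_start)(MemoryRegion *iommu);  /* VFIO starts using this IOMMU */
        void (*vfio_stop)(MemoryRegion *iommu);   /* VFIO stops using this IOMMU */
    };

    /* hw/vfio/common.c (condensed): the VFIO memory listener invokes the hooks
     * when an IOMMU memory region enters or leaves a VFIO container. */
    if (section->mr->iommu_ops && section->mr->iommu_ops->vfio_start) {
        section->mr->iommu_ops->vfio_start(section->mr);
    }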

This removes the explicit spapr_tce_set_need_vfio() call from the PCI hotplug
path, as the new callbacks do this better: they notify the IOMMU at the exact
moment the configuration changes, and they also cover PCI hot unplug.

As there can be multiple containers attached to the same PHB/LIOBN,
this replaces the @need_vfio flag in sPAPRTCETable with a counter
of VFIO users.
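
In short, the counter semantics in spapr_tce_set_need_vfio() become roughly
this (reconstructed from the spapr_iommu.c hunks below; the tail that actually
moves the table from KVM to userspace is unchanged and omitted):

    void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
    {
        tcet->vfio_users += need_vfio ? 1 : -1;
        g_assert(tcet->vfio_users >= 0);

        if (!tcet->vfio_users) {
            /* FIXME: transition back to KVM accelerated TCEs not supported yet */
            return;
        }
        if (tcet->vfio_users > 1) {
            /* Table is already in userspace, nothing to be done */
            return;
        }
        /* First VFIO user: move the guest view from KVM to userspace */
        ...
    }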

Signed-off-by: Alexey Kardashevskiy <address@hidden>
---
Changes:
v15:
* s/need_vfio/vfio_users/g
---
 hw/ppc/spapr_iommu.c   | 30 ++++++++++++++++++++----------
 hw/ppc/spapr_pci.c     |  6 ------
 hw/vfio/common.c       |  9 +++++++++
 include/exec/memory.h  |  4 ++++
 include/hw/ppc/spapr.h |  2 +-
 5 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index c945dba..ea09414 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -155,6 +155,16 @@ static uint64_t spapr_tce_get_page_sizes(MemoryRegion *iommu)
     return 1ULL << tcet->page_shift;
 }
 
+static void spapr_tce_vfio_start(MemoryRegion *iommu)
+{
+    spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true);
+}
+
+static void spapr_tce_vfio_stop(MemoryRegion *iommu)
+{
+    spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), false);
+}
+
 static void spapr_tce_table_do_enable(sPAPRTCETable *tcet);
 static void spapr_tce_table_do_disable(sPAPRTCETable *tcet);
 
@@ -239,6 +249,8 @@ static const VMStateDescription vmstate_spapr_tce_table = {
 static MemoryRegionIOMMUOps spapr_iommu_ops = {
     .translate = spapr_tce_translate_iommu,
     .get_page_sizes = spapr_tce_get_page_sizes,
+    .vfio_start = spapr_tce_vfio_start,
+    .vfio_stop = spapr_tce_vfio_stop,
 };
 
 static int spapr_tce_table_realize(DeviceState *dev)
@@ -248,7 +260,7 @@ static int spapr_tce_table_realize(DeviceState *dev)
     char tmp[32];
 
     tcet->fd = -1;
-    tcet->need_vfio = false;
+    tcet->vfio_users = 0;
     snprintf(tmp, sizeof(tmp), "tce-root-%x", tcet->liobn);
     memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);
 
@@ -268,20 +280,18 @@ void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
     size_t table_size = tcet->nb_table * sizeof(uint64_t);
     void *newtable;
 
-    if (need_vfio == tcet->need_vfio) {
-        /* Nothing to do */
-        return;
-    }
+    tcet->vfio_users += need_vfio ? 1 : -1;
+    g_assert(tcet->vfio_users >= 0);
+    g_assert(tcet->table);
 
-    if (!need_vfio) {
+    if (!tcet->vfio_users) {
         /* FIXME: We don't support transition back to KVM accelerated
          * TCEs yet */
         return;
     }
 
-    tcet->need_vfio = true;
-
-    if (tcet->fd < 0) {
+    if (tcet->vfio_users > 1) {
+        g_assert(tcet->fd < 0);
         /* Table is already in userspace, nothing to be done */
         return;
     }
@@ -327,7 +337,7 @@ static void spapr_tce_table_do_enable(sPAPRTCETable *tcet)
                                         tcet->page_shift,
                                         tcet->nb_table,
                                         &tcet->fd,
-                                        tcet->need_vfio);
+                                        tcet->vfio_users != 0);
 
     memory_region_set_size(&tcet->iommu,
                            (uint64_t)tcet->nb_table << tcet->page_shift);
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 5497a18..f864fde 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1083,12 +1083,6 @@ static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
     void *fdt = NULL;
     int fdt_start_offset = 0, fdt_size;
 
-    if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
-        sPAPRTCETable *tcet = spapr_tce_find_by_liobn(phb->dma_liobn);
-
-        spapr_tce_set_need_vfio(tcet, true);
-    }
-
     if (dev->hotplugged) {
         fdt = create_device_tree(&fdt_size);
         fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0);
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index ea79311..5e5b77c 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -421,6 +421,9 @@ static void vfio_listener_region_add(MemoryListener *listener,
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
 
         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
+        if (section->mr->iommu_ops && section->mr->iommu_ops->vfio_start) {
+            section->mr->iommu_ops->vfio_start(section->mr);
+        }
         memory_region_iommu_replay(giommu->iommu, &giommu->n,
                                    false);
 
@@ -466,6 +469,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
     hwaddr iova, end;
     int ret;
+    MemoryRegion *iommu = NULL;
 
     if (vfio_listener_skipped_section(section)) {
         trace_vfio_listener_region_del_skip(
@@ -487,6 +491,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
         QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
             if (giommu->iommu == section->mr) {
                 memory_region_unregister_iommu_notifier(&giommu->n);
+                iommu = giommu->iommu;
                 QLIST_REMOVE(giommu, giommu_next);
                 g_free(giommu);
                 break;
@@ -519,6 +524,10 @@ static void vfio_listener_region_del(MemoryListener *listener,
                      "0x%"HWADDR_PRIx") = %d (%m)",
                      container, iova, end - iova, ret);
     }
+
+    if (iommu && iommu->iommu_ops && iommu->iommu_ops->vfio_stop) {
+        iommu->iommu_ops->vfio_stop(section->mr);
+    }
 }
 
 static const MemoryListener vfio_memory_listener = {
diff --git a/include/exec/memory.h b/include/exec/memory.h
index eb5ce67..f1de133f 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -152,6 +152,10 @@ struct MemoryRegionIOMMUOps {
     IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
     /* Returns supported page sizes */
     uint64_t (*get_page_sizes)(MemoryRegion *iommu);
+    /* Called when VFIO starts using this */
+    void (*vfio_start)(MemoryRegion *iommu);
+    /* Called when VFIO stops using this */
+    void (*vfio_stop)(MemoryRegion *iommu);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 471eb4a..5c00e38 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -548,7 +548,7 @@ struct sPAPRTCETable {
     uint32_t mig_nb_table;
     uint64_t *mig_table;
     bool bypass;
-    bool need_vfio;
+    int vfio_users;
     int fd;
     MemoryRegion root, iommu;
     struct VIOsPAPRDevice *vdev; /* for @bypass migration compatibility only */
-- 
2.5.0.rc3



