[RFC v8 15/28] vfio: Pass stage 1 MSI bindings to the host
From: Eric Auger
Subject: [RFC v8 15/28] vfio: Pass stage 1 MSI bindings to the host
Date: Thu, 25 Feb 2021 11:52:20 +0100
We register the stage 1 MSI bindings when enabling the vectors
and unregister them on MSI disable.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v7 -> v8:
- add unregistration on msix_disable
- remove vfio_container_unbind_msis()
v4 -> v5:
- use VFIO_IOMMU_SET_MSI_BINDING
v2 -> v3:
- only register the notifier if the IOMMU translates MSIs
- record the msi bindings in a container list and unregister on
container release
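
Note: the VFIO_IOMMU_SET_MSI_BINDING uapi used below comes from the
companion kernel series and is not part of this QEMU patch. As a reading
aid only, here is a minimal sketch of the assumed linux-headers
definition, reconstructed from the fields this patch uses; the exact
field layout and the ioctl number are assumptions, not the authoritative
kernel definition:

/*
 * Reading aid: hypothetical reconstruction of the kernel uapi assumed
 * by this patch (the authoritative definition lives in the companion
 * kernel series; the ioctl number below is a placeholder).
 */
struct vfio_iommu_type1_set_msi_binding {
        __u32   argsz;
        __u32   flags;
#define VFIO_IOMMU_BIND_MSI     (1 << 0) /* register a stage 1 MSI binding */
#define VFIO_IOMMU_UNBIND_MSI   (1 << 1) /* remove the binding at @iova */
        __u64   iova;   /* guest IOVA of the MSI doorbell */
        __u64   gpa;    /* guest PA the doorbell translates to */
        __u64   size;   /* size of the mapping, in bytes */
};
#define VFIO_IOMMU_SET_MSI_BINDING      _IO(VFIO_TYPE, VFIO_BASE + 20)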
---
 hw/vfio/common.c              | 59 +++++++++++++++++++++++++++
 hw/vfio/pci.c                 | 76 ++++++++++++++++++++++++++++++++++-
 hw/vfio/trace-events          |  2 +
 include/hw/vfio/vfio-common.h | 12 ++++++
 4 files changed, 147 insertions(+), 2 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 9bd40f5299..8a64ba414b 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -667,6 +667,65 @@ static void vfio_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     }
 }

+int vfio_iommu_set_msi_binding(VFIOContainer *container, int n,
+                               IOMMUTLBEntry *iotlb)
+{
+    struct vfio_iommu_type1_set_msi_binding ustruct;
+    VFIOMSIBinding *binding;
+    int ret;
+
+    QLIST_FOREACH(binding, &container->msibinding_list, next) {
+        if (binding->index == n) {
+            return 0;
+        }
+    }
+
+    ustruct.argsz = sizeof(struct vfio_iommu_type1_set_msi_binding);
+    ustruct.iova = iotlb->iova;
+    ustruct.flags = VFIO_IOMMU_BIND_MSI;
+    ustruct.gpa = iotlb->translated_addr;
+    ustruct.size = iotlb->addr_mask + 1;
+    ret = ioctl(container->fd, VFIO_IOMMU_SET_MSI_BINDING, &ustruct);
+    if (ret) {
+        error_report("%s: failed to register the stage1 MSI binding (%m)",
+                     __func__);
+        return ret;
+    }
+    binding = g_new0(VFIOMSIBinding, 1);
+    binding->iova = ustruct.iova;
+    binding->gpa = ustruct.gpa;
+    binding->size = ustruct.size;
+    binding->index = n;
+
+    QLIST_INSERT_HEAD(&container->msibinding_list, binding, next);
+    return 0;
+}
+
+int vfio_iommu_unset_msi_binding(VFIOContainer *container, int n)
+{
+    struct vfio_iommu_type1_set_msi_binding ustruct;
+    VFIOMSIBinding *binding, *tmp;
+    int ret;
+
+    ustruct.argsz = sizeof(struct vfio_iommu_type1_set_msi_binding);
+    QLIST_FOREACH_SAFE(binding, &container->msibinding_list, next, tmp) {
+        if (binding->index != n) {
+            continue;
+        }
+        ustruct.flags = VFIO_IOMMU_UNBIND_MSI;
+        ustruct.iova = binding->iova;
+        ret = ioctl(container->fd, VFIO_IOMMU_SET_MSI_BINDING, &ustruct);
+        if (ret) {
+            error_report("Failed to unregister the stage1 MSI binding "
+                         "for iova=0x%"PRIx64" (%m)", binding->iova);
+        }
+        QLIST_REMOVE(binding, next);
+        g_free(binding);
+        return ret;
+    }
+    return 0;
+}
+
 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index b28e58db34..573c74b466 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -366,6 +366,65 @@ static void vfio_msi_interrupt(void *opaque)
     notify(&vdev->pdev, nr);
 }

+static bool vfio_iommu_require_msi_binding(IOMMUMemoryRegion *iommu_mr)
+{
+    bool msi_translate = false, nested = false;
+
+    memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_MSI_TRANSLATE,
+                                 (void *)&msi_translate);
+    memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_VFIO_NESTED,
+                                 (void *)&nested);
+    if (!nested || !msi_translate) {
+        return false;
+    }
+    return true;
+}
+
+static int vfio_register_msi_binding(VFIOPCIDevice *vdev,
+                                     int vector_n, bool set)
+{
+    VFIOContainer *container = vdev->vbasedev.group->container;
+    PCIDevice *dev = &vdev->pdev;
+    AddressSpace *as = pci_device_iommu_address_space(dev);
+    IOMMUMemoryRegionClass *imrc;
+    IOMMUMemoryRegion *iommu_mr;
+    IOMMUTLBEntry entry;
+    MSIMessage msg;
+
+    if (as == &address_space_memory) {
+        return 0;
+    }
+
+    iommu_mr = IOMMU_MEMORY_REGION(as->root);
+    if (!vfio_iommu_require_msi_binding(iommu_mr)) {
+        return 0;
+    }
+
+    /* MSI doorbell address is translated by an IOMMU */
+
+    if (!set) { /* unregister */
+        trace_vfio_unregister_msi_binding(vdev->vbasedev.name, vector_n);
+
+        return vfio_iommu_unset_msi_binding(container, vector_n);
+    }
+
+    msg = pci_get_msi_message(dev, vector_n);
+    imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
+
+    rcu_read_lock();
+    entry = imrc->translate(iommu_mr, msg.address, IOMMU_WO, 0);
+    rcu_read_unlock();
+
+    if (entry.perm == IOMMU_NONE) {
+        return -ENOENT;
+    }
+
+    trace_vfio_register_msi_binding(vdev->vbasedev.name, vector_n,
+                                    msg.address, entry.translated_addr);
+
+    return vfio_iommu_set_msi_binding(container, vector_n, &entry);
+}
+
 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
 {
     struct vfio_irq_set *irq_set;
@@ -383,7 +442,7 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
     fds = (int32_t *)&irq_set->data;

     for (i = 0; i < vdev->nr_vectors; i++) {
-        int fd = -1;
+        int ret, fd = -1;

         /*
          * MSI vs MSI-X - The guest has direct access to MSI mask and pending
@@ -392,6 +451,12 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
          * KVM signaling path only when configured and unmasked.
          */
         if (vdev->msi_vectors[i].use) {
+            ret = vfio_register_msi_binding(vdev, i, true);
+            if (ret) {
+                error_report("%s failed to register S1 MSI binding "
+                             "for vector %d(%d)", vdev->vbasedev.name, i, ret);
+                goto out;
+            }
             if (vdev->msi_vectors[i].virq < 0 ||
                 (msix && msix_is_masked(&vdev->pdev, i))) {
                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
@@ -405,6 +470,7 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)

     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

+out:
     g_free(irq_set);

     return ret;
@@ -705,7 +771,8 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev)

 static void vfio_msix_disable(VFIOPCIDevice *vdev)
 {
-    int i;
+    int ret, i;
+
     msix_unset_vector_notifiers(&vdev->pdev);
@@ -717,6 +784,11 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev)
         if (vdev->msi_vectors[i].use) {
             vfio_msix_vector_release(&vdev->pdev, i);
             msix_vector_unuse(&vdev->pdev, i);
+            ret = vfio_register_msi_binding(vdev, i, false);
+            if (ret) {
+                error_report("%s: failed to unregister S1 MSI binding "
+                             "for vector %d(%d)", vdev->vbasedev.name, i, ret);
+            }
         }
     }
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 35fd74833c..8e2a297a4c 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -122,6 +122,8 @@ vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype
 vfio_dma_unmap_overflow_workaround(void) ""
 vfio_iommu_addr_inv_iotlb(int asid, uint64_t addr, uint64_t size, uint64_t nb_granules, bool leaf) "nested IOTLB invalidate asid=%d, addr=0x%"PRIx64" granule_size=0x%"PRIx64" nb_granules=0x%"PRIx64" leaf=%d"
 vfio_iommu_asid_inv_iotlb(int asid) "nested IOTLB invalidate asid=%d"
+vfio_register_msi_binding(const char *name, int vector, uint64_t giova, uint64_t gdb) "%s: register vector %d gIOVA=0x%"PRIx64" -> gDB=0x%"PRIx64" stage 1 mapping"
+vfio_unregister_msi_binding(const char *name, int vector) "%s: unregister vector %d stage 1 mapping"
 # platform.c
 vfio_platform_base_device_init(char *name, int groupid) "%s belongs to group #%d"
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 6141162d7a..f30133b2a3 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -74,6 +74,14 @@ typedef struct VFIOAddressSpace {
     QLIST_ENTRY(VFIOAddressSpace) list;
 } VFIOAddressSpace;

+typedef struct VFIOMSIBinding {
+    int index;
+    hwaddr iova;
+    hwaddr gpa;
+    hwaddr size;
+    QLIST_ENTRY(VFIOMSIBinding) next;
+} VFIOMSIBinding;
+
 struct VFIOGroup;

 typedef struct VFIOContainer {
@@ -91,6 +99,7 @@ typedef struct VFIOContainer {
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
     QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
     QLIST_HEAD(, VFIOGroup) group_list;
+    QLIST_HEAD(, VFIOMSIBinding) msibinding_list;
     QLIST_ENTRY(VFIOContainer) next;
 } VFIOContainer;
@@ -200,6 +209,9 @@ VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp);
 void vfio_put_group(VFIOGroup *group);
 int vfio_get_device(VFIOGroup *group, const char *name,
                     VFIODevice *vbasedev, Error **errp);
+int vfio_iommu_set_msi_binding(VFIOContainer *container, int n,
+                               IOMMUTLBEntry *entry);
+int vfio_iommu_unset_msi_binding(VFIOContainer *container, int n);
 extern const MemoryRegionOps vfio_region_ops;

 typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
--
2.26.2
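
As a usage illustration only (not code from this series): given the uapi
sketched above, the bind side of the ioctl could be driven from a minimal
standalone userspace client against a VFIO container fd roughly as
follows; the helper name and error handling are illustrative assumptions.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Relies on the sketched vfio_iommu_type1_set_msi_binding uapi above. */
static int msi_bind(int container_fd, uint64_t giova, uint64_t gpa,
                    uint64_t size)
{
    struct vfio_iommu_type1_set_msi_binding bind;

    memset(&bind, 0, sizeof(bind));
    bind.argsz = sizeof(bind);
    bind.flags = VFIO_IOMMU_BIND_MSI;  /* stage 1: gIOVA -> gPA doorbell */
    bind.iova = giova;
    bind.gpa = gpa;
    bind.size = size;
    /* 0 on success; -1 with errno set on failure, as for any VFIO ioctl */
    return ioctl(container_fd, VFIO_IOMMU_SET_MSI_BINDING, &bind);
}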