From: Duan, Zhenzhong
Subject: RE: [RFC 4/7] virtio-iommu: Implement PCIIOMMUOps set_host_resv_regions
Date: Thu, 18 Jan 2024 07:43:08 +0000
Hi Eric,
>-----Original Message-----
>From: Eric Auger <eric.auger@redhat.com>
>Subject: [RFC 4/7] virtio-iommu: Implement PCIIOMMUOps
>set_host_resv_regions
>
>Reuse the implementation of virtio_iommu_set_iova_ranges() which
>will be removed in subsequent patches.
>
>Signed-off-by: Eric Auger <eric.auger@redhat.com>
>---
> hw/virtio/virtio-iommu.c | 134 +++++++++++++++++++++++++++++----------
> 1 file changed, 101 insertions(+), 33 deletions(-)
>
>diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
>index 8a4bd933c6..716a3fcfbf 100644
>--- a/hw/virtio/virtio-iommu.c
>+++ b/hw/virtio/virtio-iommu.c
>@@ -461,8 +461,109 @@ static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
> return &sdev->as;
> }
>
>+/**
>+ * rebuild_resv_regions: rebuild resv regions with both the
>+ * info of host resv ranges and property set resv ranges
>+ */
>+static int rebuild_resv_regions(IOMMUDevice *sdev)
>+{
>+ GList *l;
>+ int i = 0;
>+
>+ /* free the existing list and rebuild it from scratch */
>+ g_list_free_full(sdev->resv_regions, g_free);
>+ sdev->resv_regions = NULL;
>+
>+ /* First add host reserved regions if any, all tagged as RESERVED */
>+ for (l = sdev->host_resv_ranges; l; l = l->next) {
>+ ReservedRegion *reg = g_new0(ReservedRegion, 1);
>+ Range *r = (Range *)l->data;
>+
>+ reg->type = VIRTIO_IOMMU_RESV_MEM_T_RESERVED;
>+ range_set_bounds(&reg->range, range_lob(r), range_upb(r));
>+ sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
>+ trace_virtio_iommu_host_resv_regions(sdev->iommu_mr.parent_obj.name, i,
>+ range_lob(&reg->range),
>+ range_upb(&reg->range));
>+ i++;
>+ }
>+ /*
>+ * then add higher priority reserved regions set by the machine
>+ * through properties
>+ */
>+ add_prop_resv_regions(sdev);
>+ return 0;
>+}
>+
>+static int virtio_iommu_set_host_iova_ranges(PCIBus *bus, void *opaque,
>+ int devfn, GList *iova_ranges,
>+ Error **errp)
>+{
>+ VirtIOIOMMU *s = opaque;
>+ IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
>+ IOMMUDevice *sdev;
>+ GList *current_ranges;
>+ GList *l, *tmp, *new_ranges = NULL;
>+ int ret = -EINVAL;
>+
>+ if (!sbus) {
>+ error_report("%s no sbus", __func__);
>+ }
Do we plan to support multiple devices in the same IOMMU group?
as_by_busptr is hashed by bus, which is the aliased bus of the device.
So sbus may be NULL if the device's bus differs from the aliased bus.
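Just to illustrate the point, a minimal sketch of bailing out on the lookup
failure instead of only reporting it (untested, on top of this patch, the
message text is just an example):

    if (!sbus) {
        /* lookup keyed by the aliased bus failed, propagate the error */
        error_setg(errp, "%s: no IOMMUPciBus found for bus %p",
                   __func__, bus);
        return -EINVAL;
    }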
>+
>+ sdev = sbus->pbdev[devfn];
>+
>+ current_ranges = sdev->host_resv_ranges;
>+
>+ warn_report("%s: host_resv_regions=%d", __func__, !!sdev->host_resv_ranges);
>+ /* check that each new resv region is included in an existing one */
>+ if (sdev->host_resv_ranges) {
Maybe we could just error out here, as vfio_realize should not call
set_host_iova_ranges() twice for the same device.
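Something like the below is what I have in mind (only a rough sketch,
assuming a second call is really unexpected, wording of the message is
just an example):

    /* vfio_realize is not expected to call this twice for one device */
    if (sdev->host_resv_ranges) {
        error_setg(errp, "%s: host reserved ranges already set for devfn %d",
                   __func__, devfn);
        return -EINVAL;
    }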
Thanks
Zhenzhong
>+ range_inverse_array(iova_ranges,
>+ &new_ranges,
>+ 0, UINT64_MAX);
>+
>+ for (tmp = new_ranges; tmp; tmp = tmp->next) {
>+ Range *newr = (Range *)tmp->data;
>+ bool included = false;
>+
>+ for (l = current_ranges; l; l = l->next) {
>+ Range * r = (Range *)l->data;
>+
>+ if (range_contains_range(r, newr)) {
>+ included = true;
>+ break;
>+ }
>+ }
>+ if (!included) {
>+ goto error;
>+ }
>+ }
>+ /* all new reserved ranges are included in existing ones */
>+ ret = 0;
>+ goto out;
>+ }
>+
>+ if (sdev->probe_done) {
>+ warn_report("%s: Notified about new host reserved regions after probe",
>+ __func__);
>+ }
>+
>+ range_inverse_array(iova_ranges,
>+ &sdev->host_resv_ranges,
>+ 0, UINT64_MAX);
>+ rebuild_resv_regions(sdev);
>+
>+ return 0;
>+error:
>+ error_setg(errp, "%s Conflicting host reserved ranges set!",
>+ __func__);
>+out:
>+ g_list_free_full(new_ranges, g_free);
>+ return ret;
>+}
>+
> static const PCIIOMMUOps virtio_iommu_ops = {
> .get_address_space = virtio_iommu_find_add_as,
>+ .set_host_iova_ranges = virtio_iommu_set_host_iova_ranges,
> };
>
> static int virtio_iommu_attach(VirtIOIOMMU *s,
>@@ -1158,39 +1259,6 @@ static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
> return 0;
> }
>
>-/**
>- * rebuild_resv_regions: rebuild resv regions with both the
>- * info of host resv ranges and property set resv ranges
>- */
>-static int rebuild_resv_regions(IOMMUDevice *sdev)
>-{
>- GList *l;
>- int i = 0;
>-
>- /* free the existing list and rebuild it from scratch */
>- g_list_free_full(sdev->resv_regions, g_free);
>- sdev->resv_regions = NULL;
>-
>- /* First add host reserved regions if any, all tagged as RESERVED */
>- for (l = sdev->host_resv_ranges; l; l = l->next) {
>- ReservedRegion *reg = g_new0(ReservedRegion, 1);
>- Range *r = (Range *)l->data;
>-
>- reg->type = VIRTIO_IOMMU_RESV_MEM_T_RESERVED;
>- range_set_bounds(&reg->range, range_lob(r), range_upb(r));
>- sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
>- trace_virtio_iommu_host_resv_regions(sdev->iommu_mr.parent_obj.name, i,
>- range_lob(&reg->range),
>- range_upb(&reg->range));
>- i++;
>- }
>- /*
>- * then add higher priority reserved regions set by the machine
>- * through properties
>- */
>- add_prop_resv_regions(sdev);
>- return 0;
>-}
>
> /**
> * virtio_iommu_set_iova_ranges: Conveys the usable IOVA ranges
>--
>2.41.0