From: Sahil Siddiq
Subject: [RFC v4 4/5] vdpa: Allocate memory for svq and map them to vdpa
Date: Fri, 6 Dec 2024 02:04:29 +0530
Allocate memory for the packed vq format and map it to the vdpa
device.

Since "struct vring" and the vring member of "struct vring_packed"
have the same memory layout, the implementations of svq start and
svq stop should not differ based on the vq's format.
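For reference, a minimal sketch of the two layouts (field names follow
the Linux uapi virtio_ring.h header and the svq data structures from
patch 3 of this series; illustrative only, not the patch's actual
definitions):

    struct vring_desc;
    struct vring_avail;
    struct vring_used;
    struct vring_packed_desc;
    struct vring_packed_desc_event;

    /* Split format: three separately allocated areas. */
    struct vring {
        unsigned int num;
        struct vring_desc *desc;    /* descriptor area */
        struct vring_avail *avail;  /* driver area */
        struct vring_used *used;    /* device area */
    };

    /* Packed format: the nested vring member carries the same three
     * pointers in the same order, so svq->vring.desc/avail/used line
     * up for both formats and start/stop can treat them uniformly. */
    struct vring_packed {
        struct {
            unsigned int num;
            struct vring_packed_desc *desc;
            struct vring_packed_desc_event *driver; /* avail's slot */
            struct vring_packed_desc_event *device; /* used's slot */
        } vring;
    };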
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v3 -> v4:
- Based on commit #3 of v3.
- vhost-shadow-virtqueue.c
(vhost_svq_memory_packed): Remove function.
(vhost_svq_driver_area_size, vhost_svq_descriptor_area_size): Decouple functions.
(vhost_svq_device_area_size): Rewrite function.
(vhost_svq_start): Simplify implementation.
(vhost_svq_stop): Unconditionally munmap().
- vhost-shadow-virtqueue.h: New function declaration.
- vhost-vdpa.c
(vhost_vdpa_svq_unmap_rings): Call vhost_vdpa_svq_unmap_ring().
(vhost_vdpa_svq_map_rings): New mappings.
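Note (explanatory, not part of the patch): in the packed format the
driver and device areas hold the 4-byte event suppression structures
rather than the avail/used rings, which is where the sizeof(uint32_t)
sizes below come from; the descriptor ring is written by both driver
and device (the device writes used descriptors back in place), so the
descriptor region becomes IOMMU_RW and the device region IOMMU_WO. A
minimal sketch checking the size assumption against the uapi header:

    #include <stdint.h>
    #include <linux/virtio_ring.h>  /* struct vring_packed_desc_event */

    /* Packed event suppression struct: __le16 off_wrap + __le16 flags,
     * i.e. exactly the sizeof(uint32_t) used for the packed
     * driver/device area sizes (before page alignment). */
    _Static_assert(sizeof(struct vring_packed_desc_event) == sizeof(uint32_t),
                   "packed event suppression area is 4 bytes");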
hw/virtio/vhost-shadow-virtqueue.c | 47 ++++++++++++++++++++----------
hw/virtio/vhost-shadow-virtqueue.h | 1 +
hw/virtio/vhost-vdpa.c | 34 +++++++++++++++++----
3 files changed, 60 insertions(+), 22 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 6eee01ab3c..be06b12c9a 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -314,7 +314,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -EINVAL;
}
- if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (svq->is_packed) {
vhost_svq_add_packed(svq, out_sg, out_num, in_sg,
in_num, sgs, &qemu_head);
} else {
@@ -661,19 +661,33 @@ void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}
-size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
+size_t vhost_svq_descriptor_area_size(const VhostShadowVirtqueue *svq)
{
size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
- size_t avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
- sizeof(uint16_t);
+ return ROUND_UP(desc_size, qemu_real_host_page_size());
+}
- return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
+size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
+{
+ size_t avail_size;
+ if (svq->is_packed) {
+ avail_size = sizeof(uint32_t);
+ } else {
+ avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
+ sizeof(uint16_t);
+ }
+ return ROUND_UP(avail_size, qemu_real_host_page_size());
}
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
- size_t used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
- sizeof(uint16_t);
+ size_t used_size;
+ if (svq->is_packed) {
+ used_size = sizeof(uint32_t);
+ } else {
+ used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
+ sizeof(uint16_t);
+ }
return ROUND_UP(used_size, qemu_real_host_page_size());
}
@@ -718,8 +732,6 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq, VhostIOVATree *iova_tree)
{
- size_t desc_size;
-
event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
svq->next_guest_avail_elem = NULL;
svq->shadow_avail_idx = 0;
@@ -728,20 +740,22 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->vdev = vdev;
svq->vq = vq;
svq->iova_tree = iova_tree;
+ svq->is_packed = virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED);
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
- svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
+ svq->vring.desc = mmap(NULL, vhost_svq_descriptor_area_size(svq),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
- desc_size = sizeof(vring_desc_t) * svq->vring.num;
- svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
+ svq->vring.avail = mmap(NULL, vhost_svq_driver_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
- svq->desc_state = g_new0(SVQDescState, svq->vring.num);
- svq->desc_next = g_new0(uint16_t, svq->vring.num);
- for (unsigned i = 0; i < svq->vring.num - 1; i++) {
+ svq->desc_state = g_new0(SVQDescState, svq->num_free);
+ svq->desc_next = g_new0(uint16_t, svq->num_free);
+ for (unsigned i = 0; i < svq->num_free - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
}
}
@@ -781,7 +795,8 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
svq->vq = NULL;
g_free(svq->desc_next);
g_free(svq->desc_state);
- munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
+ munmap(svq->vring.desc, vhost_svq_descriptor_area_size(svq));
+ munmap(svq->vring.avail, vhost_svq_driver_area_size(svq));
munmap(svq->vring.used, vhost_svq_device_area_size(svq));
event_notifier_set_handler(&svq->hdev_call, NULL);
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index ce89bafedc..6c0e0c4f67 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -151,6 +151,7 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
struct vhost_vring_addr *addr);
+size_t vhost_svq_descriptor_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3cdaa12ed5..97ed569792 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1134,6 +1134,8 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
+ vhost_vdpa_svq_unmap_ring(v, svq_addr.avail_user_addr);
+
vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}
@@ -1181,38 +1183,58 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
Error **errp)
{
ERRP_GUARD();
- DMAMap device_region, driver_region;
+ DMAMap descriptor_region, device_region, driver_region;
struct vhost_vring_addr svq_addr;
struct vhost_vdpa *v = dev->opaque;
+ size_t descriptor_size = vhost_svq_descriptor_area_size(svq);
size_t device_size = vhost_svq_device_area_size(svq);
size_t driver_size = vhost_svq_driver_area_size(svq);
- size_t avail_offset;
bool ok;
vhost_svq_get_vring_addr(svq, &svq_addr);
- driver_region = (DMAMap) {
+ descriptor_region = (DMAMap) {
.translated_addr = svq_addr.desc_user_addr,
+ .size = descriptor_size - 1,
+ .perm = IOMMU_RO,
+ };
+ if (svq->is_packed) {
+ descriptor_region.perm = IOMMU_RW;
+ }
+
+ ok = vhost_vdpa_svq_map_ring(v, &descriptor_region, errp);
+ if (unlikely(!ok)) {
+ error_prepend(errp, "Cannot create vq descriptor region: ");
+ return false;
+ }
+ addr->desc_user_addr = descriptor_region.iova;
+
+ driver_region = (DMAMap) {
+ .translated_addr = svq_addr.avail_user_addr,
.size = driver_size - 1,
.perm = IOMMU_RO,
};
ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq driver region: ");
+ vhost_vdpa_svq_unmap_ring(v, descriptor_region.translated_addr);
return false;
}
- addr->desc_user_addr = driver_region.iova;
- avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
- addr->avail_user_addr = driver_region.iova + avail_offset;
+ addr->avail_user_addr = driver_region.iova;
device_region = (DMAMap) {
.translated_addr = svq_addr.used_user_addr,
.size = device_size - 1,
.perm = IOMMU_RW,
};
+ if (svq->is_packed) {
+ device_region.perm = IOMMU_WO;
+ }
+
ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq device region: ");
+ vhost_vdpa_svq_unmap_ring(v, descriptor_region.translated_addr);
vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
}
addr->used_user_addr = device_region.iova;
--
2.47.0