[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC v2 3/3] vhost: Allocate memory for packed vring.
From: Sahil Siddiq <sahilcdq@proton.me>
Subject: [RFC v2 3/3] vhost: Allocate memory for packed vring.
Date: Fri, 26 Jul 2024 15:28:22 +0530
Allocate memory for the packed vq format and support
packed vq in the SVQ "start" operation.
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v1 -> v2:
* vhost-shadow-virtqueue.h
(struct VhostShadowVirtqueue): New member "is_packed"
(vhost_svq_get_vring_addr): Renamed function.
(vhost_svq_get_vring_addr_packed): New function.
(vhost_svq_memory_packed): Likewise.
* vhost-shadow-virtqueue.c:
(vhost_svq_add): Use "is_packed" to check vq format.
(vhost_svq_get_vring_addr): Rename function.
(vhost_svq_get_vring_addr_packed): New function; not yet implemented.
(vhost_svq_memory_packed): New function.
(vhost_svq_start): Support packed vq format.
* vhost-vdpa.c
(vhost_svq_get_vring_addr): Rename function.
hw/virtio/vhost-shadow-virtqueue.c | 70 ++++++++++++++++++++++--------
hw/virtio/vhost-shadow-virtqueue.h | 10 ++++-
hw/virtio/vhost-vdpa.c | 4 +-
3 files changed, 63 insertions(+), 21 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c
b/hw/virtio/vhost-shadow-virtqueue.c
index c7b7e0c477..045c07304c 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -343,7 +343,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -ENOSPC;
}
- if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (svq->is_packed) {
ok = vhost_svq_add_packed(svq, out_sg, out_num,
in_sg, in_num, &qemu_head);
} else {
@@ -679,18 +679,29 @@ void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
}
/**
- * Get the shadow vq vring address.
+ * Get the split shadow vq vring address.
* @svq: Shadow virtqueue
* @addr: Destination to store address
*/
-void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
- struct vhost_vring_addr *addr)
+void vhost_svq_get_vring_addr_split(const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr)
{
addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}
+/**
+ * Get the packed shadow vq vring address.
+ * @svq: Shadow virtqueue
+ * @addr: Destination to store address
+ */
+void vhost_svq_get_vring_addr_packed(const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr)
+{
+ /* TODO */
+}
+
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
{
size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
@@ -707,6 +718,16 @@ size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
return ROUND_UP(used_size, qemu_real_host_page_size());
}
+size_t vhost_svq_memory_packed(const VhostShadowVirtqueue *svq)
+{
+ size_t desc_size = sizeof(struct vring_packed_desc) * svq->num_free;
+ size_t driver_event_suppression = sizeof(struct vring_packed_desc_event);
+ size_t device_event_suppression = sizeof(struct vring_packed_desc_event);
+
+    return ROUND_UP(desc_size + driver_event_suppression + device_event_suppression,
+                    qemu_real_host_page_size());
+}
+
/**
* Set a new file descriptor for the guest to kick the SVQ and notify for avail
*
@@ -759,19 +780,34 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->vq = vq;
svq->iova_tree = iova_tree;
- svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
- svq->num_free = svq->vring.num;
- svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
- -1, 0);
- desc_size = sizeof(vring_desc_t) * svq->vring.num;
- svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
- svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
- -1, 0);
- svq->desc_state = g_new0(SVQDescState, svq->vring.num);
- svq->desc_next = g_new0(uint16_t, svq->vring.num);
- for (unsigned i = 0; i < svq->vring.num - 1; i++) {
+    if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+        svq->is_packed = true;
+        svq->vring_packed.vring.num = virtio_queue_get_num(vdev,
+                                                           virtio_get_queue_index(vq));
+        svq->num_free = svq->vring_packed.vring.num;
+        svq->vring_packed.vring.desc = mmap(NULL, vhost_svq_memory_packed(svq),
+                                            PROT_READ | PROT_WRITE,
+                                            MAP_SHARED | MAP_ANONYMOUS,
+                                            -1, 0);
+        /* Use the packed ring's size here, not svq->vring.num, which belongs
+         * to the split layout and is uninitialized in this branch. */
+        desc_size = sizeof(struct vring_packed_desc) * svq->vring_packed.vring.num;
+        svq->vring_packed.vring.driver = (void *)((char *)svq->vring_packed.vring.desc +
+                                                  desc_size);
+        svq->vring_packed.vring.device = (void *)((char *)svq->vring_packed.vring.driver +
+                                                  sizeof(struct vring_packed_desc_event));
+    } else {
+        svq->is_packed = false;
+        svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
+        svq->num_free = svq->vring.num;
+        svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
+                               PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+                               -1, 0);
+        desc_size = sizeof(vring_desc_t) * svq->vring.num;
+        svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
+        svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
+                               PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+                               -1, 0);
+    }
+
+ svq->desc_state = g_new0(SVQDescState, svq->num_free);
+ svq->desc_next = g_new0(uint16_t, svq->num_free);
+ for (unsigned i = 0; i < svq->num_free - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
}
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h
b/hw/virtio/vhost-shadow-virtqueue.h
index ee1a87f523..b396daf57d 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -67,6 +67,9 @@ struct vring_packed {
/* Shadow virtqueue to relay notifications */
typedef struct VhostShadowVirtqueue {
+ /* True if packed virtqueue */
+ bool is_packed;
+
/* Virtio queue shadowing */
VirtQueue *vq;
@@ -146,10 +149,13 @@ size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num);
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
-void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
- struct vhost_vring_addr *addr);
+void vhost_svq_get_vring_addr_split(const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr);
+void vhost_svq_get_vring_addr_packed(const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr);
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
+size_t vhost_svq_memory_packed(const VhostShadowVirtqueue *svq);
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq, VhostIOVATree *iova_tree);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3cdaa12ed5..688de4a662 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1130,7 +1130,7 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
struct vhost_vdpa *v = dev->opaque;
struct vhost_vring_addr svq_addr;
- vhost_svq_get_vring_addr(svq, &svq_addr);
+ vhost_svq_get_vring_addr_split(svq, &svq_addr);
vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
@@ -1189,7 +1189,7 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
size_t avail_offset;
bool ok;
- vhost_svq_get_vring_addr(svq, &svq_addr);
+ vhost_svq_get_vring_addr_split(svq, &svq_addr);
driver_region = (DMAMap) {
.translated_addr = svq_addr.desc_user_addr,
--
2.45.2