[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC v2 1/3] vhost: Introduce packed vq and add buffer elements
From: Sahil Siddiq
Subject: [RFC v2 1/3] vhost: Introduce packed vq and add buffer elements
Date: Fri, 26 Jul 2024 15:28:20 +0530
This is the first patch in a series to add support for packed
virtqueues in vhost_shadow_virtqueue. This patch implements the
insertion of available buffers in the descriptor area. It takes
into account descriptor chains, but does not consider indirect
descriptors.
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v1 -> v2:
* Split commit from RFC v1 into two commits.
* vhost-shadow-virtqueue.c
(vhost_svq_add_packed):
- Merge with "vhost_svq_vring_write_descs_packed()"
- Remove "num == 0" check
hw/virtio/vhost-shadow-virtqueue.c | 93 +++++++++++++++++++++++++++++-
1 file changed, 92 insertions(+), 1 deletion(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index fc5f408f77..c7b7e0c477 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -217,6 +217,91 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
return true;
}
+/*
+ * Write an element's descriptor chain into the packed vring's descriptor
+ * area.  Supports descriptor chains; indirect descriptors are not handled.
+ *
+ * @svq: Shadow VirtQueue to add the element to
+ * @out_sg, @out_num: device-readable buffers
+ * @in_sg, @in_num: device-writable buffers
+ * @head: out parameter, ring index of the chain's first descriptor
+ *
+ * Returns false if the element has no descriptors or if address
+ * translation to SVQ IOVA fails; true otherwise.
+ */
+static bool vhost_svq_add_packed(VhostShadowVirtqueue *svq,
+                                 const struct iovec *out_sg, size_t out_num,
+                                 const struct iovec *in_sg, size_t in_num,
+                                 unsigned *head)
+{
+    bool ok;
+    /* Flags of the head descriptor: stashed and written last (see below) */
+    uint16_t head_flags = 0;
+    g_autofree hwaddr *sgs = g_new(hwaddr, out_num + in_num);
+
+    *head = svq->vring_packed.next_avail_idx;
+
+    /* We need some descriptors here */
+    if (unlikely(!out_num && !in_num)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Guest provided element with no descriptors");
+        return false;
+    }
+
+    uint16_t id, curr, i;
+    unsigned n;
+    struct vring_packed_desc *descs = svq->vring_packed.vring.desc;
+
+    i = *head;
+    id = svq->free_head;
+    curr = id;
+
+    size_t num = out_num + in_num;
+
+    /* Translate the out/in buffers' QEMU addresses to SVQ IOVA */
+    ok = vhost_svq_translate_addr(svq, sgs, out_sg, out_num);
+    if (unlikely(!ok)) {
+        return false;
+    }
+
+    ok = vhost_svq_translate_addr(svq, sgs + out_num, in_sg, in_num);
+    if (unlikely(!ok)) {
+        return false;
+    }
+
+    /* Write descriptors to SVQ packed vring */
+    for (n = 0; n < num; n++) {
+        /*
+         * Out buffers precede in buffers; only non-final descriptors get
+         * VRING_DESC_F_NEXT.  avail_used_flags carries the current
+         * AVAIL/USED bits for this wrap of the ring.
+         */
+        uint16_t flags = cpu_to_le16(svq->vring_packed.avail_used_flags |
+                                     (n < out_num ? 0 : VRING_DESC_F_WRITE) |
+                                     (n + 1 == num ? 0 : VRING_DESC_F_NEXT));
+        if (i == *head) {
+            head_flags = flags;
+        } else {
+            descs[i].flags = flags;
+        }
+
+        descs[i].addr = cpu_to_le64(sgs[n]);
+        /* id is le16 on the ring, like addr/len/flags */
+        descs[i].id = cpu_to_le16(id);
+        if (n < out_num) {
+            descs[i].len = cpu_to_le32(out_sg[n].iov_len);
+        } else {
+            descs[i].len = cpu_to_le32(in_sg[n - out_num].iov_len);
+        }
+
+        /* Walk the free-descriptor chain, matching the split ring's idiom */
+        curr = cpu_to_le16(svq->desc_next[curr]);
+
+        if (++i >= svq->vring_packed.vring.num) {
+            /* Wrapped: toggle the AVAIL/USED bits for the new ring pass */
+            i = 0;
+            svq->vring_packed.avail_used_flags ^=
+                1 << VRING_PACKED_DESC_F_AVAIL |
+                1 << VRING_PACKED_DESC_F_USED;
+        }
+    }
+
+    if (i <= *head) {
+        svq->vring_packed.avail_wrap_counter ^= 1;
+    }
+
+    svq->vring_packed.next_avail_idx = i;
+    svq->free_head = curr;
+
+    /*
+     * A driver MUST NOT make the first descriptor in the list
+     * available before all subsequent descriptors comprising
+     * the list are made available.
+     */
+    smp_wmb();
+    svq->vring_packed.vring.desc[*head].flags = head_flags;
+
+    return true;
+}
+
static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
bool needs_kick;
@@ -258,7 +343,13 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -ENOSPC;
}
- ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
+ if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+ ok = vhost_svq_add_packed(svq, out_sg, out_num,
+ in_sg, in_num, &qemu_head);
+ } else {
+ ok = vhost_svq_add_split(svq, out_sg, out_num,
+ in_sg, in_num, &qemu_head);
+ }
if (unlikely(!ok)) {
return -EINVAL;
}
--
2.45.2