From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 09/10] virtio: read avail_idx from VQ only when necessary
Date: Fri, 15 Jan 2016 13:41:57 +0100

From: Vincenzo Maffione <address@hidden>

The virtqueue_pop() implementation needs to check whether the avail ring
contains any pending buffers. Performing this check does not always
require fetching avail_idx from VQ memory, which is expensive. This
patch introduces a shadow variable that tracks avail_idx and modifies
virtio_queue_empty() to access avail_idx in physical memory only when
necessary.

Signed-off-by: Vincenzo Maffione <address@hidden>
Message-Id: <address@hidden>
Signed-off-by: Paolo Bonzini <address@hidden>
---
 hw/virtio/virtio.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
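
A minimal standalone sketch of the shadow-index check may help when reading
the diff below. It is not the QEMU code itself: demo_vq, guest_avail_idx and
demo_read_avail_idx() are illustrative stand-ins, with demo_read_avail_idx()
playing the role of the virtio_lduw_phys() read that the patch makes
conditional.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for avail->idx as written by the guest into VQ memory. */
    static uint16_t guest_avail_idx;

    struct demo_vq {
        uint16_t last_avail_idx;    /* next head the device will pop */
        uint16_t shadow_avail_idx;  /* cached copy of the last avail_idx read */
    };

    /* Stand-in for the expensive access to avail->idx in guest memory. */
    static uint16_t demo_read_avail_idx(struct demo_vq *q)
    {
        q->shadow_avail_idx = guest_avail_idx;
        return q->shadow_avail_idx;
    }

    static int demo_queue_empty(struct demo_vq *q)
    {
        /* If the cached index already shows pending buffers, skip the
         * expensive read of VQ memory entirely. */
        if (q->shadow_avail_idx != q->last_avail_idx) {
            return 0;
        }
        /* Otherwise refresh the cache and re-check. */
        return demo_read_avail_idx(q) == q->last_avail_idx;
    }

    int main(void)
    {
        struct demo_vq q = { 0, 0 };

        printf("%d\n", demo_queue_empty(&q)); /* 1: empty, one memory read */
        guest_avail_idx = 3;                  /* guest posts three buffers */
        printf("%d\n", demo_queue_empty(&q)); /* 0: shadow refreshed to 3 */
        printf("%d\n", demo_queue_empty(&q)); /* 0: answered from the shadow */
        return 0;
    }

The sketch ignores memory ordering; the real code still issues smp_rmb()
after a non-empty result before reading descriptors, as noted in the hunk
for virtqueue_pop() below.
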

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 3e7c6bf..01142da 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -70,8 +70,13 @@ typedef struct VRing
 struct VirtQueue
 {
     VRing vring;
+
+    /* Next head to pop */
     uint16_t last_avail_idx;
 
+    /* Last avail_idx read from VQ. */
+    uint16_t shadow_avail_idx;
+
     uint16_t used_idx;
 
     /* Last used index value we have signalled on */
@@ -132,7 +137,8 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    return virtio_lduw_phys(vq->vdev, pa);
+    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+    return vq->shadow_avail_idx;
 }
 
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
@@ -223,8 +229,14 @@ int virtio_queue_ready(VirtQueue *vq)
     return vq->vring.avail != 0;
 }
 
+/* Fetch avail_idx from VQ memory only when we really need to know if
+ * guest has added some buffers. */
 int virtio_queue_empty(VirtQueue *vq)
 {
+    if (vq->shadow_avail_idx != vq->last_avail_idx) {
+        return 0;
+    }
+
     return vring_avail_idx(vq) == vq->last_avail_idx;
 }
 
@@ -300,7 +312,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
     /* Check it isn't doing very strange things with descriptor numbers. */
     if (num_heads > vq->vring.num) {
         error_report("Guest moved used index from %u to %u",
-                     idx, vring_avail_idx(vq));
+                     idx, vq->shadow_avail_idx);
         exit(1);
     }
     /* On success, callers read a descriptor at vq->last_avail_idx.
@@ -534,9 +546,12 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     struct iovec iov[VIRTQUEUE_MAX_SIZE];
     VRingDesc desc;
 
-    if (!virtqueue_num_heads(vq, vq->last_avail_idx)) {
+    if (virtio_queue_empty(vq)) {
         return NULL;
     }
+    /* Needed after virtio_queue_empty(), see comment in
+     * virtqueue_num_heads(). */
+    smp_rmb();
 
     /* When we start there are none of either input nor output. */
     out_num = in_num = 0;
@@ -786,6 +801,7 @@ void virtio_reset(void *opaque)
         vdev->vq[i].vring.avail = 0;
         vdev->vq[i].vring.used = 0;
         vdev->vq[i].last_avail_idx = 0;
+        vdev->vq[i].shadow_avail_idx = 0;
         vdev->vq[i].used_idx = 0;
         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
         vdev->vq[i].signalled_used = 0;
@@ -1155,7 +1171,7 @@ static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
     smp_mb();
     /* Always notify when queue is empty (when feature acknowledge) */
     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
-        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
+        !vq->inuse && virtio_queue_empty(vq)) {
         return true;
     }
 
@@ -1579,6 +1595,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
                 return -1;
             }
             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
+            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
         }
     }
 
-- 
2.5.0




