From: Jason Wang
Subject: Re: [Qemu-devel] [PATCH v4 10/11] virtio: migration support for packed ring
Date: Tue, 19 Feb 2019 15:30:41 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.4.0


On 2019/2/14 12:26 PM, address@hidden wrote:
From: Wei Xu <address@hidden>

Both userspace and vhost-net/user are supported by this patch.

A new subsection is introduced for the packed ring; only 'last_avail_idx'
and 'last_avail_wrap_counter' are saved/loaded, on the presumption that
all the other relevant data (inuse, used/avail index and wrap counter)
should be the same.


This is probably only true for net devices; see this comment in virtio_load():

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);


So you need to migrate used_idx and used_wrap_counter as well, since there is no used index we can read back from the ring for packed.
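
For example, the subsection could carry the used side too (an untested
sketch; used_idx already exists in struct VirtQueue, and
used_wrap_counter is the field this series adds):

    static const VMStateDescription vmstate_packed_virtqueue = {
        .name = "packed_virtqueue_state",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            /* avail side, as in this patch */
            VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
            VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
            /* used side, so virtio_load() can recover inuse for any device */
            VMSTATE_UINT16(used_idx, struct VirtQueue),
            VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
            VMSTATE_END_OF_LIST()
        }
    };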



Signed-off-by: Wei Xu <address@hidden>
---
  hw/virtio/virtio.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++---
  1 file changed, 66 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 8cfc7b6..7c5de07 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2349,6 +2349,13 @@ static bool virtio_virtqueue_needed(void *opaque)
      return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
  }
+static bool virtio_packed_virtqueue_needed(void *opaque)
+{
+    VirtIODevice *vdev = opaque;
+
+    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
+}
+
  static bool virtio_ringsize_needed(void *opaque)
  {
      VirtIODevice *vdev = opaque;
@@ -2390,6 +2397,17 @@ static const VMStateDescription vmstate_virtqueue = {
      }
  };
+static const VMStateDescription vmstate_packed_virtqueue = {
+    .name = "packed_virtqueue_state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
+        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
  static const VMStateDescription vmstate_virtio_virtqueues = {
      .name = "virtio/virtqueues",
      .version_id = 1,
@@ -2402,6 +2420,18 @@ static const VMStateDescription vmstate_virtio_virtqueues = {
      }
  };
+static const VMStateDescription vmstate_virtio_packed_virtqueues = {
+    .name = "virtio/packed_virtqueues",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = &virtio_packed_virtqueue_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
+                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
  static const VMStateDescription vmstate_ringsize = {
      .name = "ringsize_state",
      .version_id = 1,
@@ -2522,6 +2552,7 @@ static const VMStateDescription vmstate_virtio = {
          &vmstate_virtio_ringsize,
          &vmstate_virtio_broken,
          &vmstate_virtio_extra_state,
+        &vmstate_virtio_packed_virtqueues,
          NULL
      }
  };
@@ -2794,6 +2825,17 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
                  virtio_queue_update_rings(vdev, i);
              }
+            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
+                vdev->vq[i].avail_wrap_counter =
+                                        vdev->vq[i].last_avail_wrap_counter;
+
+                vdev->vq[i].used_idx = vdev->vq[i].last_avail_idx;
+                vdev->vq[i].used_wrap_counter =
+                                        vdev->vq[i].last_avail_wrap_counter;
+                continue;
+            }
+
             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
              /* Check it isn't doing strange things with descriptor numbers. */
              if (nheads > vdev->vq[i].vring.num) {
@@ -2955,17 +2997,34 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
  {
-    return vdev->vq[n].last_avail_idx;
+    uint16_t idx;
+
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        idx = vdev->vq[n].last_avail_idx;
+        idx |= ((int)vdev->vq[n].avail_wrap_counter) << 15;
+    } else {
+        idx = (int)vdev->vq[n].last_avail_idx;
+    }
+    return idx;
  }
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
  {
-    vdev->vq[n].last_avail_idx = idx;
-    vdev->vq[n].shadow_avail_idx = idx;
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        vdev->vq[n].last_avail_idx = idx & 0x7fff;
+        vdev->vq[n].avail_wrap_counter = !!(idx & 0x8000);


Let's define some macros for those magic numbers.
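
For example (the names here are only a suggestion):

    #define VRING_PACKED_AVAIL_IDX_MASK   0x7fff
    #define VRING_PACKED_AVAIL_WRAP_MASK  0x8000

        vdev->vq[n].last_avail_idx = idx & VRING_PACKED_AVAIL_IDX_MASK;
        vdev->vq[n].avail_wrap_counter = !!(idx & VRING_PACKED_AVAIL_WRAP_MASK);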


+    } else {
+        vdev->vq[n].last_avail_idx = idx;
+        vdev->vq[n].shadow_avail_idx = idx;
+    }
  }
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
  {
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return;
+    }


Why doesn't packed ring care about this?


+
      rcu_read_lock();
      if (vdev->vq[n].vring.desc) {
          vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
@@ -2976,6 +3035,10 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
  {
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return;
+    }


And this?

Thanks


+
      rcu_read_lock();
      if (vdev->vq[n].vring.desc) {
          vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);


