From: Greg Kurz
Subject: [Qemu-devel] [PATCH 1/6] virtio-net: use the backend cross-endian capabilities
Date: Thu, 07 Jan 2016 12:32:02 +0100
User-agent: StGit/0.17.1-dirty

When running a fully emulated device in a cross-endian configuration, such as
a virtio 1.0 device (always little-endian) offered to a big-endian guest, we
need to byte-swap the vnet headers. This is currently handled by the
virtio_net_hdr_swap() function in the core virtio-net code, but it should
really be handled by the net backend.
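
For reference, the fix done by virtio_net_hdr_swap() boils down to
byte-swapping the 16-bit fields of the vnet header whenever the device
endianness differs from the host. A minimal sketch using QEMU's
virtio_tswap16s() accessor (illustration only, include paths and the
helper name are assumptions, not the exact code in hw/net/virtio-net.c):

  #include "hw/virtio/virtio-access.h"
  #include "standard-headers/linux/virtio_net.h"

  /* Sketch: swap the multi-byte fields of the vnet header in place.
   * The 8-bit flags and gso_type fields need no conversion. */
  static void vnet_hdr_swap_sketch(VirtIODevice *vdev,
                                   struct virtio_net_hdr *hdr)
  {
      virtio_tswap16s(vdev, &hdr->hdr_len);
      virtio_tswap16s(vdev, &hdr->gso_size);
      virtio_tswap16s(vdev, &hdr->csum_start);
      virtio_tswap16s(vdev, &hdr->csum_offset);
  }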

With this patch, virtio-net now tries to configure the backend to do the
endian fixing when the device starts. If the backend cannot support the
requested endianness, we fall back on virtio_net_hdr_swap(): this
is recorded in the needs_vnet_hdr_swap flag, to be used in the TX and RX
paths.
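
Whether the backend can do the fix shows up in the return value of
qemu_set_vnet_le()/qemu_set_vnet_be(): a peer without the corresponding
hook returns an error, which is what needs_vnet_hdr_swap records. As an
illustration (not part of this patch, function name is hypothetical), a
Linux tap backend can honour the request with the TUNSETVNETLE/TUNSETVNETBE
ioctls, roughly along these lines:

  #include <sys/ioctl.h>
  #include <linux/if_tun.h>
  #include <errno.h>
  #include <stdbool.h>

  /* Sketch: ask the kernel to use little-endian vnet headers on a tap fd.
   * Returns 0 on success, -errno if the kernel lacks support, in which
   * case virtio-net keeps needs_vnet_hdr_swap set and swaps in userspace. */
  static int tap_fd_set_vnet_le_sketch(int fd, bool is_le)
  {
      int arg = is_le ? 1 : 0;

      if (ioctl(fd, TUNSETVNETLE, &arg) < 0) {
          return -errno;
      }
      return 0;
  }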

The current vhost-net code also tries to configure net backends. This will
no longer be needed and is addressed in a subsequent patch.

Signed-off-by: Greg Kurz <address@hidden>
---
 hw/net/virtio-net.c            |   33 +++++++++++++++++++++++++++++++--
 include/hw/virtio/virtio-net.h |    1 +
 2 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index a877614e3e7a..d4cc94ea5e55 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -152,6 +152,31 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
     }
 }
 
+static void virtio_net_vnet_status(VirtIONet *n, uint8_t status)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    NetClientState *peer = qemu_get_queue(n->nic)->peer;
+
+    if (virtio_net_started(n, status)) {
+        int r;
+
+        if (virtio_is_big_endian(vdev)) {
+            r = qemu_set_vnet_be(peer, true);
+        } else {
+            r = qemu_set_vnet_le(peer, true);
+        }
+
+        n->needs_vnet_hdr_swap = !!r;
+    } else if (virtio_net_started(n, vdev->status) &&
+               !virtio_net_started(n, status)) {
+        if (virtio_is_big_endian(vdev)) {
+            qemu_set_vnet_be(peer, false);
+        } else {
+            qemu_set_vnet_le(peer, false);
+        }
+    }
+}
+
 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
@@ -159,6 +184,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     int i;
     uint8_t queue_status;
 
+    virtio_net_vnet_status(n, status);
     virtio_net_vhost_status(n, status);
 
     for (i = 0; i < n->max_queues; i++) {
@@ -957,7 +983,10 @@ static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
         void *wbuf = (void *)buf;
         work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                     size - n->host_hdr_len);
-        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+
+        if (n->needs_vnet_hdr_swap) {
+            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+        }
         iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
     } else {
         struct virtio_net_hdr hdr = {
@@ -1167,7 +1196,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
                 error_report("virtio-net header incorrect");
                 exit(1);
             }
-            if (virtio_needs_swap(vdev)) {
+            if (n->needs_vnet_hdr_swap) {
                 virtio_net_hdr_swap(vdev, (void *) &mhdr);
                 sg2[0].iov_base = &mhdr;
                 sg2[0].iov_len = n->guest_hdr_len;
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index f3cc25feca2b..27bc868fbc7d 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -94,6 +94,7 @@ typedef struct VirtIONet {
     uint64_t curr_guest_offloads;
     QEMUTimer *announce_timer;
     int announce_counter;
+    bool needs_vnet_hdr_swap;
 } VirtIONet;
 
 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,