From: Antonios Motakis
Subject: [Qemu-devel] [PATCH v6 8/8] Add vhost-user reconnection
Date: Mon, 13 Jan 2014 15:25:19 +0100

At runtime the vhost-user netdev detects whether the vhost backend is up or
down. Upon disconnection it sets link_down accordingly and notifies
virtio-net, bringing the virtio-net interface down. On the next polling
cycle the connection is re-attempted. The poll cycle length is set through
a -netdev option:

-netdev vhost-user,path=/path/to/sock[,poll_time=poll_time]
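
For example (an illustrative invocation; the socket path is hypothetical,
and a vhost-user backend is assumed to be listening on it):

    qemu -m 1024 -mem-path /hugetlbfs,prealloc=on,share=on \
         -netdev type=vhost-user,id=net0,path=/tmp/vhost.sock,poll_time=2500 \
         -device virtio-net-pci,netdev=net0

If the backend disappears, the guest sees the link go down; once the backend
is reachable again, a subsequent poll (here every 2500 ms) re-establishes the
connection and brings the link back up.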

Signed-off-by: Antonios Motakis <address@hidden>
Signed-off-by: Nikolay Nikolaev <address@hidden>
---
 hw/net/vhost_net.c                | 16 +++++++++
 hw/virtio/vhost-backend.c         | 25 ++++++++++++--
 include/hw/virtio/vhost-backend.h |  2 ++
 include/net/vhost_net.h           |  1 +
 net/vhost-user.c                  | 70 +++++++++++++++++++++++++++++++++++++--
 qapi-schema.json                  |  5 ++-
 qemu-options.hx                   |  7 ++--
 7 files changed, 118 insertions(+), 8 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index e42f4d6..56c218e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -304,6 +304,17 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
     vhost_virtqueue_mask(&net->dev, dev, idx, mask);
 }
 
+int vhost_net_link_status(VHostNetState *net)
+{
+    int r = 0;
+
+    if (net->dev.vhost_ops->vhost_status) {
+        r = net->dev.vhost_ops->vhost_status(&net->dev);
+    }
+
+    return r;
+}
+
 VHostNetState *get_vhost_net(NetClientState *nc)
 {
     VHostNetState *vhost_net = 0;
@@ -372,6 +383,11 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
 {
 }
 
+int vhost_net_link_status(VHostNetState *net)
+{
+    return 0;
+}
+
 VHostNetState *get_vhost_net(NetClientState *nc)
 {
     return 0;
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 8f98562..65c7385 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -360,12 +360,12 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     }
 
     if (vhost_user_send_fds(fd, &msg, fds, fd_num) < 0) {
-        return -1;
+        goto fail;
     }
 
     if (need_reply) {
         if (vhost_user_recv(fd, &msg) < 0) {
-            return -1;
+            goto fail;
         }
 
         if (msg_request != msg.request) {
@@ -398,6 +398,25 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     }
 
     return 0;
+
+fail:
+    /* mark the backend as non-operational */
+    error_report("Disconnect detected");
+    dev->vhost_ops->vhost_backend_cleanup(dev);
+    return -1;
+}
+
+static int vhost_user_status(struct vhost_dev *dev)
+{
+    int result = 1;
+
+    if (vhost_user_echo(dev) < 0) {
+        error_report("Disconnect detected\n");
+        dev->vhost_ops->vhost_backend_cleanup(dev);
+        result = 0;
+    }
+
+    return result;
 }
 
 static int vhost_user_init(struct vhost_dev *dev, const char *devpath)
@@ -479,6 +498,7 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
 static const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_call = vhost_user_call,
+        .vhost_status = vhost_user_status,
         .vhost_backend_init = vhost_user_init,
         .vhost_backend_cleanup = vhost_user_cleanup
 };
@@ -511,6 +531,7 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_call = vhost_kernel_call,
+        .vhost_status = NULL,
         .vhost_backend_init = vhost_kernel_init,
         .vhost_backend_cleanup = vhost_kernel_cleanup
 };
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index ef87ffa..f2b4a6c 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -22,12 +22,14 @@ struct vhost_dev;
 
 typedef int (*vhost_call)(struct vhost_dev *dev, unsigned long int request,
              void *arg);
+typedef int (*vhost_status)(struct vhost_dev *dev);
 typedef int (*vhost_backend_init)(struct vhost_dev *dev, const char *devpath);
 typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
 
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_call vhost_call;
+    vhost_status vhost_status;
     vhost_backend_init vhost_backend_init;
     vhost_backend_cleanup vhost_backend_cleanup;
 } VhostOps;
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index abd3d0b..6390907 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -31,5 +31,6 @@ void vhost_net_ack_features(VHostNetState *net, unsigned features);
 bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
 void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                               int idx, bool mask);
+int vhost_net_link_status(VHostNetState *net);
 VHostNetState *get_vhost_net(NetClientState *nc);
 #endif
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 5ad8fd0..40c1265 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -12,13 +12,18 @@
 #include "net/vhost_net.h"
 #include "net/vhost-user.h"
 #include "qemu/error-report.h"
+#include "qemu/timer.h"
 
 typedef struct VhostUserState {
     NetClientState nc;
     VHostNetState *vhost_net;
     char *devpath;
+    int64_t poll_time;
 } VhostUserState;
 
+static QEMUTimer *vhost_user_timer;
+#define VHOST_USER_DEFAULT_POLL_TIME  (1*1000) /* ms */
+
 VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
 {
     VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
@@ -31,6 +36,11 @@ static int vhost_user_running(VhostUserState *s)
     return (s->vhost_net) ? 1 : 0;
 }
 
+static int vhost_user_link_status(VhostUserState *s)
+{
+    return (!s->nc.link_down) && vhost_net_link_status(s->vhost_net);
+}
+
 static int vhost_user_start(VhostUserState *s)
 {
     VhostNetOptions options;
@@ -59,6 +69,48 @@ static void vhost_user_stop(VhostUserState *s)
     s->vhost_net = 0;
 }
 
+static void vhost_user_timer_handler(void *opaque)
+{
+    VhostUserState *s = opaque;
+    int link_down = 0;
+
+    if (vhost_user_running(s)) {
+        if (!vhost_user_link_status(s)) {
+            link_down = 1;
+        }
+    } else {
+        vhost_user_start(s);
+        if (!vhost_user_running(s)) {
+            link_down = 1;
+        }
+    }
+
+    if (link_down != s->nc.link_down) {
+
+        s->nc.link_down = link_down;
+
+        if (s->nc.peer) {
+            s->nc.peer->link_down = link_down;
+        }
+
+        if (s->nc.info->link_status_changed) {
+            s->nc.info->link_status_changed(&s->nc);
+        }
+
+        if (s->nc.peer && s->nc.peer->info->link_status_changed) {
+            s->nc.peer->info->link_status_changed(s->nc.peer);
+        }
+
+        if (link_down) {
+            vhost_user_stop(s);
+        }
+    }
+
+    /* reschedule */
+    timer_mod(vhost_user_timer,
+              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + s->poll_time);
+}
+
 static void vhost_user_cleanup(NetClientState *nc)
 {
     VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
@@ -74,7 +126,8 @@ static NetClientInfo net_vhost_user_info = {
 };
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                          const char *name, const char *path)
+                               const char *name, const char *path,
+                               int64_t poll_time)
 {
     NetClientState *nc;
     VhostUserState *s;
@@ -90,9 +143,15 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
     s->nc.receive_disabled = 1;
 
     s->devpath = g_strdup(path);
+    s->poll_time = poll_time;
 
     r = vhost_user_start(s);
 
+    vhost_user_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
+            vhost_user_timer_handler, s);
+    timer_mod(vhost_user_timer,
+            qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + s->poll_time);
+
     return r;
 }
 
@@ -101,11 +160,18 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
 {
     const char *path;
     const NetdevVhostUserOptions *vhost_user;
+    int64_t poll_time;
 
     assert(opts->kind == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
     vhost_user = opts->vhost_user;
 
     path = vhost_user->path;
 
-    return net_vhost_user_init(peer, "vhost_user", name, path);
+    if (vhost_user->has_poll_time) {
+        poll_time = vhost_user->poll_time;
+    } else {
+        poll_time = VHOST_USER_DEFAULT_POLL_TIME;
+    }
+
+    return net_vhost_user_init(peer, "vhost_user", name, path, poll_time);
 }
diff --git a/qapi-schema.json b/qapi-schema.json
index d3e0363..7fbc2ce 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -3036,11 +3036,14 @@
 #
 # @path: control socket path
 #
+# @poll_time: #optional polling interval (in milliseconds) for connection probing
+#
 # Since 2.0
 ##
 { 'type': 'NetdevVhostUserOptions',
   'data': {
-    'path': 'str' } }
+    'path': 'str',
+    '*poll_time': 'int' } }
 
 ##
 
diff --git a/qemu-options.hx b/qemu-options.hx
index 7fdb1f3..f307e72 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1768,15 +1768,16 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,path=@var{path}
+@item -netdev vhost-user,path=@var{path}[,poll_time=poll_time]
 
 Connect to a unix domain socket @var{path} on which listens a server that
-implements vhost-user backend.
+implements vhost-user backend. The connection is probed at @var{poll_time} intervals (in milliseconds).
+The default @var{poll_time} is 1000 ms.
 
 Example:
 @example
 qemu -m 1024 -mem-path /hugetlbfs,prealloc=on,share=on \
-     -netdev type=vhost-user,id=net0,path=/path/to/sock \
+     -netdev type=vhost-user,id=net0,path=/path/to/sock,poll_time=2500 \
      -device virtio-net-pci,netdev=net0
 @end example
 
-- 
1.8.3.2



