From: Liu Ping Fan
Subject: [Qemu-devel] [PATCH v3 4/7] net: introduce lock to protect NetClientState's peer's access
Date: Thu, 27 Jun 2013 11:38:46 +0800

Introduce nc->peer_lock to guard against the race between readers of
nc->peer and its deleter. With it, once the deleter has finished, no new
qemu_send_packet_xx() call can append a packet to peer->send_queue, so no
new reference from packet->sender to nc will exist in nc->peer->send_queue.

Lock ordering: take peer_lock first, then NetQueue->lock inside it.
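
As an illustration of that ordering, the reader side ends up looking roughly
like the sketch below. This is only an outline, not a hunk from this patch:
send_to_peer() is a made-up helper name for the example, and it assumes that
qemu_net_queue_send() takes NetQueue->lock internally, as introduced earlier
in this series.

static ssize_t send_to_peer(NetClientState *sender,
                            const uint8_t *buf, size_t size)
{
    ssize_t ret;

    /* outer lock: pins sender->peer against a concurrent detach */
    qemu_mutex_lock(&sender->peer_lock);
    if (sender->link_down || !sender->peer) {
        qemu_mutex_unlock(&sender->peer_lock);
        return size;
    }
    /* inner lock: qemu_net_queue_send() locks NetQueue->lock itself */
    ret = qemu_net_queue_send(sender->peer->send_queue, sender,
                              QEMU_NET_PACKET_FLAG_NONE, buf, size, NULL);
    qemu_mutex_unlock(&sender->peer_lock);
    return ret;
}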

Signed-off-by: Liu Ping Fan <address@hidden>
---
 include/net/net.h |  7 ++++++
 net/net.c         | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/include/net/net.h b/include/net/net.h
index 2f72b26..27aa5cb 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -67,6 +67,11 @@ struct NetClientState {
     NetClientInfo *info;
     int link_down;
     QTAILQ_ENTRY(NetClientState) next;
+    /* Protects access to 'peer' only between a reader and the writer
+     * (deleter); races between writers are still resolved by the big lock.
+     * Lock ordering: hold peer_lock outside, then NetQueue->lock inside.
+     */
+    QemuMutex peer_lock;
     NetClientState *peer;
     NetQueue *send_queue;
     char *model;
@@ -79,6 +84,7 @@ struct NetClientState {
 
 typedef struct NICState {
     NetClientState *ncs;
+    NetClientState **pending_peer;
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
@@ -106,6 +112,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
                                               const char *client_str);
 typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
 void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_send_packet_nolock(NetClientState *sender);
 int qemu_can_send_packet(NetClientState *nc);
 ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
                           int iovcnt);
diff --git a/net/net.c b/net/net.c
index 43a74e4..6fc50d8 100644
--- a/net/net.c
+++ b/net/net.c
@@ -204,6 +204,7 @@ static void qemu_net_client_setup(NetClientState *nc,
         nc->peer = peer;
         peer->peer = nc;
     }
+    qemu_mutex_init(&nc->peer_lock);
     QTAILQ_INSERT_TAIL(&net_clients, nc, next);
 
     nc->send_queue = qemu_new_net_queue(nc);
@@ -243,6 +244,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
     nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
+    nic->pending_peer = g_malloc0(sizeof(NetClientState *) * queues);
 
     for (i = 0; i < queues; i++) {
         qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
@@ -301,6 +303,35 @@ static void qemu_free_net_client(NetClientState *nc)
     }
 }
 
+/* Eliminate the reference to the peer, sync with the exit of rx/tx actions,
+ * and flush out the peer's queue.
+ */
+static void qemu_net_client_detach_flush(NetClientState *nc)
+{
+    NetClientState *peer;
+
+    qemu_mutex_lock(&nc->peer_lock);
+    peer = nc->peer;
+    qemu_mutex_unlock(&nc->peer_lock);
+
+    /* writer of peer's peer field */
+    if (peer) {
+        /* exclude the race with tx to @nc */
+        qemu_mutex_lock(&peer->peer_lock);
+        peer->peer = NULL;
+        qemu_mutex_unlock(&peer->peer_lock);
+    }
+
+    /* writer of self's peer field */
+    /* exclude the race with tx from @nc */
+    qemu_mutex_lock(&nc->peer_lock);
+    nc->peer = NULL;
+    if (peer) {
+        qemu_net_queue_purge(peer->send_queue, nc);
+    }
+    qemu_mutex_unlock(&nc->peer_lock);
+}
+
 void qemu_del_net_client(NetClientState *nc)
 {
     NetClientState *ncs[MAX_QUEUE_NUM];
@@ -331,7 +362,9 @@ void qemu_del_net_client(NetClientState *nc)
         }
 
         for (i = 0; i < queues; i++) {
+            qemu_net_client_detach_flush(ncs[i]);
             qemu_cleanup_net_client(ncs[i]);
+            nic->pending_peer[i] = ncs[i];
         }
 
         return;
@@ -340,6 +373,7 @@ void qemu_del_net_client(NetClientState *nc)
     assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);
 
     for (i = 0; i < queues; i++) {
+        qemu_net_client_detach_flush(ncs[i]);
         qemu_cleanup_net_client(ncs[i]);
         qemu_free_net_client(ncs[i]);
     }
@@ -352,17 +386,19 @@ void qemu_del_nic(NICState *nic)
     /* If this is a peer NIC and peer has already been deleted, free it now. */
     if (nic->peer_deleted) {
         for (i = 0; i < queues; i++) {
-            qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
+            qemu_free_net_client(nic->pending_peer[i]);
         }
     }
 
     for (i = queues - 1; i >= 0; i--) {
         NetClientState *nc = qemu_get_subqueue(nic, i);
 
+        qemu_net_client_detach_flush(nc);
         qemu_cleanup_net_client(nc);
         qemu_free_net_client(nc);
     }
 
+    g_free(nic->pending_peer);
     g_free(nic);
 }
 
@@ -379,7 +415,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
     }
 }
 
-int qemu_can_send_packet(NetClientState *sender)
+int qemu_can_send_packet_nolock(NetClientState *sender)
 {
     if (!sender->peer) {
         return 1;
@@ -394,6 +430,16 @@ int qemu_can_send_packet(NetClientState *sender)
     return 1;
 }
 
+int qemu_can_send_packet(NetClientState *sender)
+{
+    int ret = 1;
+
+    qemu_mutex_lock(&sender->peer_lock);
+    ret = qemu_can_send_packet_nolock(sender);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return ret;
+}
+
 ssize_t qemu_deliver_packet(NetClientState *sender,
                             unsigned flags,
                             const uint8_t *data,
@@ -457,19 +503,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
                                                  NetPacketSent *sent_cb)
 {
     NetQueue *queue;
+    ssize_t sz;
 
 #ifdef DEBUG_NET
     printf("qemu_send_packet_async:\n");
     hex_dump(stdout, buf, size);
 #endif
 
+    qemu_mutex_lock(&sender->peer_lock);
     if (sender->link_down || !sender->peer) {
+        qemu_mutex_unlock(&sender->peer_lock);
         return size;
     }
 
     queue = sender->peer->send_queue;
 
-    return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+    sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return sz;
 }
 
 ssize_t qemu_send_packet_async(NetClientState *sender,
@@ -537,16 +588,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
                                 NetPacketSent *sent_cb)
 {
     NetQueue *queue;
+    ssize_t sz;
 
+    qemu_mutex_lock(&sender->peer_lock);
     if (sender->link_down || !sender->peer) {
+        qemu_mutex_unlock(&sender->peer_lock);
         return iov_size(iov, iovcnt);
     }
 
     queue = sender->peer->send_queue;
 
-    return qemu_net_queue_send_iov(queue, sender,
+    sz = qemu_net_queue_send_iov(queue, sender,
                                    QEMU_NET_PACKET_FLAG_NONE,
                                    iov, iovcnt, sent_cb);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return sz;
 }
 
 ssize_t
-- 
1.8.1.4