From: Stefan Hajnoczi
Subject: Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
Date: Mon, 4 Mar 2013 15:49:14 +0100
User-agent: Mutt/1.5.21 (2010-09-15)

On Sun, Mar 03, 2013 at 09:21:21PM +0800, Liu Ping Fan wrote:
> From: Liu Ping Fan <address@hidden>
> 
> Use nc->transfer_lock to protect the nc->peer->send_queue. All of the

Please use consistent names: the lock protects ->send_queue, so it's best
called send_queue_lock or send_lock.
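
For example (untested sketch; the name is only a suggestion):

    struct NetClientState {
        QemuMutex send_lock;    /* protects peer->send_queue */
        /* ... remaining fields unchanged ... */
    };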

> deleters and senders will sync on this lock, so we can also survive across
> unplug.
> 
> Signed-off-by: Liu Ping Fan <address@hidden>
> ---
>  include/net/net.h   |    4 +++
>  include/net/queue.h |    1 +
>  net/hub.c           |   21 +++++++++++++-
>  net/net.c           |   72 ++++++++++++++++++++++++++++++++++++++++++++++++---
>  net/queue.c         |   15 +++++++++-
>  5 files changed, 105 insertions(+), 8 deletions(-)
> 
> diff --git a/include/net/net.h b/include/net/net.h
> index 24563ef..3e4b9df 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -63,6 +63,8 @@ typedef struct NetClientInfo {
>  } NetClientInfo;
>  
>  struct NetClientState {
> +    /* protect peer's send_queue */
> +    QemuMutex transfer_lock;
>      NetClientInfo *info;
>      int link_down;
>      QTAILQ_ENTRY(NetClientState) next;
> @@ -78,6 +80,7 @@ struct NetClientState {
>  
>  typedef struct NICState {
>      NetClientState ncs[MAX_QUEUE_NUM];
> +    NetClientState *pending_peer[MAX_QUEUE_NUM];

Please rebase onto the net branch of github.com/stefanha/qemu.git.  ncs[] is
no longer statically sized to MAX_QUEUE_NUM.

>      NICConf *conf;
>      void *opaque;
>      bool peer_deleted;
> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
>                                                const char *client_str);
>  typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
>  void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
> +int qemu_can_send_packet_nolock(NetClientState *sender);
>  int qemu_can_send_packet(NetClientState *nc);
>  ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
>                            int iovcnt);
> diff --git a/include/net/queue.h b/include/net/queue.h
> index f60e57f..0ecd23b 100644
> --- a/include/net/queue.h
> +++ b/include/net/queue.h
> @@ -67,6 +67,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
>                                  NetPacketSent *sent_cb);
>  
>  void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
> +void qemu_net_queue_purge_all(NetQueue *queue);
>  bool qemu_net_queue_flush(NetQueue *queue);
>  
>  #endif /* QEMU_NET_QUEUE_H */
> diff --git a/net/hub.c b/net/hub.c
> index 81d2a04..97c3ac3 100644
> --- a/net/hub.c
> +++ b/net/hub.c
> @@ -53,9 +53,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>          if (port == source_port) {
>              continue;
>          }
> -
> +        qemu_mutex_lock(&port->nc.transfer_lock);
> +        if (!port->nc.peer) {

Is .peer protected by transfer_lock too?  That was not documented above, and
I don't think .peer actually needs protecting.
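
If .peer really is meant to be covered, please document that in the struct
comment, e.g. (sketch, wording is mine):

    /* protects the .peer pointer and peer->send_queue */
    QemuMutex transfer_lock;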

> +            qemu_mutex_unlock(&port->nc.transfer_lock);
> +            continue;
> +        }
>          qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>                              QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
> +        qemu_mutex_unlock(&port->nc.transfer_lock);
>          event_notifier_set(&port->e);
>      }
>      return len;
> @@ -65,7 +70,13 @@ static void hub_port_deliver_packet(void *opaque)
>  {
>      NetHubPort *port = (NetHubPort *)opaque;
>  
> +    qemu_mutex_lock(&port->nc.transfer_lock);
> +    if (!port->nc.peer) {
> +        qemu_mutex_unlock(&port->nc.transfer_lock);
> +        return;
> +    }
>      qemu_net_queue_flush(port->nc.peer->send_queue);
> +    qemu_mutex_unlock(&port->nc.transfer_lock);
>  }
>  
>  static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> @@ -78,10 +89,16 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>          if (port == source_port) {
>              continue;
>          }
> -
> +        qemu_mutex_lock(&port->nc.transfer_lock);
> +        if (!port->nc.peer) {
> +            qemu_mutex_unlock(&port->nc.transfer_lock);
> +            continue;
> +        }
>          qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
>                              QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
> +        qemu_mutex_unlock(&port->nc.transfer_lock);
>          event_notifier_set(&port->e);
> +
>      }
>      return len;
>  }
> diff --git a/net/net.c b/net/net.c
> index 544542b..0acb933 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
>          nc->peer = peer;
>          peer->peer = nc;
>      }
> +    qemu_mutex_init(&nc->transfer_lock);
>      QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>  
>      nc->send_queue = qemu_new_net_queue(nc);
> @@ -285,6 +286,7 @@ void *qemu_get_nic_opaque(NetClientState *nc)
>  
>  static void qemu_cleanup_net_client(NetClientState *nc)
>  {
> +    /* This is the place where may be out of big lock, when dev finalized */

I don't understand this comment.

>      QTAILQ_REMOVE(&net_clients, nc, next);
>  
>      if (nc->info->cleanup) {
> @@ -307,6 +309,28 @@ static void qemu_free_net_client(NetClientState *nc)
>      }
>  }
>  
> +/* exclude race with rx/tx path, flush out peer's queue */
> +static void qemu_flushout_net_client(NetClientState *nc)

This function detaches the peer, the name should reflect that.

> +{
> +    NetClientState *peer;
> +
> +    /* sync on receive path */
> +    peer = nc->peer;
> +    if (peer) {
> +        qemu_mutex_lock(&peer->transfer_lock);
> +        peer->peer = NULL;
> +        qemu_mutex_unlock(&peer->transfer_lock);
> +    }

This is weird.  You don't lock to read nc->peer but you do lock to write
peer->peer.  If you use a lock, it must be used consistently.
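
To be concrete, consistent usage would look something like this (untested
sketch; note there is still a window between the two critical sections that
needs more thought):

    NetClientState *peer;

    /* read nc->peer under the same lock that writers of .peer hold */
    qemu_mutex_lock(&nc->transfer_lock);
    peer = nc->peer;
    qemu_mutex_unlock(&nc->transfer_lock);

    /* detach the peer's back-pointer under the peer's own lock */
    if (peer) {
        qemu_mutex_lock(&peer->transfer_lock);
        peer->peer = NULL;
        qemu_mutex_unlock(&peer->transfer_lock);
    }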


