[Qemu-devel] [PATCHv3 4/6] virtio-net: stop/start bh when appropriate


From: Jason Wang
Subject: [Qemu-devel] [PATCHv3 4/6] virtio-net: stop/start bh when appropriate
Date: Wed, 1 Dec 2010 13:45:09 +0800

Michael S. Tsirkin writes:
 > Avoid sending out packets, and modifying
 > device state, when VM is stopped.
 > Add assert statements to verify this does not happen.
 > 
 > Avoid scheduling bh when vhost-net is started.
 > 
 > Stop bh when driver disabled bus mastering
 > (we must not access memory after this).
 > 
 > Signed-off-by: Michael S. Tsirkin <address@hidden>
 > 

There's no need to disable the bh, since we call qemu_aio_flush() after
vm_state_notify() in do_vm_stop(). And for the timer, it looks like every device
should stop its timers in its own vm state change handler, not only virtio-net?
(A sketch of that pattern follows the quoted patch below.)
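
For reference, the ordering relied on above is the one in do_vm_stop(). A
simplified sketch from memory of the tree at that time (the exact body and the
surrounding helpers are an assumption; the only point taken from the comment
above is that qemu_aio_flush() runs after vm_state_notify()):

    /* Sketch only: simplified do_vm_stop(). The vm state notifiers run
     * first, so devices see !vm_running, and only then is outstanding
     * aio drained. */
    static void do_vm_stop(int reason)
    {
        if (vm_running) {
            cpu_disable_ticks();
            vm_running = 0;
            pause_all_vcpus();
            vm_state_notify(0, reason);  /* device handlers see the stop here */
            qemu_aio_flush();            /* then pending aio is drained */
            bdrv_flush_all();
        }
    }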

 > ---
 > 
 > Respinning just this one patch:
 > virtio-net: stop/start bh on vm start/stop
 > 
 > it turns out under kvm vcpu can be running after vm stop
 > notifier callbacks are done. And it makes sense: vcpu is
 > just another device, so we should not
 > depend on the order of vmstate notifiers.
 > The state callback is thus a request for device to avoid changing state
 > of other devices, not a guarantee that other devices
 > will not send requests to it.
 > 
 > diff --git a/hw/virtio-net.c b/hw/virtio-net.c
 > index 43a2b3d..260433f 100644
 > --- a/hw/virtio-net.c
 > +++ b/hw/virtio-net.c
 > @@ -99,7 +99,13 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
 >      }
 >  }
 >  
 > -static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
 > +bool virtio_net_started(VirtIONet *n, uint8_t status)
 > +{
 > +    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
 > +        (n->status & VIRTIO_NET_S_LINK_UP) && n->vm_running;
 > +}
 > +
 > +static void virtio_net_vhost_status(struct VirtIODevice *vdev, uint8_t status)
 >  {
 >      VirtIONet *n = to_virtio_net(vdev);
 >      if (!n->nic->nc.peer) {
 > @@ -112,9 +118,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
 >      if (!tap_get_vhost_net(n->nic->nc.peer)) {
 >          return;
 >      }
 > -    if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
 > -                               (n->status & VIRTIO_NET_S_LINK_UP) &&
 > -                               n->vm_running)) {
 > +    if (!!n->vhost_started == virtio_net_started(n, status)) {
 >          return;
 >      }
 >      if (!n->vhost_started) {
 > @@ -131,6 +135,30 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
 >      }
 >  }
 >  
 > +static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
 > +{
 > +    VirtIONet *n = to_virtio_net(vdev);
 > +
 > +    virtio_net_vhost_status(vdev, status);
 > +
 > +    if (!n->tx_waiting) {
 > +        return;
 > +    }
 > +
 > +    if (virtio_net_started(n, status) && !n->vhost_started) {
 > +        if (n->tx_timer) {
 > +            qemu_mod_timer(n->tx_timer,
 > +                           qemu_get_clock(vm_clock) + n->tx_timeout);
 > +        } else {
 > +            qemu_bh_schedule(n->tx_bh);
 > +        }
 > +    } else {
 > +        if (n->tx_timer) {
 > +            qemu_del_timer(n->tx_timer);
 > +        } else {
 > +            qemu_bh_cancel(n->tx_bh);
 > +        }
 > +    }
 > +}
 > +
 >  static void virtio_net_set_link_status(VLANClientState *nc)
 >  {
 >      VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
 > @@ -675,11 +703,13 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 >  {
 >      VirtQueueElement elem;
 >      int32_t num_packets = 0;
 >  
 >      if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
 >          return num_packets;
 >      }
 >  
 > +    assert(n->vm_running);
 > +
 >      if (n->async_tx.elem.out_num) {
 >          virtio_queue_set_notification(n->tx_vq, 0);
 >          return num_packets;
 > @@ -738,6 +767,12 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 >  {
 >      VirtIONet *n = to_virtio_net(vdev);
 >  
 > +    /* This happens when device was stopped but VCPU wasn't. */
 > +    if (!n->vm_running) {
 > +        n->tx_waiting = 1;
 > +        return;
 > +    }
 > +
 >      if (n->tx_waiting) {
 >          virtio_queue_set_notification(vq, 1);
 >          qemu_del_timer(n->tx_timer);
 > @@ -758,14 +793,19 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
 >      if (unlikely(n->tx_waiting)) {
 >          return;
 >      }
 > +    n->tx_waiting = 1;
 > +    /* This happens when device was stopped but VCPU wasn't. */
 > +    if (!n->vm_running) {
 > +        return;
 > +    }
 >      virtio_queue_set_notification(vq, 0);
 >      qemu_bh_schedule(n->tx_bh);
 > -    n->tx_waiting = 1;
 >  }
 >  
 >  static void virtio_net_tx_timer(void *opaque)
 >  {
 >      VirtIONet *n = opaque;
 > +    assert(n->vm_running);
 >  
 >      n->tx_waiting = 0;
 >  
 > @@ -782,6 +822,8 @@ static void virtio_net_tx_bh(void *opaque)
 >      VirtIONet *n = opaque;
 >      int32_t ret;
 >  
 > +    assert(n->vm_running);
 > +
 >      n->tx_waiting = 0;
 >  
 >      /* Just in case the driver is not ready on more */
 > @@ -926,15 +968,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
 >          }
 >      }
 >      n->mac_table.first_multi = i;
 > -
 > -    if (n->tx_waiting) {
 > -        if (n->tx_timer) {
 > -            qemu_mod_timer(n->tx_timer,
 > -                           qemu_get_clock(vm_clock) + n->tx_timeout);
 > -        } else {
 > -            qemu_bh_schedule(n->tx_bh);
 > -        }
 > -    }
 >      return 0;
 >  }
 >  
 > 
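
On the timer point raised above: the suggestion is that each device park its
own timers from a vm state change handler instead of special-casing this in
virtio-net. A minimal sketch of that pattern, using the
qemu_add_vm_change_state_handler() interface of this period (the device, its
fields and the interval are hypothetical, not a proposed patch):

    /* Hypothetical device 's': stop its periodic timer when the VM stops
     * and re-arm it on resume, so the callback never runs against a
     * stopped machine. */
    static void mydev_vm_change_state(void *opaque, int running, int reason)
    {
        MyDevState *s = opaque;

        if (running) {
            qemu_mod_timer(s->timer,
                           qemu_get_clock(vm_clock) + s->interval);
        } else {
            qemu_del_timer(s->timer);
        }
    }

    /* registered once at device init time: */
    qemu_add_vm_change_state_handler(mydev_vm_change_state, s);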


