From: Fam Zheng
Subject: Re: [Qemu-devel] [PATCH v1 2/2] virtio-scsi: dataplane: notify guest as batch
Date: Tue, 11 Nov 2014 10:52:32 +0800
User-agent: Mutt/1.5.23 (2014-03-12)

On Tue, 11/11 10:29, Ming Lei wrote:
> On Tue, Nov 11, 2014 at 9:52 AM, Fam Zheng <address@hidden> wrote:
> > On Tue, 11/11 09:17, Ming Lei wrote:
> >> It isn't necessary to notify the guest each time a request
> >> completes; it is enough to notify once per run of
> >> virtio_scsi_iothread_handle_cmd().
> >>
> >> This patch suppresses about 30K/sec of writes to the eventfd.
> >>
> >> Signed-off-by: Ming Lei <address@hidden>
> >> ---
> >>  hw/scsi/virtio-scsi-dataplane.c |    4 +++-
> >>  hw/scsi/virtio-scsi.c           |   26 +++++++++++++++++++++++++-
> >>  include/hw/virtio/virtio-scsi.h |    4 ++++
> >>  3 files changed, 32 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
> >> index df17229..294515a 100644
> >> --- a/hw/scsi/virtio-scsi-dataplane.c
> >> +++ b/hw/scsi/virtio-scsi-dataplane.c
> >> @@ -62,6 +62,7 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
> >>      aio_set_event_notifier(s->ctx, &r->host_notifier, handler);
> >>
> >>      r->parent = s;
> >> +    r->qid = n;
> >>
> >>      if (!vring_setup(&r->vring, VIRTIO_DEVICE(s), n)) {
> >>          fprintf(stderr, "virtio-scsi: VRing setup failed\n");
> >> @@ -95,7 +96,8 @@ void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req)
> >>  {
> >>      vring_push(&req->vring->vring, &req->elem,
> >>                 req->qsgl.size + req->resp_iov.size);
> >> -    event_notifier_set(&req->vring->guest_notifier);
> >> +    req->dev->pending_guest_notify |= 1ULL << (req->vring->qid - 2);
> >> +    qemu_bh_schedule(req->dev->guest_notify_bh);
> >>  }
> >>
> >>  static void virtio_scsi_iothread_handle_ctrl(EventNotifier *notifier)
> >> diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
> >> index 6e34a2c..98411ef 100644
> >> --- a/hw/scsi/virtio-scsi.c
> >> +++ b/hw/scsi/virtio-scsi.c
> >> @@ -86,6 +86,21 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
> >>      virtio_scsi_free_req(req);
> >>  }
> >>
> >> +static void notify_guest_bh(void *opaque)
> >> +{
> >> +    VirtIOSCSI *s = opaque;
> >> +    unsigned int qid;
> >> +    uint64_t pending = s->pending_guest_notify;
> >> +
> >> +    s->pending_guest_notify = 0;
> >> +
> >> +    while ((qid = ffsl(pending))) {
> >> +        qid--;
> >> +        event_notifier_set(&s->cmd_vrings[qid]->guest_notifier);
> >
> > Don't we need to handle ctrlq and eventq as well?
> 
> Most of the time there is little activity on the ctrl and event vqs, so it
> isn't necessary to involve them.

I mean that virtio_scsi_vring_push_notify is not only used to notify the cmd
queues; it must also work for ctrl and event, because its caller
virtio_scsi_complete_req is also reached from virtio_scsi_handle_ctrl_req and
virtio_scsi_push_event.
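
Concretely: the ctrl queue is virtqueue 0 and the event queue is virtqueue 1
(cmd queues start at 2), so with the bitmap keyed off (qid - 2) a ctrl or
event completion would misbehave. A sketch of the failure mode, reusing the
patch's own names:

    /* A ctrl completion has qid == 0, so the patched push_notify does: */
    req->dev->pending_guest_notify |= 1ULL << (0u - 2);
    /* (0u - 2) wraps to 0xfffffffe: an out-of-range shift (undefined
     * behavior), and notify_guest_bh only ever looks at cmd_vrings, so
     * the guest notification for ctrl and event is never delivered. */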

> 
> >
> >> +        pending &= ~(1ULL << qid);
> >> +    }
> >> +}
> >> +
> >>  static void virtio_scsi_bad_req(void)
> >>  {
> >>      error_report("wrong size for virtio-scsi headers");
> >> @@ -824,7 +839,12 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
> >>      }
> >>
> >>      if (s->conf.iothread) {
> >> -        virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
> >> +        VirtIOSCSI *vis = VIRTIO_SCSI(s);
> >> +
> >> +        QEMU_BUILD_BUG_ON(VIRTIO_PCI_QUEUE_MAX > 64);
> >> +        virtio_scsi_set_iothread(vis, s->conf.iothread);
> >> +        vis->pending_guest_notify = 0;
> >> +        vis->guest_notify_bh = aio_bh_new(vis->ctx, notify_guest_bh, vis);
> >>      }
> >>  }
> >>
> >> @@ -901,7 +921,11 @@ void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp)
> >>  {
> >>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >>      VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
> >> +    VirtIOSCSI *s = VIRTIO_SCSI(vs);
> >>
> >> +    if (vs->conf.iothread) {
> >> +        qemu_bh_delete(s->guest_notify_bh);
> >> +    }
> >>      g_free(vs->cmd_vqs);
> >>      virtio_cleanup(vdev);
> >>  }
> >> diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
> >> index 9e1a49c..5e6c57e 100644
> >> --- a/include/hw/virtio/virtio-scsi.h
> >> +++ b/include/hw/virtio/virtio-scsi.h
> >> @@ -163,6 +163,7 @@ typedef struct {
> >>      Vring vring;
> >>      EventNotifier host_notifier;
> >>      EventNotifier guest_notifier;
> >> +    uint32_t qid;
> >
> > Could this be a "bool notify_pending"? In that case pending_guest_notify in
> > VirtIOSCSI is not necessary. I guess iterating over at most 64
> > VirtIOSCSIVring elements in guest_notify_bh is not _that_ expensive, so it's
> > not worth the complexity.
> 
> We need to know which queue the pending notification targets in the BH
> handler, and 64 is the limit on the number of queues, not elements.
> 
> Actually it is a bit expensive: about 30K/sec of write syscalls are wasted
> on unnecessary, repeated notifications.
> 
> A similar approach is applied in virtio-blk dataplane too, :-)
> 
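
For reference, the bit math in the posted patch then works out like this (a
worked sketch; virtio-scsi virtqueue numbering is ctrl = 0, event = 1, cmd
queues from 2 onwards):

    /* Completion on cmd queue qid == 3 (the second cmd queue): */
    s->pending_guest_notify |= 1ULL << (3 - 2);       /* sets bit 1 */

    /* In notify_guest_bh: ffsl() returns the 1-based position of the
     * least significant set bit, so ffsl(pending) == 2; after qid--,
     * cmd_vrings[1] (the second cmd queue) is notified, as intended. */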

I mean something like:

diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 9e1a49c..5e6c57e 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -163,6 +163,7 @@ typedef struct {
     Vring vring;
     EventNotifier host_notifier;
     EventNotifier guest_notifier;
+    bool notify_pending;

@@ -95,7 +96,8 @@ void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req)
 {
     vring_push(&req->vring->vring, &req->elem,
                req->qsgl.size + req->resp_iov.size);
-    event_notifier_set(&req->vring->guest_notifier);
+    req->vring->notify_pending = true;
+    qemu_bh_schedule(req->dev->guest_notify_bh);
 }

And in notify_guest_bh you go through all the vrings and call
event_notifier_set on those with vring->notify_pending == true.
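
A minimal sketch of that BH (assuming the dataplane rings live in
s->ctrl_vring, s->event_vring and s->cmd_vrings[], with conf.num_queues cmd
rings, as elsewhere in the dataplane code):

static void notify_guest_bh(void *opaque)
{
    VirtIOSCSI *s = opaque;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIVring *fixed[] = { s->ctrl_vring, s->event_vring };
    uint32_t i;

    /* Unlike the per-cmd-queue bitmap, this also covers ctrl and event. */
    for (i = 0; i < ARRAY_SIZE(fixed); i++) {
        if (fixed[i] && fixed[i]->notify_pending) {
            fixed[i]->notify_pending = false;
            event_notifier_set(&fixed[i]->guest_notifier);
        }
    }

    for (i = 0; i < vs->conf.num_queues; i++) {
        if (s->cmd_vrings[i] && s->cmd_vrings[i]->notify_pending) {
            s->cmd_vrings[i]->notify_pending = false;
            event_notifier_set(&s->cmd_vrings[i]->guest_notifier);
        }
    }
}

Clearing notify_pending before setting the notifier means a completion that
lands while the BH runs just re-schedules it rather than being lost.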

Fam


