qemu-devel

Re: [Qemu-devel] [PULL v2 for-2.0 13/24] dataplane: replace internal thread with IOThread


From: Christian Borntraeger
Subject: Re: [Qemu-devel] [PULL v2 for-2.0 13/24] dataplane: replace internal thread with IOThread
Date: Mon, 17 Mar 2014 16:52:59 +0100
User-agent: Mozilla/5.0 (X11; Linux i686; rv:24.0) Gecko/20100101 Thunderbird/24.3.0

This causes the following bug during managedsave of an s390 guest that has
multiple disks using dataplane.

(gdb) bt
#0  object_deinit (type=0x0, obj=0x807e8750) at /home/cborntra/REPOS/qemu/qom/object.c:410
#1  object_finalize (data=0x807e8750) at /home/cborntra/REPOS/qemu/qom/object.c:424
#2  object_unref (obj=0x807e8750) at /home/cborntra/REPOS/qemu/qom/object.c:726
#3  0x000000008011da60 in object_property_del (obj=0x807e8d80, name=0x807e8d60 "virtio-blk", errp=<optimized out>) at /home/cborntra/REPOS/qemu/qom/object.c:783
#4  0x000000008011dc0e in object_property_del_child (errp=0x0, child=0x807e8750, obj=0x807e8d80) at /home/cborntra/REPOS/qemu/qom/object.c:386
#5  object_unparent (obj=0x807e8750) at /home/cborntra/REPOS/qemu/qom/object.c:403
#6  0x0000000080183898 in virtio_blk_data_plane_destroy (s=0x8088a750) at /home/cborntra/REPOS/qemu/hw/block/dataplane/virtio-blk.c:430
#7  0x00000000801845a0 in virtio_blk_migration_state_changed (notifier=0x807ebf20, data=0x80314088 <current_migration.25446>) at /home/cborntra/REPOS/qemu/hw/block/virtio-blk.c:666
#8  0x00000000802161f8 in notifier_list_notify (address@hidden <migration_state_notifiers>, address@hidden <current_migration.25446>) at /home/cborntra/REPOS/qemu/util/notify.c:39
#9  0x00000000800ccecc in migrate_fd_connect (address@hidden <current_migration.25446>) at /home/cborntra/REPOS/qemu/migration.c:696
#10 0x00000000800cae9c in fd_start_outgoing_migration (address@hidden <current_migration.25446>, fdname=<optimized out>, address@hidden) at /home/cborntra/REPOS/qemu/migration-fd.c:42
#11 0x00000000800cc986 in qmp_migrate (uri=0x808f6190 "fd:migrate", has_blk=<optimized out>, blk=<optimized out>, has_inc=<optimized out>, address@hidden, has_detach=true, detach=true, errp=0x3fffff29998) at /home/cborntra/REPOS/qemu/migration.c:450
#12 0x00000000801166f6 in qmp_marshal_input_migrate (mon=<optimized out>, qdict=<optimized out>, ret=<optimized out>) at qmp-marshal.c:3285
#13 0x00000000801b252a in qmp_call_cmd (cmd=<optimized out>, params=0x808f6280, mon=0x808150d0) at /home/cborntra/REPOS/qemu/monitor.c:4760
#14 handle_qmp_command (parser=<optimized out>, tokens=<optimized out>) at /home/cborntra/REPOS/qemu/monitor.c:4826
#15 0x0000000080202f66 in json_message_process_token (lexer=0x80815050, token=0x808f3e20, type=<optimized out>, x=<optimized out>, y=9) at /home/cborntra/REPOS/qemu/qobject/json-streamer.c:87
#16 0x000000008021b36a in json_lexer_feed_char (address@hidden, ch=<optimized out>, address@hidden) at /home/cborntra/REPOS/qemu/qobject/json-lexer.c:303
#17 0x000000008021b4ec in json_lexer_feed (lexer=0x80815050, buffer=<optimized out>, size=<optimized out>) at /home/cborntra/REPOS/qemu/qobject/json-lexer.c:356
#18 0x00000000802031a2 in json_message_parser_feed (parser=<optimized out>, buffer=<optimized out>, size=<optimized out>) at /home/cborntra/REPOS/qemu/qobject/json-streamer.c:110
#19 0x00000000801b015a in monitor_control_read (opaque=<optimized out>, buf=<optimized out>, size=<optimized out>) at /home/cborntra/REPOS/qemu/monitor.c:4847
#20 0x00000000801023e2 in qemu_chr_be_write (len=1, buf=0x3fffff29e18 "}[A", s=0x807cdf40) at /home/cborntra/REPOS/qemu/qemu-char.c:165
#21 tcp_chr_read (chan=<optimized out>, cond=<optimized out>, opaque=0x807cdf40) at /home/cborntra/REPOS/qemu/qemu-char.c:2487
#22 0x000003fffd54005a in g_main_context_dispatch () from /lib64/libglib-2.0.so.0
#23 0x00000000800ca9b2 in glib_pollfds_poll () at /home/cborntra/REPOS/qemu/main-loop.c:190
#24 os_host_main_loop_wait (timeout=<optimized out>) at /home/cborntra/REPOS/qemu/main-loop.c:235
#25 main_loop_wait (address@hidden) at /home/cborntra/REPOS/qemu/main-loop.c:484
#26 0x0000000080014f2a in main_loop () at /home/cborntra/REPOS/qemu/vl.c:2053
#27 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at /home/cborntra/REPOS/qemu/vl.c:4524


Reverting commit 48ff269272f18d2b8fa53cb08365df417588f585 seems to fix this
particular bug.
Any ideas?

Christian

On 13/03/14 15:10, Stefan Hajnoczi wrote:
> Today virtio-blk dataplane uses a 1:1 device-per-thread model.  Now that
> IOThreads have been introduced, we can generalize this to an N:M mapping
> of devices to threads.
> 
> This patch drops thread code from dataplane in favor of running inside
> an IOThread AioContext.
> 
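[For context on the N:M pairing: the patch below adds an x-iothread link
property, so several devices can name the same IOThread object.  A minimal
sketch of that wiring, using only helpers the patch itself calls; the id
"iothread0" and the wrapper function are hypothetical, and error handling
is omitted:

    #include "sysemu/iothread.h"  /* iothread_find(), iothread_get_aio_context() */
    #include "block/aio.h"        /* aio_context_acquire()/aio_context_release() */

    /* Sketch: resolve a user-created IOThread (-object iothread,id=iothread0)
     * and hook an event notifier into its AioContext.  Any number of devices
     * can do this against the same IOThread, giving N devices on M threads.
     */
    static void attach_to_shared_iothread(EventNotifier *e,
                                          EventNotifierHandler *handler)
    {
        IOThread *iot = iothread_find("iothread0");
        AioContext *ctx = iothread_get_aio_context(iot);

        aio_context_acquire(ctx);          /* serialize with the IOThread */
        aio_set_event_notifier(ctx, e, handler);
        aio_context_release(ctx);
    }
]
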
> As a bonus we solve the case where a guest keeps submitting I/O requests
> while dataplane is trying to stop.  Previously the dataplane thread
> would continue to process requests until the guest gave it a break.
> Now we can shut down in bounded time thanks to
> aio_context_acquire/release.
> 
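[The "bounded time" claim follows from the drain loop in the
virtio_blk_data_plane_stop() hunk below: once the host notifier is
unhooked, no new requests can enter, so the loop can only run down the
in-flight count.  A simplified, annotated sketch of that stop protocol,
with names taken from the patch and the QEMU global mutex assumed held,
as the patch's context comments state:

    aio_context_acquire(s->ctx);   /* the IOThread cannot run callbacks now */

    /* Unhook the guest doorbell first: s->num_reqs can only decrease. */
    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);

    /* Each blocking aio_poll() completes pending work; with no new
     * submissions possible the loop terminates, unlike the old dataplane
     * thread, which kept serving a guest that never paused.
     */
    while (s->num_reqs > 0) {
        aio_poll(s->ctx, true);
    }

    aio_context_release(s->ctx);
]
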
> Signed-off-by: Stefan Hajnoczi <address@hidden>
> ---
>  hw/block/dataplane/virtio-blk.c | 96 +++++++++++++++++++++++------------------
>  include/hw/virtio/virtio-blk.h  |  8 +++-
>  2 files changed, 60 insertions(+), 44 deletions(-)
> 
> diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
> index d1c7ad4..a5afc21 100644
> --- a/hw/block/dataplane/virtio-blk.c
> +++ b/hw/block/dataplane/virtio-blk.c
> @@ -23,6 +23,7 @@
>  #include "virtio-blk.h"
>  #include "block/aio.h"
>  #include "hw/virtio/virtio-bus.h"
> +#include "monitor/monitor.h" /* for object_add() */
> 
>  enum {
>      SEG_MAX = 126,                  /* maximum number of I/O segments */
> @@ -44,8 +45,6 @@ struct VirtIOBlockDataPlane {
>      bool started;
>      bool starting;
>      bool stopping;
> -    QEMUBH *start_bh;
> -    QemuThread thread;
> 
>      VirtIOBlkConf *blk;
>      int fd;                         /* image file descriptor */
> @@ -59,12 +58,14 @@ struct VirtIOBlockDataPlane {
>       * (because you don't own the file descriptor or handle; you just
>       * use it).
>       */
> +    IOThread *iothread;
> +    bool internal_iothread;
>      AioContext *ctx;
>      EventNotifier io_notifier;      /* Linux AIO completion */
>      EventNotifier host_notifier;    /* doorbell */
> 
>      IOQueue ioqueue;                /* Linux AIO queue (should really be per
> -                                       dataplane thread) */
> +                                       IOThread) */
>      VirtIOBlockRequest requests[REQ_MAX]; /* pool of requests, managed by the
>                                               queue */
> 
> @@ -342,26 +343,7 @@ static void handle_io(EventNotifier *e)
>      }
>  }
> 
> -static void *data_plane_thread(void *opaque)
> -{
> -    VirtIOBlockDataPlane *s = opaque;
> -
> -    while (!s->stopping || s->num_reqs > 0) {
> -        aio_poll(s->ctx, true);
> -    }
> -    return NULL;
> -}
> -
> -static void start_data_plane_bh(void *opaque)
> -{
> -    VirtIOBlockDataPlane *s = opaque;
> -
> -    qemu_bh_delete(s->start_bh);
> -    s->start_bh = NULL;
> -    qemu_thread_create(&s->thread, "data_plane", data_plane_thread,
> -                       s, QEMU_THREAD_JOINABLE);
> -}
> -
> +/* Context: QEMU global mutex held */
>  void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
>                                    VirtIOBlockDataPlane **dataplane,
>                                    Error **errp)
> @@ -408,12 +390,33 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
>      s->fd = fd;
>      s->blk = blk;
> 
> +    if (blk->iothread) {
> +        s->internal_iothread = false;
> +        s->iothread = blk->iothread;
> +        object_ref(OBJECT(s->iothread));
> +    } else {
> +        /* Create per-device IOThread if none specified */
> +        Error *local_err = NULL;
> +
> +        s->internal_iothread = true;
> +        object_add(TYPE_IOTHREAD, vdev->name, NULL, NULL, &local_err);
> +        if (error_is_set(&local_err)) {
> +            error_propagate(errp, local_err);
> +            g_free(s);
> +            return;
> +        }
> +        s->iothread = iothread_find(vdev->name);
> +        assert(s->iothread);
> +    }
> +    s->ctx = iothread_get_aio_context(s->iothread);
> +
>      /* Prevent block operations that conflict with data plane thread */
>      bdrv_set_in_use(blk->conf.bs, 1);
> 
>      *dataplane = s;
>  }
> 
> +/* Context: QEMU global mutex held */
>  void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
>  {
>      if (!s) {
> @@ -422,9 +425,14 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
> 
>      virtio_blk_data_plane_stop(s);
>      bdrv_set_in_use(s->blk->conf.bs, 0);
> +    object_unref(OBJECT(s->iothread));
> +    if (s->internal_iothread) {
> +        object_unparent(OBJECT(s->iothread));
> +    }
>      g_free(s);
>  }
> 
> +/* Context: QEMU global mutex held */
>  void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
>  {
>      BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
> @@ -448,8 +456,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
>          return;
>      }
> 
> -    s->ctx = aio_context_new();
> -
>      /* Set up guest notifier (irq) */
>      if (k->set_guest_notifiers(qbus->parent, 1, true) != 0) {
>          fprintf(stderr, "virtio-blk failed to set guest notifier, "
> @@ -464,7 +470,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
>          exit(1);
>      }
>      s->host_notifier = *virtio_queue_get_host_notifier(vq);
> -    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
> 
>      /* Set up ioqueue */
>      ioq_init(&s->ioqueue, s->fd, REQ_MAX);
> @@ -472,7 +477,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
>          ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
>      }
>      s->io_notifier = *ioq_get_notifier(&s->ioqueue);
> -    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
> 
>      s->starting = false;
>      s->started = true;
> @@ -481,11 +485,14 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
>      /* Kick right away to begin processing requests already in vring */
>      event_notifier_set(virtio_queue_get_host_notifier(vq));
> 
> -    /* Spawn thread in BH so it inherits iothread cpusets */
> -    s->start_bh = qemu_bh_new(start_data_plane_bh, s);
> -    qemu_bh_schedule(s->start_bh);
> +    /* Get this show started by hooking up our callbacks */
> +    aio_context_acquire(s->ctx);
> +    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
> +    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
> +    aio_context_release(s->ctx);
>  }
> 
> +/* Context: QEMU global mutex held */
>  void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
>  {
>      BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
> @@ -496,27 +503,32 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
>      s->stopping = true;
>      trace_virtio_blk_data_plane_stop(s);
> 
> -    /* Stop thread or cancel pending thread creation BH */
> -    if (s->start_bh) {
> -        qemu_bh_delete(s->start_bh);
> -        s->start_bh = NULL;
> -    } else {
> -        aio_notify(s->ctx);
> -        qemu_thread_join(&s->thread);
> +    aio_context_acquire(s->ctx);
> +
> +    /* Stop notifications for new requests from guest */
> +    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
> +
> +    /* Complete pending requests */
> +    while (s->num_reqs > 0) {
> +        aio_poll(s->ctx, true);
>      }
> 
> +    /* Stop ioq callbacks (there are no pending requests left) */
>      aio_set_event_notifier(s->ctx, &s->io_notifier, NULL);
> -    ioq_cleanup(&s->ioqueue);
> 
> -    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
> -    k->set_host_notifier(qbus->parent, 0, false);
> +    aio_context_release(s->ctx);
> 
> -    aio_context_unref(s->ctx);
> +    /* Sync vring state back to virtqueue so that non-dataplane request
> +     * processing can continue when we disable the host notifier below.
> +     */
> +    vring_teardown(&s->vring, s->vdev, 0);
> +
> +    ioq_cleanup(&s->ioqueue);
> +    k->set_host_notifier(qbus->parent, 0, false);
> 
>      /* Clean up guest notifier (irq) */
>      k->set_guest_notifiers(qbus->parent, 1, false);
> 
> -    vring_teardown(&s->vring, s->vdev, 0);
>      s->started = false;
>      s->stopping = false;
>  }
> diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
> index 41885da..e4c41ff 100644
> --- a/include/hw/virtio/virtio-blk.h
> +++ b/include/hw/virtio/virtio-blk.h
> @@ -16,6 +16,7 @@
> 
>  #include "hw/virtio/virtio.h"
>  #include "hw/block/block.h"
> +#include "sysemu/iothread.h"
> 
>  #define TYPE_VIRTIO_BLK "virtio-blk-device"
>  #define VIRTIO_BLK(obj) \
> @@ -106,6 +107,7 @@ struct virtio_scsi_inhdr
>  struct VirtIOBlkConf
>  {
>      BlockConf conf;
> +    IOThread *iothread;
>      char *serial;
>      uint32_t scsi;
>      uint32_t config_wce;
> @@ -140,13 +142,15 @@ typedef struct VirtIOBlock {
>          DEFINE_BLOCK_CHS_PROPERTIES(_state, _field.conf),                     \
>          DEFINE_PROP_STRING("serial", _state, _field.serial),                  \
>          DEFINE_PROP_BIT("config-wce", _state, _field.config_wce, 0, true),    \
> -        DEFINE_PROP_BIT("scsi", _state, _field.scsi, 0, true)
> +        DEFINE_PROP_BIT("scsi", _state, _field.scsi, 0, true),                \
> +        DEFINE_PROP_IOTHREAD("x-iothread", _state, _field.iothread)
>  #else
>  #define DEFINE_VIRTIO_BLK_PROPERTIES(_state, _field)                          \
>          DEFINE_BLOCK_PROPERTIES(_state, _field.conf),                         \
>          DEFINE_BLOCK_CHS_PROPERTIES(_state, _field.conf),                     \
>          DEFINE_PROP_STRING("serial", _state, _field.serial),                  \
> -        DEFINE_PROP_BIT("config-wce", _state, _field.config_wce, 0, true)
> +        DEFINE_PROP_BIT("config-wce", _state, _field.config_wce, 0, true),    \
> +        DEFINE_PROP_IOTHREAD("x-iothread", _state, _field.iothread)
>  #endif /* __linux__ */
> 
>  void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk);
> 



