qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 15/24] block: switch bdrv_read()/bdrv_write() to


From: Pierre Riteau
Subject: Re: [Qemu-devel] [PATCH 15/24] block: switch bdrv_read()/bdrv_write() to coroutines
Date: Mon, 24 Oct 2011 17:12:24 +0200

This commit (1c9805a398cc1125b4defa6367172c8c2c0bca9f in Git) breaks qemu-nbd 
for me. I cannot mount any VM image (raw or qcow2 format) with this commit or 
today's HEAD. Previous commit c5fbe57111ef59c315a71cd80e8b0af59e36ff21 works 
fine.

The qemu-nbd process hangs while reading the disk:

31175 ?        Ss     0:00 qemu-nbd --connect=/dev/nbd0 /tmp/lenny-vm.raw
31176 ?        S      0:00  \_ qemu-nbd --connect=/dev/nbd0 /tmp/lenny-vm.raw
31177 ?        D      0:00  \_ qemu-nbd --connect=/dev/nbd0 /tmp/lenny-vm.raw

In dmesg I see only:

[18304.541058]  nbd0:

Then, if I pkill -9 nbd, dmesg gets more verbose:

[18467.288183] nbd (pid 31175: qemu-nbd) got signal 9
[18467.303175] nbd0: shutting down socket
[18467.314446] nbd0: Receive control failed (result -4)
[18467.329354] end_request: I/O error, dev nbd0, sector 0
[18467.344771] __ratelimit: 38 callbacks suppressed
[18467.358620] Buffer I/O error on device nbd0, logical block 0
[18467.375591] Buffer I/O error on device nbd0, logical block 1
[18467.392560] Buffer I/O error on device nbd0, logical block 2
[18467.409530] Buffer I/O error on device nbd0, logical block 3
[18467.426508] nbd0: queue cleared
[18467.435962] nbd0: Attempted send on closed socket
[18467.450095] end_request: I/O error, dev nbd0, sector 0
[18467.465527] Buffer I/O error on device nbd0, logical block 0
[18467.482496] Buffer I/O error on device nbd0, logical block 1
[18467.499464] Buffer I/O error on device nbd0, logical block 2
[18467.516433] Buffer I/O error on device nbd0, logical block 3
[18467.533418] nbd0: Attempted send on closed socket
[18467.547539] end_request: I/O error, dev nbd0, sector 0
[18467.562945] Buffer I/O error on device nbd0, logical block 0
[18467.579917] Buffer I/O error on device nbd0, logical block 1
[18467.596897] nbd0: Attempted send on closed socket
[18467.611022] end_request: I/O error, dev nbd0, sector 0
[18467.626442] nbd0: Attempted send on closed socket
[18467.640569] end_request: I/O error, dev nbd0, sector 0
[18467.655984] ldm_validate_partition_table(): Disk read failed.
[18467.673242] nbd0: Attempted send on closed socket
[18467.687369] end_request: I/O error, dev nbd0, sector 0
[18467.702788] nbd0: Attempted send on closed socket
[18467.716915] end_request: I/O error, dev nbd0, sector 0
[18467.732359] nbd0: Attempted send on closed socket
[18467.746487] end_request: I/O error, dev nbd0, sector 0
[18467.761931] nbd0: Attempted send on closed socket
[18467.776058] end_request: I/O error, dev nbd0, sector 0
[18467.791473] Dev nbd0: unable to read RDB block 0
[18467.805348] nbd0: Attempted send on closed socket
[18467.819479] end_request: I/O error, dev nbd0, sector 0
[18467.834897] nbd0: Attempted send on closed socket
[18467.849025] end_request: I/O error, dev nbd0, sector 0
[18467.864446] nbd0: Attempted send on closed socket
[18467.878572] end_request: I/O error, dev nbd0, sector 0
[18467.893985]  unable to read partition table

-- 
Pierre Riteau -- PhD student, Myriads team, IRISA, Rennes, France
http://perso.univ-rennes1.fr/pierre.riteau/

On 14 Oct 2011, at 18:49, Kevin Wolf wrote:

> From: Stefan Hajnoczi <address@hidden>
> 
> The bdrv_read()/bdrv_write() functions call .bdrv_read()/.bdrv_write().
> They should go through bdrv_co_do_readv() and bdrv_co_do_writev()
> instead in order to unify request processing code across sync, aio, and
> coroutine interfaces.  This is also an important step towards removing
> BlockDriverState .bdrv_read()/.bdrv_write() in the future.
> 
> Signed-off-by: Stefan Hajnoczi <address@hidden>
> Signed-off-by: Kevin Wolf <address@hidden>
> ---
> block.c |  112 +++++++++++++++++++++++++++++++++++----------------------------
> 1 files changed, 62 insertions(+), 50 deletions(-)
> 
> diff --git a/block.c b/block.c
> index f4731ec..ae8fc80 100644
> --- a/block.c
> +++ b/block.c
> @@ -44,6 +44,8 @@
> #include <windows.h>
> #endif
> 
> +#define NOT_DONE 0x7fffffff /* used while emulated sync operation in 
> progress */
> +
> static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
> static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
>         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
> @@ -74,6 +76,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState 
> *bs,
> static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs);
> static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
> +static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
> +    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
> 
> static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
>     QTAILQ_HEAD_INITIALIZER(bdrv_states);
> @@ -1042,30 +1046,69 @@ static inline bool bdrv_has_async_flush(BlockDriver 
> *drv)
>     return drv->bdrv_aio_flush != bdrv_aio_flush_em;
> }
> 
> -/* return < 0 if error. See bdrv_write() for the return codes */
> -int bdrv_read(BlockDriverState *bs, int64_t sector_num,
> -              uint8_t *buf, int nb_sectors)
> +typedef struct RwCo {
> +    BlockDriverState *bs;
> +    int64_t sector_num;
> +    int nb_sectors;
> +    QEMUIOVector *qiov;
> +    bool is_write;
> +    int ret;
> +} RwCo;
> +
> +static void coroutine_fn bdrv_rw_co_entry(void *opaque)
> {
> -    BlockDriver *drv = bs->drv;
> +    RwCo *rwco = opaque;
> 
> -    if (!drv)
> -        return -ENOMEDIUM;
> +    if (!rwco->is_write) {
> +        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
> +                                     rwco->nb_sectors, rwco->qiov);
> +    } else {
> +        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
> +                                      rwco->nb_sectors, rwco->qiov);
> +    }
> +}
> 
> -    if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
> -        QEMUIOVector qiov;
> -        struct iovec iov = {
> -            .iov_base = (void *)buf,
> -            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
> -        };
> +/*
> + * Process a synchronous request using coroutines
> + */
> +static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
> +                      int nb_sectors, bool is_write)
> +{
> +    QEMUIOVector qiov;
> +    struct iovec iov = {
> +        .iov_base = (void *)buf,
> +        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
> +    };
> +    Coroutine *co;
> +    RwCo rwco = {
> +        .bs = bs,
> +        .sector_num = sector_num,
> +        .nb_sectors = nb_sectors,
> +        .qiov = &qiov,
> +        .is_write = is_write,
> +        .ret = NOT_DONE,
> +    };
> 
> -        qemu_iovec_init_external(&qiov, &iov, 1);
> -        return bdrv_co_readv(bs, sector_num, nb_sectors, &qiov);
> -    }
> +    qemu_iovec_init_external(&qiov, &iov, 1);
> 
> -    if (bdrv_check_request(bs, sector_num, nb_sectors))
> -        return -EIO;
> +    if (qemu_in_coroutine()) {
> +        /* Fast-path if already in coroutine context */
> +        bdrv_rw_co_entry(&rwco);
> +    } else {
> +        co = qemu_coroutine_create(bdrv_rw_co_entry);
> +        qemu_coroutine_enter(co, &rwco);
> +        while (rwco.ret == NOT_DONE) {
> +            qemu_aio_wait();
> +        }
> +    }
> +    return rwco.ret;
> +}
> 
> -    return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
> +/* return < 0 if error. See bdrv_write() for the return codes */
> +int bdrv_read(BlockDriverState *bs, int64_t sector_num,
> +              uint8_t *buf, int nb_sectors)
> +{
> +    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
> }
> 
> static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
> @@ -1105,36 +1148,7 @@ static void set_dirty_bitmap(BlockDriverState *bs, 
> int64_t sector_num,
> int bdrv_write(BlockDriverState *bs, int64_t sector_num,
>                const uint8_t *buf, int nb_sectors)
> {
> -    BlockDriver *drv = bs->drv;
> -
> -    if (!bs->drv)
> -        return -ENOMEDIUM;
> -
> -    if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
> -        QEMUIOVector qiov;
> -        struct iovec iov = {
> -            .iov_base = (void *)buf,
> -            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
> -        };
> -
> -        qemu_iovec_init_external(&qiov, &iov, 1);
> -        return bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
> -    }
> -
> -    if (bs->read_only)
> -        return -EACCES;
> -    if (bdrv_check_request(bs, sector_num, nb_sectors))
> -        return -EIO;
> -
> -    if (bs->dirty_bitmap) {
> -        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
> -    }
> -
> -    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
> -        bs->wr_highest_sector = sector_num + nb_sectors - 1;
> -    }
> -
> -    return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
> +    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
> }
> 
> int bdrv_pread(BlockDriverState *bs, int64_t offset,
> @@ -2912,8 +2926,6 @@ static void bdrv_rw_em_cb(void *opaque, int ret)
>     *(int *)opaque = ret;
> }
> 
> -#define NOT_DONE 0x7fffffff
> -
> static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
>                         uint8_t *buf, int nb_sectors)
> {
> -- 
> 1.7.6.4
> 
> 




reply via email to

[Prev in Thread] Current Thread [Next in Thread]