qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests


From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
Date: Wed, 18 Feb 2015 08:52:49 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Thunderbird/31.4.0


On 17/02/2015 22:33, Max Reitz wrote:
> Concurrently modifying the bmap is not a good idea; this patch adds a
> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
> can go wrong without.
> 
> Signed-off-by: Max Reitz <address@hidden>
> ---
>  block/vdi.c | 23 ++++++++++++++++++++---
>  1 file changed, 20 insertions(+), 3 deletions(-)
> 
> diff --git a/block/vdi.c b/block/vdi.c
> index 74030c6..c5ff428 100644
> --- a/block/vdi.c
> +++ b/block/vdi.c
> @@ -51,6 +51,7 @@
>  
>  #include "qemu-common.h"
>  #include "block/block_int.h"
> +#include "block/coroutine.h"
>  #include "qemu/module.h"
>  #include "migration/migration.h"
>  
> @@ -196,6 +197,8 @@ typedef struct {
>      /* VDI header (converted to host endianness). */
>      VdiHeader header;
>  
> +    CoMutex bmap_lock;
> +
>      Error *migration_blocker;
>  } BDRVVdiState;
>  
> @@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
>          goto fail_free_bmap;
>      }
>  
> +    qemu_co_mutex_init(&s->bmap_lock);
> +
>      /* Disable migration when vdi images are used */
>      error_set(&s->migration_blocker,
>                QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
> @@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
>                 n_sectors, sector_num);
>  
>          /* prepare next AIO request */
> +        if (!block) {
> +            qemu_co_mutex_lock(&s->bmap_lock);
> +        }
>          bmap_entry = le32_to_cpu(s->bmap[block_index]);
>          if (!VDI_IS_ALLOCATED(bmap_entry)) {
>              /* Allocate new block and write to it. */
> @@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
>                 (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
>              ret = bdrv_write(bs->file, offset, block, s->block_sectors);
>          } else {
> -            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
> -                              (uint64_t)bmap_entry * s->block_sectors +
> -                              sector_in_block;
> +            uint64_t offset;
> +
> +            qemu_co_mutex_unlock(&s->bmap_lock);

This qemu_co_mutex_unlock should only be done "if (!block)", because you
can have two iterations of the loop, the first going down the "then" and
the second going down the "else".  In that case you must not give away
the lock, because the lock will be needed when you write the bitmap.

> +
> +            offset = s->header.offset_data / SECTOR_SIZE +
> +                     (uint64_t)bmap_entry * s->block_sectors +
> +                     sector_in_block;
>              ret = bdrv_write(bs->file, offset, buf, n_sectors);
>          }
>  
> @@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
>  
>      logout("finished data write\n");
>      if (ret < 0) {
> +        if (block) {
> +            qemu_co_mutex_unlock(&s->bmap_lock);
> +        }
>          return ret;
>      }
>  
> @@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
>          logout("will write %u block map sectors starting from entry %u\n",
>                 n_sectors, bmap_first);
>          ret = bdrv_write(bs->file, offset, base, n_sectors);
> +
> +        qemu_co_mutex_unlock(&s->bmap_lock);
>      }
>  
>      return ret;
> 



reply via email to

[Prev in Thread] Current Thread [Next in Thread]