From: Fam Zheng
Subject: Re: [Qemu-block] [Qemu-devel] [PATCH 12/17] block: protect tracked_requests and flush_queue with reqs_lock
Date: Thu, 4 May 2017 15:30:58 +0800
User-agent: Mutt/1.8.0 (2017-02-23)

On Thu, 04/20 14:00, Paolo Bonzini wrote:
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  block.c                   |  1 +
>  block/io.c                | 20 +++++++++++++++++---
>  include/block/block_int.h | 12 +++++++-----
>  3 files changed, 25 insertions(+), 8 deletions(-)
> 
> diff --git a/block.c b/block.c
> index f1aec36..3b2ed29 100644
> --- a/block.c
> +++ b/block.c
> @@ -234,6 +234,7 @@ BlockDriverState *bdrv_new(void)
>          QLIST_INIT(&bs->op_blockers[i]);
>      }
>      notifier_with_return_list_init(&bs->before_write_notifiers);
> +    qemu_co_mutex_init(&bs->reqs_lock);
>      bs->refcnt = 1;
>      bs->aio_context = qemu_get_aio_context();
>  
> diff --git a/block/io.c b/block/io.c
> index d17564b..7af9d47 100644
> --- a/block/io.c
> +++ b/block/io.c
> @@ -378,8 +378,10 @@ static void tracked_request_end(BdrvTrackedRequest *req)
>          atomic_dec(&req->bs->serialising_in_flight);
>      }
>  
> +    qemu_co_mutex_lock(&req->bs->reqs_lock);
>      QLIST_REMOVE(req, list);
>      qemu_co_queue_restart_all(&req->wait_queue);
> +    qemu_co_mutex_unlock(&req->bs->reqs_lock);
>  }
>  
>  /**
> @@ -404,7 +406,9 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
>  
>      qemu_co_queue_init(&req->wait_queue);
>  
> +    qemu_co_mutex_lock(&bs->reqs_lock);
>      QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
> +    qemu_co_mutex_unlock(&bs->reqs_lock);
>  }
>  
>  static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
> @@ -526,6 +530,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
>  
>      do {
>          retry = false;
> +        qemu_co_mutex_lock(&bs->reqs_lock);
>          QLIST_FOREACH(req, &bs->tracked_requests, list) {
>              if (req == self || (!req->serialising && !self->serialising)) {
>                  continue;
> @@ -544,7 +549,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
>                   * (instead of producing a deadlock in the former case). */
>                  if (!req->waiting_for) {
>                      self->waiting_for = req;
> -                    qemu_co_queue_wait(&req->wait_queue, NULL);
> +                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
>                      self->waiting_for = NULL;
>                      retry = true;
>                      waited = true;
> @@ -552,6 +557,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
>                  }
>              }
>          }
> +        qemu_co_mutex_unlock(&bs->reqs_lock);
>      } while (retry);
>  
>      return waited;
> @@ -2302,11 +2308,13 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
>      current_gen = atomic_read(&bs->write_gen);
>  
>      /* Wait until any previous flushes are completed */
> +    qemu_co_mutex_lock(&bs->reqs_lock);
>      while (bs->active_flush_req) {
> -        qemu_co_queue_wait(&bs->flush_queue, NULL);
> +        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
>      }
>  
>      bs->active_flush_req = true;
> +    qemu_co_mutex_unlock(&bs->reqs_lock);
>  
>      /* Write back all layers by calling one driver function */
>      if (bs->drv->bdrv_co_flush) {
> @@ -2328,10 +2336,14 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
>          goto flush_parent;
>      }
>  
> -    /* Check if we really need to flush anything */
> +    /* Check if we really need to flush anything
> +     * TODO: use int and atomic access */
> +    qemu_co_mutex_lock(&bs->reqs_lock);
>      if (bs->flushed_gen == current_gen) {

Should the atomic read of current_gen be moved down here, to avoid a TOCTOU race?
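
To make the question concrete, a rough, untested sketch of what I mean (not
part of the patch): take the write_gen snapshot under reqs_lock, right where
flushed_gen is compared, so no writer can bump the generation between the
read and the check:

    /* Untested sketch, replacing the earlier unlocked read of
     * bs->write_gen at the top of bdrv_co_flush: snapshot the
     * generation inside the same critical section that compares
     * it against flushed_gen.
     */
    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);
    if (bs->flushed_gen == current_gen) {
        qemu_co_mutex_unlock(&bs->reqs_lock);
        goto flush_parent;
    }
    qemu_co_mutex_unlock(&bs->reqs_lock);

The out: path would then record this later snapshot in bs->flushed_gen.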

> +        qemu_co_mutex_unlock(&bs->reqs_lock);
>          goto flush_parent;
>      }
> +    qemu_co_mutex_unlock(&bs->reqs_lock);
>  
>      BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
>      if (bs->drv->bdrv_co_flush_to_disk) {
> @@ -2375,12 +2387,14 @@ flush_parent:
>      ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
>  out:
>      /* Notify any pending flushes that we have completed */
> +    qemu_co_mutex_lock(&bs->reqs_lock);
>      if (ret == 0) {
>          bs->flushed_gen = current_gen;
>      }
>      bs->active_flush_req = false;
>      /* Return value is ignored - it's ok if wait queue is empty */
>      qemu_co_queue_next(&bs->flush_queue);
> +    qemu_co_mutex_unlock(&bs->reqs_lock);
>  
>  early_exit:
>      bdrv_dec_in_flight(bs);


