From: Greg Kurz
Subject: Re: [Qemu-devel] [PATCH 11/11] migration: normalize locking in migration/savevm.c
Date: Wed, 18 Nov 2015 14:59:26 +0100

On Tue, 17 Nov 2015 12:08:31 +0300
"Denis V. Lunev" <address@hidden> wrote:

> basically all bdrv_* operations must be called under aio_context_acquire
> except ones with bdrv_all prefix.
> 
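Just to make sure I read the rule right (a rough sketch of my own, reusing
the variable names from the hunks below, not code lifted from the patch):
a bdrv_* call that operates on a single BlockDriverState is bracketed by
that device's AioContext, while the bdrv_all_* helpers are called without
the caller taking any context:

    AioContext *aio_context = bdrv_get_aio_context(bs);

    aio_context_acquire(aio_context);
    ret = bdrv_snapshot_find(bs, &sn, name);   /* any bdrv_* call on bs */
    aio_context_release(aio_context);

    /* bdrv_all_* helpers are called with no AioContext held */
    bs = bdrv_all_find_vmstate_bs();
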
> Signed-off-by: Denis V. Lunev <address@hidden>
> Reviewed-by: Stefan Hajnoczi <address@hidden>
> Reviewed-by: Fam Zheng <address@hidden>
> CC: Juan Quintela <address@hidden>
> CC: Kevin Wolf <address@hidden>
> ---
>  migration/savevm.c | 19 ++++++++++++++++++-
>  1 file changed, 18 insertions(+), 1 deletion(-)
> 
> diff --git a/migration/savevm.c b/migration/savevm.c
> index a845e69..09e6a43 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1917,6 +1917,7 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
>      struct tm tm;
>      const char *name = qdict_get_try_str(qdict, "name");
>      Error *local_err = NULL;
> +    AioContext *aio_context;
> 
>      if (!bdrv_all_can_snapshot(&bs)) {
>          monitor_printf(mon, "Device '%s' is writable but does not "
> @@ -1938,6 +1939,7 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
>          monitor_printf(mon, "No block device can accept snapshots\n");
>          return;
>      }
> +    aio_context = bdrv_get_aio_context(bs);
> 
>      saved_vm_running = runstate_is_running();
> 
> @@ -1948,6 +1950,8 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
>      }
>      vm_stop(RUN_STATE_SAVE_VM);
> 
> +    aio_context_acquire(aio_context);
> +
>      memset(sn, 0, sizeof(*sn));
> 
>      /* fill auxiliary fields */
> @@ -1992,6 +1996,7 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
>      }
> 
>   the_end:
> +    aio_context_release(aio_context);
>      if (saved_vm_running) {
>          vm_start();
>      }
> @@ -2030,6 +2035,7 @@ int load_vmstate(const char *name)
>      QEMUSnapshotInfo sn;
>      QEMUFile *f;
>      int ret;
> +    AioContext *aio_context;
> 
>      if (!bdrv_all_can_snapshot(&bs)) {
>          error_report("Device '%s' is writable but does not support snapshots.",
> @@ -2048,9 +2054,12 @@ int load_vmstate(const char *name)
>          error_report("No block device supports snapshots");
>          return -ENOTSUP;
>      }
> +    aio_context = bdrv_get_aio_context(bs);
> 

bs is used for error reporting earlier in this function.

bs_vm_state is the one to be used here.
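
i.e. something along these lines (untested, just to illustrate against
this hunk):

    -    aio_context = bdrv_get_aio_context(bs);
    +    aio_context = bdrv_get_aio_context(bs_vm_state);

so that the context we acquire belongs to the BDS the VM state is actually
read from.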

>      /* Don't even try to load empty VM states */
> +    aio_context_acquire(aio_context);
>      ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
> +    aio_context_release(aio_context);
>      if (ret < 0) {
>          return ret;
>      } else if (sn.vm_state_size == 0) {
> @@ -2078,9 +2087,12 @@ int load_vmstate(const char *name)
> 
>      qemu_system_reset(VMRESET_SILENT);
>      migration_incoming_state_new(f);
> -    ret = qemu_loadvm_state(f);
> 
> +    aio_context_acquire(aio_context);
> +    ret = qemu_loadvm_state(f);
>      qemu_fclose(f);
> +    aio_context_release(aio_context);
> +
>      migration_incoming_state_destroy();
>      if (ret < 0) {
>          error_report("Error %d while loading VM state", ret);
> @@ -2111,14 +2123,19 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
>      int nb_sns, i;
>      int total;
>      int *available_snapshots;
> +    AioContext *aio_context;
> 
>      bs = bdrv_all_find_vmstate_bs();
>      if (!bs) {
>          monitor_printf(mon, "No available block device supports snapshots\n");
>          return;
>      }
> +    aio_context = bdrv_get_aio_context(bs);
> 
> +    aio_context_acquire(aio_context);
>      nb_sns = bdrv_snapshot_list(bs, &sn_tab);
> +    aio_context_release(aio_context);
> +
>      if (nb_sns < 0) {
>          monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
>          return;



