[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH 18/41] block-migration: add lock
From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH 18/41] block-migration: add lock
Date: Mon, 18 Feb 2013 15:35:43 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130110 Thunderbird/17.0.2
Il 18/02/2013 15:33, Orit Wasserman ha scritto:
>> @@ -199,6 +222,7 @@ static void blk_mig_read_cb(void *opaque, int ret)
>>
>> block_mig_state.submitted--;
>> block_mig_state.read_done++;
>> + blk_mig_unlock();
>> assert(block_mig_state.submitted >= 0);
> Shouldn't the assert be under the lock too?
Yes — it works correctly either way, but moving the assert under the lock is clearer and safer.
Paolo
>
>> }
>>
>> @@ -244,7 +268,9 @@ static int mig_save_device_bulk(QEMUFile *f,
>> BlkMigDevState *bmds)
>> blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
>> qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
>>
>> + blk_mig_lock();
>> block_mig_state.submitted++;
>> + blk_mig_unlock();
>>
>> blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
>> nr_sectors, blk_mig_read_cb, blk);
>> @@ -366,8 +392,12 @@ static int mig_save_device_dirty(QEMUFile *f,
>> BlkMigDevState *bmds,
>> int ret = -EIO;
>>
>> for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
>> + blk_mig_lock();
>> if (bmds_aio_inflight(bmds, sector)) {
>> + blk_mig_unlock();
>> bdrv_drain_all();
>> + } else {
>> + blk_mig_unlock();
>> }
>> if (bdrv_get_dirty(bmds->bs, sector)) {
>>
>> @@ -389,8 +419,11 @@ static int mig_save_device_dirty(QEMUFile *f,
>> BlkMigDevState *bmds,
>>
>> blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
>> nr_sectors, blk_mig_read_cb,
>> blk);
>> +
>> + blk_mig_lock();
>> block_mig_state.submitted++;
>> bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
>> + blk_mig_unlock();
>> } else {
>> ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
>> if (ret < 0) {
>> @@ -446,6 +479,7 @@ static int flush_blks(QEMUFile *f)
>> __FUNCTION__, block_mig_state.submitted,
>> block_mig_state.read_done,
>> block_mig_state.transferred);
>>
>> + blk_mig_lock();
>> while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
>> if (qemu_file_rate_limit(f)) {
>> break;
>> @@ -456,7 +490,9 @@ static int flush_blks(QEMUFile *f)
>> }
>>
>> QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
>> + blk_mig_unlock();
>> blk_send(f, blk);
>> + blk_mig_lock();
>>
>> g_free(blk->buf);
>> g_free(blk);
>> @@ -465,6 +501,7 @@ static int flush_blks(QEMUFile *f)
>> block_mig_state.transferred++;
>> assert(block_mig_state.read_done >= 0);
>> }
>> + blk_mig_unlock();
>>
>> DPRINTF("%s Exit submitted %d read_done %d transferred %d\n",
>> __FUNCTION__,
>> block_mig_state.submitted, block_mig_state.read_done,
>> @@ -493,6 +530,7 @@ static void blk_mig_cleanup(void)
>>
>> set_dirty_tracking(0);
>>
>> + blk_mig_lock();
>> while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
>> QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
>> bdrv_set_in_use(bmds->bs, 0);
>> @@ -506,6 +544,7 @@ static void blk_mig_cleanup(void)
>> g_free(blk->buf);
>> g_free(blk);
>> }
>> + blk_mig_unlock();
>> }
>>
>> static void block_migration_cancel(void *opaque)
>> @@ -548,9 +587,11 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
>> blk_mig_reset_dirty_cursor();
>>
>> /* control the rate of transfer */
>> + blk_mig_lock();
>> while ((block_mig_state.submitted +
>> block_mig_state.read_done) * BLOCK_SIZE <
>> qemu_file_get_rate_limit(f)) {
>> + blk_mig_unlock();
>> if (block_mig_state.bulk_completed == 0) {
>> /* first finish the bulk phase */
>> if (blk_mig_save_bulked_block(f) == 0) {
>> @@ -564,11 +605,13 @@ static int block_save_iterate(QEMUFile *f, void
>> *opaque)
>> if (ret < 0) {
>> return ret;
>> }
>> + blk_mig_lock();
>> if (ret != 0) {
>> /* no more dirty blocks */
>> break;
>> }
>> }
>> + blk_mig_unlock();
>>
>> ret = flush_blks(f);
>> if (ret) {
>> @@ -595,7 +638,9 @@ static int block_save_complete(QEMUFile *f, void *opaque)
>>
>> /* we know for sure that save bulk is completed and
>> all async read completed */
>> + blk_mig_lock();
>> assert(block_mig_state.submitted == 0);
>> + blk_mig_unlock();
>>
>> do {
>> ret = blk_mig_save_dirty_block(f, 0);
>> @@ -620,6 +665,7 @@ static uint64_t block_save_pending(QEMUFile *f, void
>> *opaque, uint64_t max_size)
>> /* Estimate pending number of bytes to send */
>> uint64_t pending;
>>
>> + blk_mig_lock();
>> pending = get_remaining_dirty() +
>> block_mig_state.submitted * BLOCK_SIZE +
>> block_mig_state.read_done * BLOCK_SIZE;
>> @@ -628,6 +674,7 @@ static uint64_t block_save_pending(QEMUFile *f, void
>> *opaque, uint64_t max_size)
>> if (pending == 0 && !block_mig_state.bulk_completed) {
>> pending = BLOCK_SIZE;
>> }
>> + blk_mig_unlock();
>>
>> DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
>> return pending;
>> @@ -739,6 +786,7 @@ void blk_mig_init(void)
>> {
>> QSIMPLEQ_INIT(&block_mig_state.bmds_list);
>> QSIMPLEQ_INIT(&block_mig_state.blk_list);
>> + qemu_mutex_init(&block_mig_state.lock);
>>
>> register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
>> &block_mig_state);
>> diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
>> index 96a194b..10becb6 100644
>> --- a/include/qemu/atomic.h
>> +++ b/include/qemu/atomic.h
>> @@ -16,6 +16,7 @@
>> */
>> #define smp_wmb() barrier()
>> #define smp_rmb() barrier()
>> +
> Whitespace damage
>
> Orit
>> /*
>> * We use GCC builtin if it's available, as that can use
>> * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
>>
>
- Re: [Qemu-devel] [PATCH 09/41] migration: flush all data to fd when buffered_flush is called, (continued)
- [Qemu-devel] [PATCH 08/41] qemu-file: temporarily expose qemu_file_set_error and qemu_fflush, Paolo Bonzini, 2013/02/15
- [Qemu-devel] [PATCH 21/41] migration: run setup callbacks out of big lock, Paolo Bonzini, 2013/02/15
- [Qemu-devel] [PATCH 18/41] block-migration: add lock, Paolo Bonzini, 2013/02/15
- [Qemu-devel] [PATCH 23/41] Rename buffered_ to migration_, Paolo Bonzini, 2013/02/15
- [Qemu-devel] [PATCH 20/41] migration: run pending/iterate callbacks out of big lock, Paolo Bonzini, 2013/02/15
- Re: [Qemu-devel] [PATCH 20/41] migration: run pending/iterate callbacks out of big lock, Juan Quintela, 2013/02/22