
Re: [Qemu-devel] [v3 07/13] migration: Rewrite the function ram_save_page()


From: Dr. David Alan Gilbert
Subject: Re: [Qemu-devel] [v3 07/13] migration: Rewrite the function ram_save_page()
Date: Fri, 23 Jan 2015 13:38:32 +0000
User-agent: Mutt/1.5.23 (2014-03-12)

* Liang Li (address@hidden) wrote:
> We rewrite this function so that the code in it can be reused.
> 
> Signed-off-by: Liang Li <address@hidden>
> Signed-off-by: Yang Zhang <address@hidden>
> ---
>  arch_init.c | 107 ++++++++++++++++++++++++++++++++++--------------------------
>  1 file changed, 61 insertions(+), 46 deletions(-)

The title would probably be better as 'Split ram_save_page()' - you
don't actually rewrite the code that much.
Note the important comment below.

> diff --git a/arch_init.c b/arch_init.c
> index 71cc756..0a575ed 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -596,6 +596,63 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
>      }
>  }
>  
> +static int save_zero_and_xbzrle_page(QEMUFile *f, RAMBlock* block,
> +        ram_addr_t offset, bool last_stage, bool *send_async)
> +{
> +    int bytes_sent;
> +    int cont;
> +    ram_addr_t current_addr;
> +    MemoryRegion *mr = block->mr;
> +    uint8_t *p;
> +    int ret;
> +
> +    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
> +
> +    p = memory_region_get_ram_ptr(mr) + offset;
> +
> +    /* In doubt sent page as normal */
> +    bytes_sent = -1;
> +    ret = ram_control_save_page(f, block->offset,
> +                           offset, TARGET_PAGE_SIZE, &bytes_sent);
> +
> +    XBZRLE_cache_lock();
> +
> +    current_addr = block->offset + offset;
> +    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> +        if (ret != RAM_SAVE_CONTROL_DELAYED) {
> +            if (bytes_sent > 0) {
> +                acct_info.norm_pages++;
> +            } else if (bytes_sent == 0) {
> +                acct_info.dup_pages++;
> +            }
> +        }
> +    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
> +        acct_info.dup_pages++;
> +        bytes_sent = save_block_hdr(f, block, offset, cont,
> +                                    RAM_SAVE_FLAG_COMPRESS);
> +        qemu_put_byte(f, 0);
> +        bytes_sent++;
> +        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> +         * page would be stale
> +         */
> +        xbzrle_cache_zero_page(current_addr);
> +    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
> +        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
> +                                      offset, cont, last_stage);
> +        if (!last_stage) {
> +            /* Can't send this cached data async, since the cache page
> +             * might get updated before it gets to the wire
> +             */
> +            if (send_async != NULL) {
> +                *send_async = false;
> +            }
> +        }
> +    }
> +
> +    XBZRLE_cache_unlock();
> +
> +    return bytes_sent;
> +}
>  
>  /* Needs iothread lock! */
>  /* Fix me: there are too many global variables used in migration process. */
> @@ -691,55 +748,15 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
>  {
>      int bytes_sent;
>      int cont;
> -    ram_addr_t current_addr;
>      MemoryRegion *mr = block->mr;
>      uint8_t *p;
> -    int ret;
>      bool send_async = true;
>  
> -    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
> -
> -    p = memory_region_get_ram_ptr(mr) + offset;
> -
> -    /* In doubt sent page as normal */
> -    bytes_sent = -1;
> -    ret = ram_control_save_page(f, block->offset,
> -                           offset, TARGET_PAGE_SIZE, &bytes_sent);
> -
> -    XBZRLE_cache_lock();
> -
> -    current_addr = block->offset + offset;
> -    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> -        if (ret != RAM_SAVE_CONTROL_DELAYED) {
> -            if (bytes_sent > 0) {
> -                acct_info.norm_pages++;
> -            } else if (bytes_sent == 0) {
> -                acct_info.dup_pages++;
> -            }
> -        }
> -    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
> -        acct_info.dup_pages++;
> -        bytes_sent = save_block_hdr(f, block, offset, cont,
> -                                    RAM_SAVE_FLAG_COMPRESS);
> -        qemu_put_byte(f, 0);
> -        bytes_sent++;
> -        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> -         * page would be stale
> -         */
> -        xbzrle_cache_zero_page(current_addr);
> -    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
> -        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
> -                                      offset, cont, last_stage);
> -        if (!last_stage) {
> -            /* Can't send this cached data async, since the cache page
> -             * might get updated before it gets to the wire
> -             */
> -            send_async = false;
> -        }
> -    }
> -
> -    /* XBZRLE overflow or normal page */
> +    bytes_sent = save_zero_and_xbzrle_page(f, block, offset,
> +            last_stage, &send_async);
>      if (bytes_sent == -1) {
> +        cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
> +        p = memory_region_get_ram_ptr(mr) + offset;

I think this breaks XBZRLE; the 'p' pointer is updated by save_xbzrle_page()
when it copies the page into the cache. When that happens, ram_save_page()
must use that cache copy rather than the page in main memory, but here you're
recalculating p from the MemoryRegion.
See commit 1534ee93 'XBZRLE: Fix one XBZRLE corruption issues'.
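
To spell that out: once the helper is split out, the redirected pointer has
to travel back to ram_save_page(). A rough sketch of one way to keep that
behaviour (illustrative only; the 'pp' out-parameter below is a sketch, not
something in the posted patch):

    static int save_zero_and_xbzrle_page(QEMUFile *f, uint8_t **pp,
            RAMBlock *block, ram_addr_t offset, bool last_stage,
            bool *send_async)
    {
        ...
        uint8_t *p = memory_region_get_ram_ptr(block->mr) + offset;
        ...
        } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
            /* save_xbzrle_page() may repoint 'p' at the cached copy */
            bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                          offset, cont, last_stage);
            ...
        }
        *pp = p;    /* hand the (possibly redirected) pointer back */
        ...
    }

and then in ram_save_page():

    bytes_sent = save_zero_and_xbzrle_page(f, &p, block, offset,
                                           last_stage, &send_async);
    if (bytes_sent == -1) {
        cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
        /* do NOT recompute 'p' here; use what the helper handed back */
        bytes_sent = save_block_hdr(f, block, offset, cont,
                                    RAM_SAVE_FLAG_PAGE);
        ...
    }

That way, when save_xbzrle_page() has copied the page into the cache, the
normal-page path still sends the stable cached copy rather than guest memory
that may change underneath it.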

>          bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
>          if (send_async) {
>              qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
> @@ -750,8 +767,6 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
>          acct_info.norm_pages++;
>      }
>  
> -    XBZRLE_cache_unlock();
> -
>      return bytes_sent;
>  }
>  
> -- 
> 1.8.3.1

Dave

--
Dr. David Alan Gilbert / address@hidden / Manchester, UK


