Re: [Qemu-devel] [PATCH 11/51] ram: Move dup_pages into RAMState


From: Peter Xu
Subject: Re: [Qemu-devel] [PATCH 11/51] ram: Move dup_pages into RAMState
Date: Mon, 27 Mar 2017 17:23:50 +0800
User-agent: Mutt/1.5.24 (2015-08-30)

On Thu, Mar 23, 2017 at 09:45:04PM +0100, Juan Quintela wrote:
> Once there rename it to its actual meaning, zero_pages.
> 
> Signed-off-by: Juan Quintela <address@hidden>
> Reviewed-by: Dr. David Alan Gilbert <address@hidden>

Reviewed-by: Peter Xu <address@hidden>

Will post a question below though (not directly related to this patch
but context-wide)...

> ---
>  migration/ram.c | 29 ++++++++++++++++++-----------
>  1 file changed, 18 insertions(+), 11 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index d8428c1..0da133f 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -165,6 +165,9 @@ struct RAMState {
>      uint64_t xbzrle_cache_miss_prev;
>      /* number of iterations at the beginning of period */
>      uint64_t iterations_prev;
> +    /* Accounting fields */
> +    /* number of zero pages.  It used to be pages filled by the same char. */
> +    uint64_t zero_pages;
>  };
>  typedef struct RAMState RAMState;
>  
> @@ -172,7 +175,6 @@ static RAMState ram_state;
>  
>  /* accounting for migration statistics */
>  typedef struct AccountingInfo {
> -    uint64_t dup_pages;
>      uint64_t skipped_pages;
>      uint64_t norm_pages;
>      uint64_t iterations;
> @@ -192,12 +194,12 @@ static void acct_clear(void)
>  
>  uint64_t dup_mig_bytes_transferred(void)
>  {
> -    return acct_info.dup_pages * TARGET_PAGE_SIZE;
> +    return ram_state.zero_pages * TARGET_PAGE_SIZE;
>  }
>  
>  uint64_t dup_mig_pages_transferred(void)
>  {
> -    return acct_info.dup_pages;
> +    return ram_state.zero_pages;
>  }
>  
>  uint64_t skipped_mig_bytes_transferred(void)
> @@ -737,19 +739,21 @@ static void migration_bitmap_sync(RAMState *rs)
>   *
>   * Returns the number of pages written.
>   *
> + * @rs: current RAM state
>   * @f: QEMUFile where to send the data
>   * @block: block that contains the page we want to send
>   * @offset: offset inside the block for the page
>   * @p: pointer to the page
>   * @bytes_transferred: increase it with the number of transferred bytes
>   */
> -static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
> +static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block,
> +                          ram_addr_t offset,
>                            uint8_t *p, uint64_t *bytes_transferred)
>  {
>      int pages = -1;
>  
>      if (is_zero_range(p, TARGET_PAGE_SIZE)) {
> -        acct_info.dup_pages++;
> +        rs->zero_pages++;
>          *bytes_transferred += save_page_header(f, block,
>                                                offset | RAM_SAVE_FLAG_COMPRESS);
>          qemu_put_byte(f, 0);
> @@ -822,11 +826,11 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>              if (bytes_xmit > 0) {
>                  acct_info.norm_pages++;
>              } else if (bytes_xmit == 0) {
> -                acct_info.dup_pages++;
> +                rs->zero_pages++;

This code path looks suspicious... since IIUC it should currently only
be triggered in the RDMA case, and I believe qemu_rdma_save_page()
must have hit something wrong here (so that it didn't return with
RAM_SAVE_CONTROL_DELAYED). Is it then correct that we bump the zero
page count unconditionally here? (hmm, and the default bytes_xmit is
zero as well...)
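
For reference, this is roughly the flow I am looking at (a simplified
sketch paraphrased from the tree, not part of this patch, so the
details may be slightly off):

    uint64_t bytes_xmit = 0;   /* default stays 0 if the hook sends nothing */
    int ret = ram_control_save_page(f, block->offset, offset,
                                    TARGET_PAGE_SIZE, &bytes_xmit);

    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {      /* an RDMA hook is present */
        if (ret != RAM_SAVE_CONTROL_DELAYED) {   /* hook did not defer the page */
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                rs->zero_pages++;   /* counted as zero without ever looking
                                     * at the page contents */
            }
        }
    }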

Another thing: I see that when RDMA is enabled we update the
accounting info via acct_update_position(), while we also update it
here. Is this a case of duplicated accounting?
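
To make that second question concrete, the two update sites I have in
mind are roughly these (a sketch based on the hunks below and on my
reading of the RDMA code, so take it as an assumption rather than a
quote):

    /* generic path in ram_save_page(), after this patch: */
    } else if (bytes_xmit == 0) {
        rs->zero_pages++;
    }

    /* RDMA path, when it decides a chunk is all zeroes (assumed): */
    acct_update_position(f, size, true);  /* -> rs->zero_pages += size / TARGET_PAGE_SIZE */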

Similar question in ram_save_compressed_page().

Thanks,

>              }
>          }
>      } else {
> -        pages = save_zero_page(f, block, offset, p, bytes_transferred);
> +        pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
>          if (pages > 0) {
>              /* Must let xbzrle know, otherwise a previous (now 0'd) cached
>               * page would be stale
> @@ -998,7 +1002,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>              if (bytes_xmit > 0) {
>                  acct_info.norm_pages++;
>              } else if (bytes_xmit == 0) {
> -                acct_info.dup_pages++;
> +                rs->zero_pages++;
>              }
>          }
>      } else {
> @@ -1010,7 +1014,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>           */
>          if (block != rs->last_sent_block) {
>              flush_compressed_data(f);
> -            pages = save_zero_page(f, block, offset, p, bytes_transferred);
> +            pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
>              if (pages == -1) {
>                  /* Make sure the first page is sent out before other pages */
>                  bytes_xmit = save_page_header(f, block, offset |
> @@ -1031,7 +1035,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>              }
>          } else {
>              offset |= RAM_SAVE_FLAG_CONTINUE;
> -            pages = save_zero_page(f, block, offset, p, bytes_transferred);
> +            pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
>              if (pages == -1) {
>                  pages = compress_page_with_multi_thread(f, block, offset,
>                                                          bytes_transferred);
> @@ -1462,8 +1466,10 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
>  void acct_update_position(QEMUFile *f, size_t size, bool zero)
>  {
>      uint64_t pages = size / TARGET_PAGE_SIZE;
> +    RAMState *rs = &ram_state;
> +
>      if (zero) {
> -        acct_info.dup_pages += pages;
> +        rs->zero_pages += pages;
>      } else {
>          acct_info.norm_pages += pages;
>          bytes_transferred += size;
> @@ -2005,6 +2011,7 @@ static int ram_save_init_globals(RAMState *rs)
>  
>      rs->dirty_rate_high_cnt = 0;
>      rs->bitmap_sync_count = 0;
> +    rs->zero_pages = 0;
>      migration_bitmap_sync_init(rs);
>      qemu_mutex_init(&migration_bitmap_mutex);
>  
> -- 
> 2.9.3
> 
> 

-- peterx


