qemu-devel

Re: [Qemu-devel] [PATCH V7 RESEND 07/17] COLO: Load dirty pages into SVM's RAM cache firstly


From: Zhang Chen
Subject: Re: [Qemu-devel] [PATCH V7 RESEND 07/17] COLO: Load dirty pages into SVM's RAM cache firstly
Date: Mon, 21 May 2018 02:30:43 +0800

On Wed, May 16, 2018 at 12:55 AM, Dr. David Alan Gilbert <address@hidden> wrote:

> * Zhang Chen (address@hidden) wrote:
> > We should not load PVM's state directly into SVM, because errors may
> > happen while SVM is receiving data, which would break SVM.
> >
> > We need to ensure that all data has been received before we load the
> > state into SVM, so we use extra memory to cache this data (PVM's RAM).
> > The RAM cache on the secondary side is initially the same as SVM/PVM's
> > memory. During each checkpoint, we first cache PVM's dirty pages in
> > this RAM cache, so the RAM cache always matches PVM's memory at every
> > checkpoint; we then flush the cached RAM to SVM once we have received
> > all of PVM's state.
> >
> > Signed-off-by: zhanghailiang <address@hidden>
> > Signed-off-by: Li Zhijian <address@hidden>
> > Signed-off-by: Zhang Chen <address@hidden>
> > ---
> >  include/exec/ram_addr.h |  1 +
> >  migration/migration.c   |  2 +
> >  migration/ram.c         | 99 +++++++++++++++++++++++++++++++++++++++--
> >  migration/ram.h         |  4 ++
> >  migration/savevm.c      |  2 +-
> >  5 files changed, 104 insertions(+), 4 deletions(-)
> >
> > diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> > index cf2446a176..51ec153a57 100644
> > --- a/include/exec/ram_addr.h
> > +++ b/include/exec/ram_addr.h
> > @@ -27,6 +27,7 @@ struct RAMBlock {
> >      struct rcu_head rcu;
> >      struct MemoryRegion *mr;
> >      uint8_t *host;
> > +    uint8_t *colo_cache; /* For colo, VM's ram cache */
> >      ram_addr_t offset;
> >      ram_addr_t used_length;
> >      ram_addr_t max_length;
> > diff --git a/migration/migration.c b/migration/migration.c
> > index 8dee7dd309..cfc1b958b9 100644
> > --- a/migration/migration.c
> > +++ b/migration/migration.c
> > @@ -421,6 +421,8 @@ static void process_incoming_migration_co(void *opaque)
> >
> >          /* Wait checkpoint incoming thread exit before free resource */
> >          qemu_thread_join(&mis->colo_incoming_thread);
> > +        /* We hold the global iothread lock, so it is safe here */
> > +        colo_release_ram_cache();
> >      }
> >
> >      if (ret < 0) {
> > diff --git a/migration/ram.c b/migration/ram.c
> > index 912810c18e..7ca845f8a9 100644
> > --- a/migration/ram.c
> > +++ b/migration/ram.c
> > @@ -2520,6 +2520,20 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
> >      return block->host + offset;
> >  }
> >
> > +static inline void *colo_cache_from_block_offset(RAMBlock *block,
> > +                                                 ram_addr_t offset)
> > +{
> > +    if (!offset_in_ramblock(block, offset)) {
> > +        return NULL;
> > +    }
> > +    if (!block->colo_cache) {
> > +        error_report("%s: colo_cache is NULL in block :%s",
> > +                     __func__, block->idstr);
> > +        return NULL;
> > +    }
> > +    return block->colo_cache + offset;
> > +}
> > +
> >  /**
> >   * ram_handle_compressed: handle the zero page case
> >   *
> > @@ -2724,6 +2738,57 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
> >      qemu_mutex_unlock(&decomp_done_lock);
> >  }
> >
> > +/*
> > + * colo cache: this is for the secondary VM, we cache the whole
> > + * memory of the secondary VM. The global lock must be held to
> > + * call this helper.
> > + */
> > +int colo_init_ram_cache(void)
> > +{
> > +    RAMBlock *block;
> > +
> > +    rcu_read_lock();
> > +    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
> > +        block->colo_cache = qemu_anon_ram_alloc(block->used_length,
> > +                                                NULL,
> > +                                                false);
> > +        if (!block->colo_cache) {
> > +            error_report("%s: Can't alloc memory for COLO cache of
> block %s,"
> > +                         "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
> > +                         block->used_length);
> > +            goto out_locked;
> > +        }
> > +    }
> > +    rcu_read_unlock();
> > +    return 0;
> > +
> > +out_locked:
> > +    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
> > +        if (block->colo_cache) {
> > +            qemu_anon_ram_free(block->colo_cache, block->used_length);
> > +            block->colo_cache = NULL;
> > +        }
> > +    }
> > +
> > +    rcu_read_unlock();
> > +    return -errno;
> > +}
> > +
> > +/* The global lock must be held to call this helper */
> > +void colo_release_ram_cache(void)
> > +{
> > +    RAMBlock *block;
> > +
> > +    rcu_read_lock();
> > +    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
> > +        if (block->colo_cache) {
> > +            qemu_anon_ram_free(block->colo_cache, block->used_length);
> > +            block->colo_cache = NULL;
> > +        }
> > +    }
> > +    rcu_read_unlock();
> > +}
> > +
> >  /**
> >   * ram_load_setup: Setup RAM for migration incoming side
> >   *
> > @@ -2740,6 +2805,7 @@ static int ram_load_setup(QEMUFile *f, void *opaque)
> >
> >      xbzrle_load_setup();
> >      ramblock_recv_map_init();
> > +
> >      return 0;
> >  }
> >
> > @@ -2753,6 +2819,7 @@ static int ram_load_cleanup(void *opaque)
> >          g_free(rb->receivedmap);
> >          rb->receivedmap = NULL;
> >      }
> > +
> >      return 0;
> >  }
> >
> > @@ -2966,7 +3033,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> >
> >      while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
> >          ram_addr_t addr, total_ram_bytes;
> > -        void *host = NULL;
> > +        void *host = NULL, *host_bak = NULL;
> >          uint8_t ch;
> >
> >          addr = qemu_get_be64(f);
> > @@ -2986,13 +3053,36 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> >                       RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
> >              RAMBlock *block = ram_block_from_stream(f, flags);
> >
> > -            host = host_from_ram_block_offset(block, addr);
> > +            /*
> > +             * After going into COLO, we should load the page into
> > +             * colo_cache.
> > +             * NOTE: We need to keep a copy of SVM's ram in colo_cache.
> > +             * Previously, we copied all this memory in the COLO
> > +             * preparing stage, during which the VM had to be stopped,
> > +             * which is time-consuming. Here we optimize it with a
> > +             * trick: back up every page during the migration process
> > +             * while COLO is enabled. Though this affects the speed of
> > +             * the migration, it clearly reduces the downtime of
> > +             * backing up all of SVM's memory in the COLO preparing
> > +             * stage.
> > +             */
> > +            if (migration_incoming_in_colo_state()) {
> > +                host = colo_cache_from_block_offset(block, addr);
> > +                /* After going into COLO state, don't back it up any more */
> > +                if (!migration_incoming_in_colo_state()) {
>
> I don't understand how we can reach this nested 'if';
> colo_cache_from_block_offset is short and simple; so how can
> migration_incoming_in_colo_state() be both true and false?
>
> I think the intent is that when COLO is enabled, you want to take a
> copy while receiving the first checkpoint; but I don't think that's
> what this 'if' is doing.
>

I'm also confused about this part.
I checked the old version of the code, but it looks the same.
How do you think this should be done?
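
Maybe something like the sketch below? Just a rough, untested idea on my
side. Note that migration_incoming_colo_enabled() here is a hypothetical
helper (meaning "COLO has been negotiated for this incoming migration");
the idea is to back up each page into colo_cache only during the initial
migration stage, before we actually enter COLO state:

    /*
     * Hypothetical sketch, not the patch as posted: assumes
     * migration_incoming_colo_enabled() becomes true as soon as COLO
     * is negotiated, while migration_incoming_in_colo_state() only
     * becomes true once checkpointing has started.
     */
    if (migration_incoming_colo_enabled()) {
        if (migration_incoming_in_colo_state()) {
            /* In COLO state, load all pages into the cache only */
            host = colo_cache_from_block_offset(block, addr);
        } else {
            /*
             * In the migration stage before COLO state, load the page
             * into SVM's memory and also back it up into the cache, so
             * the cache starts out identical to SVM's memory. host_bak
             * is then used by the later memcpy() to fill the cache.
             */
            host = host_from_ram_block_offset(block, addr);
            host_bak = colo_cache_from_block_offset(block, addr);
        }
    } else {
        host = host_from_ram_block_offset(block, addr);
    }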

Or maybe the original authors know some detail?
CC Lizhijian and Zhanghailiang

Thanks
Zhang Chen




>
> Dave
>
> > +                    host_bak = host;
> > +                }
> > +            }
> > +            if (!migration_incoming_in_colo_state()) {
> > +                host = host_from_ram_block_offset(block, addr);
> > +            }
> >              if (!host) {
> >                  error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
> >                  ret = -EINVAL;
> >                  break;
> >              }
> > -            ramblock_recv_bitmap_set(block, host);
> > +
> > +            if (!migration_incoming_in_colo_state()) {
> > +                ramblock_recv_bitmap_set(block, host);
> > +            }
> > +
> >              trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
> >          }
> >
> > @@ -3087,6 +3177,9 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> >          if (!ret) {
> >              ret = qemu_file_get_error(f);
> >          }
> > +        if (!ret && host_bak && host) {
> > +            memcpy(host_bak, host, TARGET_PAGE_SIZE);
> > +        }
> >      }
> >
> >      ret |= wait_for_decompress_done();
> > diff --git a/migration/ram.h b/migration/ram.h
> > index 5030be110a..66e9b86ff0 100644
> > --- a/migration/ram.h
> > +++ b/migration/ram.h
> > @@ -64,4 +64,8 @@ bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset);
> >  void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr);
> >  void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr);
> >
> > +/* ram cache */
> > +int colo_init_ram_cache(void);
> > +void colo_release_ram_cache(void);
> > +
> >  #endif
> > diff --git a/migration/savevm.c b/migration/savevm.c
> > index c43d220220..ec0bff09ce 100644
> > --- a/migration/savevm.c
> > +++ b/migration/savevm.c
> > @@ -1807,7 +1807,7 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis)
> >  static int loadvm_process_enable_colo(MigrationIncomingState *mis)
> >  {
> >      migration_incoming_enable_colo();
> > -    return 0;
> > +    return colo_init_ram_cache();
> >  }
> >
> >  /*
> > --
> > 2.17.0
> >
> --
> Dr. David Alan Gilbert / address@hidden / Manchester, UK
>

