From: Dr. David Alan Gilbert
Subject: Re: [Qemu-devel] [PATCH 13/13] migration: [HACK]Transfer pages over new channels
Date: Fri, 22 Apr 2016 13:09:14 +0100
User-agent: Mutt/1.5.24 (2015-08-30)
* Juan Quintela (address@hidden) wrote:
> We switch from sending the page number to sending the real pages.
>
> [HACK]
> How we calculate the bandwidth is beyond repair; there is a hack there
> that would work for x86 and archs that have 4kb pages.
>
> If you are having a nice day just go to migration/ram.c and look at
> acct_update_position(). Now you are depressed, right?
>
> Signed-off-by: Juan Quintela <address@hidden>
> ---
>  migration/migration.c | 15 +++++++++++----
>  migration/ram.c       | 42 +++++++++++++++++++++++++++++-------------
>  2 files changed, 40 insertions(+), 17 deletions(-)
>
> diff --git a/migration/migration.c b/migration/migration.c
> index efdd981..1db6e52 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1665,7 +1665,8 @@ static void *migration_thread(void *opaque)
>      /* Used by the bandwidth calcs, updated later */
>      int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>      int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
> -    int64_t initial_bytes = 0;
> +    int64_t qemu_file_bytes = 0;
> +    int64_t multifd_pages = 0;
>      int64_t max_size = 0;
>      int64_t start_time = initial_time;
>      int64_t end_time;
> @@ -1748,9 +1749,14 @@ static void *migration_thread(void *opaque)
>          }
>          current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>          if (current_time >= initial_time + BUFFER_DELAY) {
> -            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
> -                                         initial_bytes;
>              uint64_t time_spent = current_time - initial_time;
> +            uint64_t qemu_file_bytes_now = qemu_ftell(s->to_dst_file);
> +            uint64_t multifd_pages_now = multifd_mig_pages_transferred();
> +            /* Hack ahead. Why the hell don't we have a function to know the
> +               target_page_size? Hard coding it to 4096 */
> +            uint64_t transferred_bytes =
> +                (qemu_file_bytes_now - qemu_file_bytes) +
> +                (multifd_pages_now - multifd_pages) * 4096;
We do; I added qemu_target_page_bits() in the postcopy series, so add
1ul << qemu_target_page_bits()
(I added bits so that you can get the page size easily; adding just the
page size can't go the other way).
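
i.e. the hard-coded constant above would become something like the
following (sketch only; it assumes qemu_target_page_bits() is visible
from migration/migration.c):

    uint64_t transferred_bytes =
        (qemu_file_bytes_now - qemu_file_bytes) +
        (multifd_pages_now - multifd_pages) * (1ul << qemu_target_page_bits());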
>              double bandwidth = (double)transferred_bytes / time_spent;
>              max_size = bandwidth * migrate_max_downtime() / 1000000;
> 
> @@ -1767,7 +1773,8 @@ static void *migration_thread(void *opaque)
> 
>              qemu_file_reset_rate_limit(s->to_dst_file);
>              initial_time = current_time;
> -            initial_bytes = qemu_ftell(s->to_dst_file);
> +            qemu_file_bytes = qemu_file_bytes_now;
> +            multifd_pages = multifd_pages_now;
>          }
>          if (qemu_file_rate_limit(s->to_dst_file)) {
>              /* usleep expects microseconds */
> diff --git a/migration/ram.c b/migration/ram.c
> index b1b69cb..1d9ecb9 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -430,8 +430,8 @@ static void *multifd_send_thread(void *opaque)
>              params->address = 0;
>              qemu_mutex_unlock(&params->mutex);
> 
> -            if (write(params->s, &address, sizeof(uint8_t *))
> -                != sizeof(uint8_t *)) {
> +            if (write(params->s, address, TARGET_PAGE_SIZE)
> +                != TARGET_PAGE_SIZE) {
>                  /* Shouldn't ever happen */
>                  exit(-1);
>              }
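
(An aside on the write() above: on a socket, write(2) can legitimately
return a short count without it being an error, so treating every short
write as fatal is fragile. A minimal retry loop might look like the
sketch below; this is not part of the patch, just an illustration
reusing params->s and address from the thread above. The same
consideration applies to the read() in multifd_recv_thread further
down.)

    /* Sketch: keep writing until the whole page has gone out, since
       write(2) on a socket may return a short count. */
    size_t done = 0;
    while (done < TARGET_PAGE_SIZE) {
        ssize_t r = write(params->s, address + done, TARGET_PAGE_SIZE - done);
        if (r < 0) {
            if (errno == EINTR) {
                continue;           /* interrupted by a signal: retry */
            }
            exit(-1);               /* a real error */
        }
        done += r;
    }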
> @@ -537,6 +537,24 @@ static int multifd_send_page(uint8_t *address)
>      return i;
>  }
> 
> +static void flush_multifd_send_data(QEMUFile *f)
> +{
> +    int i, thread_count;
> +
> +    if (!migrate_multifd()) {
> +        return;
> +    }
> +    qemu_fflush(f);
> +    thread_count = migrate_multifd_threads();
> +    qemu_mutex_lock(&multifd_send_mutex);
> +    for (i = 0; i < thread_count; i++) {
> +        while (!multifd_send[i].done) {
> +            qemu_cond_wait(&multifd_send_cond, &multifd_send_mutex);
> +        }
> +    }
> +    qemu_mutex_unlock(&multifd_send_mutex);
> +}
> +
>  struct MultiFDRecvParams {
>      QemuThread thread;
>      QemuCond cond;
> @@ -559,7 +576,6 @@ static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *params = opaque;
>      uint8_t *address;
> -    uint8_t *recv_address;
> 
>      qemu_mutex_lock(&params->mutex);
>      while (!params->quit) {
> @@ -568,18 +584,12 @@ static void *multifd_recv_thread(void *opaque)
>              params->address = 0;
>              qemu_mutex_unlock(&params->mutex);
> 
> -            if (read(params->s, &recv_address, sizeof(uint8_t *))
> -                != sizeof(uint8_t *)) {
> +            if (read(params->s, address, TARGET_PAGE_SIZE)
> +                != TARGET_PAGE_SIZE) {
>                  /* shouldn't ever happen */
>                  exit(-1);
>              }
> 
> -            if (address != recv_address) {
> -                printf("We received %p what we were expecting %p\n",
> -                       recv_address, address);
> -                exit(-1);
> -            }
> -
>              qemu_mutex_lock(&multifd_recv_mutex);
>              params->done = true;
>              qemu_cond_signal(&multifd_recv_cond);
> @@ -1097,6 +1107,7 @@ static int ram_multifd_page(QEMUFile *f, PageSearchStatus *pss,
>      uint8_t *p;
>      RAMBlock *block = pss->block;
>      ram_addr_t offset = pss->offset;
> +    static int count = 32;
> 
>      p = block->host + offset;
> 
> @@ -1108,9 +1119,14 @@ static int ram_multifd_page(QEMUFile *f, PageSearchStatus *pss,
>          *bytes_transferred +=
>              save_page_header(f, block, offset | RAM_SAVE_FLAG_MULTIFD_PAGE);
>          fd_num = multifd_send_page(p);
> +        count--;
> +        if (!count) {
> +            qemu_fflush(f);
> +            count = 32;
> +        }
> +
>          qemu_put_be16(f, fd_num);
>          *bytes_transferred += 2; /* size of fd_num */
> -        qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
>          *bytes_transferred += TARGET_PAGE_SIZE;
>          pages = 1;
>          acct_info.norm_pages++;
> @@ -2375,6 +2391,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
>      }
> 
>      flush_compressed_data(f);
> +    flush_multifd_send_data(f);
>      ram_control_after_iterate(f, RAM_CONTROL_FINISH);
> 
>      rcu_read_unlock();
> @@ -2850,7 +2867,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>          case RAM_SAVE_FLAG_MULTIFD_PAGE:
>              fd_num = qemu_get_be16(f);
>              multifd_recv_page(host, fd_num);
> -            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
I think this breaks postcopy, because 'host' is often the same between multiple
calls around this loop.
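To make that concrete: multifd_recv_page() presumably only hands 'host'
to a receive thread and returns before the data has landed (the thread
code above reads the page asynchronously), so a later iteration that
maps to the same 'host' buffer can race with the thread still filling
it. A hypothetical fix sketch; multifd_recv_wait() does not exist in
this series and is purely illustrative:

    case RAM_SAVE_FLAG_MULTIFD_PAGE:
        fd_num = qemu_get_be16(f);
        multifd_recv_page(host, fd_num);
        /* hypothetical: block until the recv thread has actually filled
           'host' before the page is placed or the buffer is reused */
        multifd_recv_wait(fd_num);
        break;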
Dave
>              break;
> 
>          case RAM_SAVE_FLAG_EOS:
> --
> 2.5.5
>
--
Dr. David Alan Gilbert / address@hidden / Manchester, UK