[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 15/23] multifd: Use normal pages array on the recv side
From: Dr. David Alan Gilbert
Subject: Re: [PATCH v4 15/23] multifd: Use normal pages array on the recv side
Date: Tue, 18 Jan 2022 19:29:29 +0000
User-agent: Mutt/2.1.5 (2021-12-30)
* Juan Quintela (quintela@redhat.com) wrote:
> Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
>
> ---
>
> Rename num_normal_pages to total_normal_pages (peter)
> ---
> migration/multifd.h | 8 +++++--
> migration/multifd-zlib.c | 8 +++----
> migration/multifd-zstd.c | 6 +++---
> migration/multifd.c | 45 ++++++++++++++++++----------------------
> 4 files changed, 33 insertions(+), 34 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index 7823199dbe..850889c5d8 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -151,12 +151,16 @@ typedef struct {
> uint32_t next_packet_size;
> /* packets sent through this channel */
> uint64_t num_packets;
> - /* pages sent through this channel */
> - uint64_t num_pages;
> + /* non zero pages recv through this channel */
> + uint64_t total_normal_pages;
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
> /* buffers to recv */
> struct iovec *iov;
> + /* Pages that are not zero */
> + ram_addr_t *normal;
> + /* num of non zero pages */
> + uint32_t normal_num;
> /* used for de-compression methods */
> void *data;
> } MultiFDRecvParams;
> diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
> index 7f4fbef2c9..8239c840d3 100644
> --- a/migration/multifd-zlib.c
> +++ b/migration/multifd-zlib.c
> @@ -225,7 +225,7 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error
> **errp)
> uint32_t in_size = p->next_packet_size;
> /* we measure the change of total_out */
> uint32_t out_size = zs->total_out;
> - uint32_t expected_size = p->pages->num * qemu_target_page_size();
> + uint32_t expected_size = p->normal_num * page_size;
> uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
> int ret;
> int i;
> @@ -244,16 +244,16 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error
> **errp)
> zs->avail_in = in_size;
> zs->next_in = z->zbuff;
>
> - for (i = 0; i < p->pages->num; i++) {
> + for (i = 0; i < p->normal_num; i++) {
> int flush = Z_NO_FLUSH;
> unsigned long start = zs->total_out;
>
> - if (i == p->pages->num - 1) {
> + if (i == p->normal_num - 1) {
> flush = Z_SYNC_FLUSH;
> }
>
> zs->avail_out = page_size;
> - zs->next_out = p->pages->block->host + p->pages->offset[i];
> + zs->next_out = p->pages->block->host + p->normal[i];
>
> /*
> * Welcome to inflate semantics
> diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
> index 907d07805c..c5ed72ddcd 100644
> --- a/migration/multifd-zstd.c
> +++ b/migration/multifd-zstd.c
> @@ -242,7 +242,7 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error
> **errp)
> uint32_t in_size = p->next_packet_size;
> uint32_t out_size = 0;
> size_t page_size = qemu_target_page_size();
> - uint32_t expected_size = p->pages->num * page_size;
> + uint32_t expected_size = p->normal_num * page_size;
> uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
> struct zstd_data *z = p->data;
> int ret;
> @@ -263,8 +263,8 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error
> **errp)
> z->in.size = in_size;
> z->in.pos = 0;
>
> - for (i = 0; i < p->pages->num; i++) {
> - z->out.dst = p->pages->block->host + p->pages->offset[i];
> + for (i = 0; i < p->normal_num; i++) {
> + z->out.dst = p->pages->block->host + p->normal[i];
> z->out.size = page_size;
> z->out.pos = 0;
>
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 7b804928a2..e362b1bb89 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -146,11 +146,11 @@ static int nocomp_recv_pages(MultiFDRecvParams *p,
> Error **errp)
> p->id, flags, MULTIFD_FLAG_NOCOMP);
> return -1;
> }
> - for (int i = 0; i < p->pages->num; i++) {
> - p->iov[i].iov_base = p->pages->block->host + p->pages->offset[i];
> + for (int i = 0; i < p->normal_num; i++) {
> + p->iov[i].iov_base = p->pages->block->host + p->normal[i];
> p->iov[i].iov_len = page_size;
> }
> - return qio_channel_readv_all(p->c, p->iov, p->pages->num, errp);
> + return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
> }
>
> static MultiFDMethods multifd_nocomp_ops = {
> @@ -282,7 +282,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> {
> MultiFDPacket_t *packet = p->packet;
> size_t page_size = qemu_target_page_size();
> - uint32_t pages_max = MULTIFD_PACKET_SIZE / page_size;
> + uint32_t page_count = MULTIFD_PACKET_SIZE / page_size;
> RAMBlock *block;
> int i;
>
> @@ -309,33 +309,25 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> * If we received a packet that is 100 times bigger than expected
> * just stop migration. It is a magic number.
> */
> - if (packet->pages_alloc > pages_max * 100) {
> + if (packet->pages_alloc > page_count) {
> error_setg(errp, "multifd: received packet "
> - "with size %u and expected a maximum size of %u",
> - packet->pages_alloc, pages_max * 100) ;
> + "with size %u and expected a size of %u",
> + packet->pages_alloc, page_count) ;
> return -1;
> }
> - /*
> - * We received a packet that is bigger than expected but inside
> - * reasonable limits (see previous comment). Just reallocate.
> - */
> - if (packet->pages_alloc > p->pages->allocated) {
> - multifd_pages_clear(p->pages);
> - p->pages = multifd_pages_init(packet->pages_alloc);
> - }
>
> - p->pages->num = be32_to_cpu(packet->pages_used);
> - if (p->pages->num > packet->pages_alloc) {
> + p->normal_num = be32_to_cpu(packet->pages_used);
> + if (p->normal_num > packet->pages_alloc) {
> error_setg(errp, "multifd: received packet "
> "with %u pages and expected maximum pages are %u",
> - p->pages->num, packet->pages_alloc) ;
> + p->normal_num, packet->pages_alloc) ;
> return -1;
> }
>
> p->next_packet_size = be32_to_cpu(packet->next_packet_size);
> p->packet_num = be64_to_cpu(packet->packet_num);
>
> - if (p->pages->num == 0) {
> + if (p->normal_num == 0) {
> return 0;
> }
>
> @@ -349,7 +341,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> }
>
> p->pages->block = block;
> - for (i = 0; i < p->pages->num; i++) {
> + for (i = 0; i < p->normal_num; i++) {
> uint64_t offset = be64_to_cpu(packet->offset[i]);
>
> if (offset > (block->used_length - page_size)) {
> @@ -358,7 +350,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> offset, block->used_length);
> return -1;
> }
> - p->pages->offset[i] = offset;
> + p->normal[i] = offset;
> }
>
> return 0;
> @@ -1022,6 +1014,8 @@ int multifd_load_cleanup(Error **errp)
> p->packet = NULL;
> g_free(p->iov);
> p->iov = NULL;
> + g_free(p->normal);
> + p->normal = NULL;
> multifd_recv_state->ops->recv_cleanup(p);
> }
> qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1095,13 +1089,13 @@ static void *multifd_recv_thread(void *opaque)
> flags = p->flags;
> /* recv methods don't know how to handle the SYNC flag */
> p->flags &= ~MULTIFD_FLAG_SYNC;
> - trace_multifd_recv(p->id, p->packet_num, p->pages->num, flags,
> + trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
> p->next_packet_size);
> p->num_packets++;
> - p->num_pages += p->pages->num;
> + p->total_normal_pages += p->normal_num;
> qemu_mutex_unlock(&p->mutex);
>
> - if (p->pages->num) {
> + if (p->normal_num) {
> ret = multifd_recv_state->ops->recv_pages(p, &local_err);
> if (ret != 0) {
> break;
> @@ -1123,7 +1117,7 @@ static void *multifd_recv_thread(void *opaque)
> qemu_mutex_unlock(&p->mutex);
>
> rcu_unregister_thread();
> - trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
> + trace_multifd_recv_thread_end(p->id, p->num_packets,
> p->total_normal_pages);
>
> return NULL;
> }
> @@ -1161,6 +1155,7 @@ int multifd_load_setup(Error **errp)
> p->packet = g_malloc0(p->packet_len);
> p->name = g_strdup_printf("multifdrecv_%d", i);
> p->iov = g_new0(struct iovec, page_count);
> + p->normal = g_new0(ram_addr_t, page_count);
> }
>
> for (i = 0; i < thread_count; i++) {
> --
> 2.34.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
- [PATCH v4 12/23] multifd: Use a single writev on the send side, (continued)
- [PATCH v4 12/23] multifd: Use a single writev on the send side, Juan Quintela, 2022/01/11
- [PATCH v4 19/23] multifd: Add property to enable/disable zero_page, Juan Quintela, 2022/01/11
- [PATCH v4 22/23] migration: Use multifd before we check for the zero page, Juan Quintela, 2022/01/11
- [PATCH v4 16/23] multifd: recv side only needs the RAMBlock host address, Juan Quintela, 2022/01/11
- [PATCH v4 14/23] multifd: Use normal pages array on the send side, Juan Quintela, 2022/01/11
- [PATCH v4 15/23] multifd: Use normal pages array on the recv side, Juan Quintela, 2022/01/11
- Re: [PATCH v4 15/23] multifd: Use normal pages array on the recv side,
Dr. David Alan Gilbert <=
- [PATCH v4 18/23] migration: Make ram_save_target_page() a pointer, Juan Quintela, 2022/01/11
- [PATCH v4 17/23] multifd: Rename pages_used to normal_pages, Juan Quintela, 2022/01/11
- [PATCH v4 13/23] multifd: Unfold "used" variable by its value, Juan Quintela, 2022/01/11
- [PATCH v4 23/23] migration: Export ram_release_page(), Juan Quintela, 2022/01/11
- [PATCH v4 21/23] multifd: Zero pages transmission, Juan Quintela, 2022/01/11