[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v2 6/7] migration/multifd: Cleanup src flushes on condition check
From: Fabiano Rosas
Subject: Re: [PATCH v2 6/7] migration/multifd: Cleanup src flushes on condition check
Date: Fri, 06 Dec 2024 11:18:59 -0300
Peter Xu <peterx@redhat.com> writes:
> The src flush condition check is over complicated, and it's getting more
> out of control if postcopy will be involved.
>
> In general, we have two modes to do the sync: legacy or modern ways.
> Legacy uses per-section flush, modern uses per-round flush.
>
> Mapped-ram always uses the modern, which is per-round.
>
> Introduce two helpers, which can greatly simplify the code, and hopefully
> make it readable again.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> migration/multifd.h | 2 ++
> migration/multifd-nocomp.c | 42 ++++++++++++++++++++++++++++++++++++++
> migration/ram.c | 10 +++------
> 3 files changed, 47 insertions(+), 7 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index c9ae57ea02..582040922f 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -351,6 +351,8 @@ static inline uint32_t multifd_ram_page_count(void)
> void multifd_ram_save_setup(void);
> void multifd_ram_save_cleanup(void);
> int multifd_ram_flush_and_sync(QEMUFile *f);
> +bool multifd_ram_sync_per_round(void);
> +bool multifd_ram_sync_per_section(void);
> size_t multifd_ram_payload_size(void);
> void multifd_ram_fill_packet(MultiFDSendParams *p);
> int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp);
> diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c
> index 58372db0f4..c1f686c0ce 100644
> --- a/migration/multifd-nocomp.c
> +++ b/migration/multifd-nocomp.c
> @@ -344,6 +344,48 @@ retry:
> return true;
> }
>
> +/*
> + * We have two modes for multifd flushes:
> + *
> + * - Per-section mode: this is the legacy way to flush, it requires one
> + * MULTIFD_FLAG_SYNC message for each RAM_SAVE_FLAG_EOS.
> + *
> + * - Per-round mode: this is the modern way to flush, it requires one
> + * MULTIFD_FLAG_SYNC message only for each round of RAM scan. Normally
> + * it's paired with a new RAM_SAVE_FLAG_MULTIFD_FLUSH message in network
> + * based migrations.
> + *
> + * One thing to mention is mapped-ram always use the modern way to sync.
> + */
> +
> +/* Do we need a per-section multifd flush (legacy way)? */
> +bool multifd_ram_sync_per_section(void)
> +{
> + if (!migrate_multifd()) {
> + return false;
> + }
> +
> + if (migrate_mapped_ram()) {
> + return false;
> + }
> +
> + return migrate_multifd_flush_after_each_section();
> +}
> +
> +/* Do we need a per-round multifd flush (modern way)? */
> +bool multifd_ram_sync_per_round(void)
> +{
> + if (!migrate_multifd()) {
> + return false;
> + }
> +
> + if (migrate_mapped_ram()) {
> + return true;
> + }
> +
> + return !migrate_multifd_flush_after_each_section();
> +}
> +
> int multifd_ram_flush_and_sync(QEMUFile *f)
> {
> MultiFDSyncReq req;
> diff --git a/migration/ram.c b/migration/ram.c
> index 154ff5abd4..5d4bdefe69 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1302,9 +1302,7 @@ static int find_dirty_block(RAMState *rs,
> PageSearchStatus *pss)
> pss->page = 0;
> pss->block = QLIST_NEXT_RCU(pss->block, next);
> if (!pss->block) {
> - if (migrate_multifd() &&
> - (!migrate_multifd_flush_after_each_section() ||
> - migrate_mapped_ram())) {
> + if (multifd_ram_sync_per_round()) {
If we're already implicitly declaring which parts of the code mean
"round" or "section", we could fold the flush into the function and call
it unconditionally.
We don't need ram.c code to be deciding about multifd. This could be all
hidden away in the multifd-nocomp code:
-- >8 --
diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c
index c1f686c0ce..6a7eee4c25 100644
--- a/migration/multifd-nocomp.c
+++ b/migration/multifd-nocomp.c
@@ -358,32 +358,26 @@ retry:
* One thing to mention is mapped-ram always use the modern way to sync.
*/
-/* Do we need a per-section multifd flush (legacy way)? */
-bool multifd_ram_sync_per_section(void)
+int multifd_ram_sync_per_section(QEMUFile *f)
{
- if (!migrate_multifd()) {
- return false;
+ if (!migrate_multifd() || !migrate_multifd_flush_after_each_section()) {
+ return 0;
}
if (migrate_mapped_ram()) {
- return false;
+ return 0;
}
- return migrate_multifd_flush_after_each_section();
+ return multifd_ram_flush_and_sync(f);
}
-/* Do we need a per-round multifd flush (modern way)? */
-bool multifd_ram_sync_per_round(void)
+int multifd_ram_sync_per_round(QEMUFile *f)
{
- if (!migrate_multifd()) {
- return false;
+ if (!migrate_multifd() || migrate_multifd_flush_after_each_section()) {
+ return 0;
}
- if (migrate_mapped_ram()) {
- return true;
- }
-
- return !migrate_multifd_flush_after_each_section();
+ return multifd_ram_flush_and_sync(f);
}
int multifd_ram_flush_and_sync(QEMUFile *f)
diff --git a/migration/multifd.h b/migration/multifd.h
index 582040922f..3b42128167 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -351,8 +351,8 @@ static inline uint32_t multifd_ram_page_count(void)
void multifd_ram_save_setup(void);
void multifd_ram_save_cleanup(void);
int multifd_ram_flush_and_sync(QEMUFile *f);
-bool multifd_ram_sync_per_round(void);
-bool multifd_ram_sync_per_section(void);
+int multifd_ram_sync_per_round(QEMUFile *f);
+int multifd_ram_sync_per_section(QEMUFile *f);
size_t multifd_ram_payload_size(void);
void multifd_ram_fill_packet(MultiFDSendParams *p);
int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp);
diff --git a/migration/ram.c b/migration/ram.c
index ddee703585..fe33c8e0e8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1302,12 +1302,10 @@ static int find_dirty_block(RAMState *rs,
PageSearchStatus *pss)
pss->page = 0;
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
- if (multifd_ram_sync_per_round()) {
- QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
- int ret = multifd_ram_flush_and_sync(f);
- if (ret < 0) {
- return ret;
- }
+ QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
+ int ret = multifd_ram_sync_per_round(f);
+ if (ret < 0) {
+ return ret;
}
/* Hit the end of the list */
@@ -3203,11 +3201,9 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
out:
if (ret >= 0 && migration_is_running()) {
- if (multifd_ram_sync_per_section()) {
- ret = multifd_ram_flush_and_sync(f);
- if (ret < 0) {
- return ret;
- }
+ ret = multifd_ram_sync_per_section(f);
+ if (ret < 0) {
+ return ret;
}
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -3276,15 +3272,13 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
}
}
- if (multifd_ram_sync_per_section()) {
- /*
- * Only the old dest QEMU will need this sync, because each EOS
- * will require one SYNC message on each channel.
- */
- ret = multifd_ram_flush_and_sync(f);
- if (ret < 0) {
- return ret;
- }
+ /*
+ * Only the old dest QEMU will need this sync, because each EOS
+ * will require one SYNC message on each channel.
+ */
+ ret = multifd_ram_sync_per_section(f);
+ if (ret < 0) {
+ return ret;
}
if (migrate_mapped_ram()) {
--
2.35.3
- Re: [PATCH v2 3/7] migration/ram: Move RAM_SAVE_FLAG* into ram.h, (continued)
- [PATCH v2 4/7] migration/multifd: Unify RAM_SAVE_FLAG_MULTIFD_FLUSH messages, Peter Xu, 2024/12/05
- [PATCH v2 5/7] migration/multifd: Remove sync processing on postcopy, Peter Xu, 2024/12/05
- [PATCH v2 6/7] migration/multifd: Cleanup src flushes on condition check, Peter Xu, 2024/12/05
- Re: [PATCH v2 6/7] migration/multifd: Cleanup src flushes on condition check,
Fabiano Rosas <=
- [PATCH v2 7/7] migration/multifd: Document the reason to sync for save_setup(), Peter Xu, 2024/12/05