Re: [PATCH v1 3/4] migration: refactor ram_save_target_page functions
From: Fabiano Rosas
Subject: Re: [PATCH v1 3/4] migration: refactor ram_save_target_page functions
Date: Tue, 26 Nov 2024 18:18:45 -0300
Prasad Pandit <ppandit@redhat.com> writes:
> From: Prasad Pandit <pjp@fedoraproject.org>
>
> Refactor the ram_save_target_page legacy and multifd
> functions into one. Besides simplifying the code, this
> removes the last use of the 'migration_ops' object, so
> it is expunged.
>
> When both multifd and postcopy modes are enabled, the
> multifd threads stay active until the migration reaches
> postcopy mode, to avoid errors. This is done by checking
> the state returned by migration_in_postcopy().
This patch should be just the actual refactoring on top of master, with
no mention of postcopy at all.
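The unified function would then be roughly the body this patch
introduces, minus the migration_in_postcopy() checks. Untested sketch:

static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
{
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    int res;

    /* Zero page detection stays on the main thread in legacy mode. */
    if (!migrate_multifd() ||
        migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
        if (save_zero_page(rs, pss, offset)) {
            return 1;
        }
    }

    if (migrate_multifd()) {
        return ram_save_multifd_page(pss->block, offset);
    }

    if (control_save_page(pss, offset, &res)) {
        return res;
    }

    return ram_save_page(rs, pss);
}

The postcopy interaction can then come in the later patch that actually
enables the two features together.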
>
> Signed-off-by: Prasad Pandit <pjp@fedoraproject.org>
> ---
> migration/multifd-nocomp.c | 3 +-
> migration/ram.c | 74 ++++++++++++--------------------------
> 2 files changed, 24 insertions(+), 53 deletions(-)
>
> v1: Further refactor the ram_save_target_page() function to
>     consolidate the save_zero_page() calls.
>
> Add migration_in_postcopy() call to check migration state
> instead of combining it with migrate_multifd().
>
> v0:
> https://lore.kernel.org/qemu-devel/20241029150908.1136894-1-ppandit@redhat.com/T/#u
>
> diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c
> index 55191152f9..e92821e8f6 100644
> --- a/migration/multifd-nocomp.c
> +++ b/migration/multifd-nocomp.c
> @@ -14,6 +14,7 @@
> #include "exec/ramblock.h"
> #include "exec/target_page.h"
> #include "file.h"
> +#include "migration.h"
> #include "multifd.h"
> #include "options.h"
> #include "qapi/error.h"
> @@ -345,7 +346,7 @@ retry:
>
> int multifd_ram_flush_and_sync(void)
> {
> - if (!migrate_multifd()) {
> + if (!migrate_multifd() || migration_in_postcopy()) {
> return 0;
> }
>
> diff --git a/migration/ram.c b/migration/ram.c
> index 05ff9eb328..9d1ec6209c 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -467,13 +467,6 @@ void ram_transferred_add(uint64_t bytes)
> }
> }
>
> -struct MigrationOps {
> - int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
> -};
> -typedef struct MigrationOps MigrationOps;
> -
> -MigrationOps *migration_ops;
> -
> static int ram_save_host_page_urgent(PageSearchStatus *pss);
>
> /* NOTE: page is the PFN not real ram_addr_t. */
> @@ -1323,9 +1316,9 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
> pss->page = 0;
> pss->block = QLIST_NEXT_RCU(pss->block, next);
> if (!pss->block) {
> - if (migrate_multifd() &&
> - (!migrate_multifd_flush_after_each_section() ||
> - migrate_mapped_ram())) {
> + if (migrate_multifd() && !migration_in_postcopy()
> + && (!migrate_multifd_flush_after_each_section()
> + || migrate_mapped_ram())) {
This is getting out of hand. We can't keep growing this condition every
time something new comes up. Any ideas?
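One idea: hoist the whole test into a helper next to
multifd_ram_flush_and_sync(), exported via multifd.h, so the policy
lives in one place and callers stop open-coding it. Rough sketch,
helper name hypothetical:

/* Does this migration round need a multifd flush-and-sync? */
bool multifd_ram_sync_per_round(void)
{
    if (!migrate_multifd() || migration_in_postcopy()) {
        return false;
    }
    return !migrate_multifd_flush_after_each_section() ||
           migrate_mapped_ram();
}

The condition in find_dirty_block() then collapses to a single call,
and the next feature that affects it only has to touch the helper.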
> QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
> int ret = multifd_ram_flush_and_sync();
> if (ret < 0) {
> @@ -1986,55 +1979,39 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
> }
>
> /**
> - * ram_save_target_page_legacy: save one target page
> + * ram_save_target_page: save one target page to the precopy thread
> + * OR to multifd workers.
> *
> - * Returns the number of pages written
> + * Multifd mode: returns 1 if the page was queued, -1 otherwise.
> + * Non-multifd mode: returns the number of pages written.
Yes, although I wonder if we should keep documenting this when we only
call this function for a single page and it always returns at most 1.
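If we drop the distinction, the comment could shrink to something like:

/**
 * ram_save_target_page: save one target page
 *
 * Returns 1 if the page was written or queued, negative on error.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */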
> *
> * @rs: current RAM state
> * @pss: data about the page we want to send
> */
> -static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
> +static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
> {
> ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
> int res;
>
> + if (!migrate_multifd()
> + || migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
> + if (save_zero_page(rs, pss, offset)) {
> + return 1;
> + }
> + }
> +
> + if (migrate_multifd() && !migration_in_postcopy()) {
> + RAMBlock *block = pss->block;
> + return ram_save_multifd_page(block, offset);
> + }
> +
> if (control_save_page(pss, offset, &res)) {
> return res;
> }
>
> - if (save_zero_page(rs, pss, offset)) {
> - return 1;
> - }
> -
> return ram_save_page(rs, pss);
> }
>
> -/**
> - * ram_save_target_page_multifd: send one target page to multifd workers
> - *
> - * Returns 1 if the page was queued, -1 otherwise.
> - *
> - * @rs: current RAM state
> - * @pss: data about the page we want to send
> - */
> -static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
> -{
> - RAMBlock *block = pss->block;
> - ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
> -
> - /*
> - * While using multifd live migration, we still need to handle zero
> - * page checking on the migration main thread.
> - */
> - if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
> - if (save_zero_page(rs, pss, offset)) {
> - return 1;
> - }
> - }
> -
> - return ram_save_multifd_page(block, offset);
> -}
> -
> /* Should be called before sending a host page */
> static void pss_host_page_prepare(PageSearchStatus *pss)
> {
> @@ -2121,7 +2098,7 @@ static int ram_save_host_page_urgent(PageSearchStatus *pss)
>
> if (page_dirty) {
> /* Be strict to return code; it must be 1, or what else? */
... this^ comment, for instance.
> - if (migration_ops->ram_save_target_page(rs, pss) != 1) {
> + if (ram_save_target_page(rs, pss) != 1) {
> error_report_once("%s: ram_save_target_page failed", __func__);
> ret = -1;
> goto out;
> @@ -2190,7 +2167,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
> if (preempt_active) {
> qemu_mutex_unlock(&rs->bitmap_mutex);
> }
> - tmppages = migration_ops->ram_save_target_page(rs, pss);
> + tmppages = ram_save_target_page(rs, pss);
> if (tmppages >= 0) {
> pages += tmppages;
> /*
> @@ -2388,8 +2365,6 @@ static void ram_save_cleanup(void *opaque)
> xbzrle_cleanup();
> multifd_ram_save_cleanup();
> ram_state_cleanup(rsp);
> - g_free(migration_ops);
> - migration_ops = NULL;
> }
>
> static void ram_state_reset(RAMState *rs)
> @@ -3055,13 +3030,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
> return ret;
> }
>
> - migration_ops = g_malloc0(sizeof(MigrationOps));
> -
> if (migrate_multifd()) {
> multifd_ram_save_setup();
> - migration_ops->ram_save_target_page = ram_save_target_page_multifd;
> - } else {
> - migration_ops->ram_save_target_page = ram_save_target_page_legacy;
> }
>
> bql_unlock();