From: Dr. David Alan Gilbert
Subject: Re: [Qemu-devel] [PATCH v5 13/17] migration: Create thread infrastructure for multifd recv side
Date: Thu, 20 Jul 2017 11:29:46 +0100
User-agent: Mutt/1.8.3 (2017-05-23)

* Juan Quintela (address@hidden) wrote:
> We make the locking and the transfer of information specific, even if we
> are still receiving things through the main thread.
> 
> Signed-off-by: Juan Quintela <address@hidden>
> ---
>  migration/ram.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 60 insertions(+), 8 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index ac0742f..49c4880 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -49,6 +49,7 @@
>  #include "migration/colo.h"
>  #include "sysemu/sysemu.h"
>  #include "qemu/uuid.h"
> +#include "qemu/iov.h"
>  
>  /***********************************************************/
>  /* ram save/restore */
> @@ -527,7 +528,7 @@ int multifd_save_setup(void)
>      return 0;
>  }
>  
> -static int multifd_send_page(uint8_t *address)
> +static uint16_t multifd_send_page(uint8_t *address, bool last_page)
>  {
>      int i, j;
>      MultiFDSendParams *p = NULL; /* make happy gcc */
> @@ -543,8 +544,10 @@ static int multifd_send_page(uint8_t *address)
>      pages.iov[pages.num].iov_len = TARGET_PAGE_SIZE;
>      pages.num++;
>  
> -    if (pages.num < (pages.size - 1)) {
> -        return UINT16_MAX;
> +    if (!last_page) {
> +        if (pages.num < (pages.size - 1)) {
> +            return UINT16_MAX;
> +        }
>      }

This doesn't feel like it should be in a recv patch.
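(Restating the send-side contract to check my understanding: UINT16_MAX
on the wire means "page queued, but no group flushed yet", and a group is
flushed either when it fills up or when the final dirty page arrives. A
standalone sketch of just that condition — GROUP_SIZE is invented for the
example and stands in for pages.size:)

  #include <stdbool.h>
  #include <stdio.h>

  #define GROUP_SIZE 16   /* stands in for pages.size */

  /* True when the queued group should be handed to a channel thread:
   * either it is full, or this was the final dirty page. */
  static bool should_flush(int queued, bool last_page)
  {
      return last_page || queued >= GROUP_SIZE - 1;
  }

  int main(void)
  {
      printf("%d\n", should_flush(3, false));   /* 0: keep queueing */
      printf("%d\n", should_flush(3, true));    /* 1: last page forces a flush */
      printf("%d\n", should_flush(15, false));  /* 1: group is full */
      return 0;
  }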

>      qemu_sem_wait(&multifd_send_state->sem);
> @@ -572,12 +575,17 @@ static int multifd_send_page(uint8_t *address)
>  }
>  
>  struct MultiFDRecvParams {
> +    /* not changed */
>      uint8_t id;
>      QemuThread thread;
>      QIOChannel *c;
> +    QemuSemaphore ready;
>      QemuSemaphore sem;
>      QemuMutex mutex;
> +    /* protected by param mutex */
>      bool quit;
> +    multifd_pages_t pages;
> +    bool done;
>  };
>  typedef struct MultiFDRecvParams MultiFDRecvParams;

The params between Send and Recv keep looking very similar; I wonder
if we can share them.
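
Something like the following might work — hypothetical only, with the
field set taken from the recv side above (the send side would need the
same ready/done additions for this to fly):

  /* Hypothetical common struct, not in this series: the fields shared
   * by MultiFDSendParams and MultiFDRecvParams pulled together. */
  typedef struct MultiFDParams {
      /* not changed after creation */
      uint8_t id;
      QemuThread thread;
      QIOChannel *c;
      QemuSemaphore ready;
      QemuSemaphore sem;
      QemuMutex mutex;
      /* protected by mutex */
      bool quit;
      bool done;
      multifd_pages_t pages;
  } MultiFDParams;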

> @@ -629,12 +637,20 @@ static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *p = opaque;
>  
> +    qemu_sem_post(&p->ready);
>      while (true) {
>          qemu_mutex_lock(&p->mutex);
>          if (p->quit) {
>              qemu_mutex_unlock(&p->mutex);
>              break;
>          }
> +        if (p->pages.num) {
> +            p->pages.num = 0;
> +            p->done = true;
> +            qemu_mutex_unlock(&p->mutex);
> +            qemu_sem_post(&p->ready);
> +            continue;
> +        }
>          qemu_mutex_unlock(&p->mutex);
>          qemu_sem_wait(&p->sem);
>      }
> @@ -679,8 +695,11 @@ gboolean multifd_new_channel(QIOChannel *ioc)
>      }
>      qemu_mutex_init(&p->mutex);
>      qemu_sem_init(&p->sem, 0);
> +    qemu_sem_init(&p->ready, 0);
>      p->quit = false;
>      p->id = id;
> +    p->done = false;
> +    multifd_init_group(&p->pages);
>      p->c = ioc;
>      atomic_set(&multifd_recv_state->params[id], p);
>      qemu_thread_create(&p->thread, "multifd_recv", multifd_recv_thread, p,
> @@ -709,6 +728,42 @@ int multifd_load_setup(void)
>      return 0;
>  }
>  
> +static void multifd_recv_page(uint8_t *address, uint16_t fd_num)
> +{
> +    int thread_count;
> +    MultiFDRecvParams *p;
> +    static multifd_pages_t pages;
> +    static bool once;
> +
> +    if (!once) {
> +        multifd_init_group(&pages);
> +        once = true;
> +    }
> +
> +    pages.iov[pages.num].iov_base = address;
> +    pages.iov[pages.num].iov_len = TARGET_PAGE_SIZE;
> +    pages.num++;
> +
> +    if (fd_num == UINT16_MAX) {
> +        return;
> +    }
> +
> +    thread_count = migrate_multifd_threads();
> +    assert(fd_num < thread_count);
> +    p = multifd_recv_state->params[fd_num];
> +
> +    qemu_sem_wait(&p->ready);
> +
> +    qemu_mutex_lock(&p->mutex);
> +    p->done = false;
> +    iov_copy(p->pages.iov, pages.num, pages.iov, pages.num, 0,
> +             iov_size(pages.iov, pages.num));
> +    p->pages.num = pages.num;
> +    pages.num = 0;
> +    qemu_mutex_unlock(&p->mutex);
> +    qemu_sem_post(&p->sem);
> +}
> +
>  /**
>   * save_page_header: write page header to wire
>   *
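
One note on the hot path in multifd_recv_page(): if I read iov_copy()
right, it only duplicates the iovec descriptors, not the page data, so
with offset 0 and the full iov_size() the call above boils down to:

  /* descriptor copy only; the page contents are not touched */
  for (int i = 0; i < pages.num; i++) {
      p->pages.iov[i] = pages.iov[i];   /* struct iovec assignment */
  }

which is cheap, since only the pointers and lengths move.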
> @@ -1155,7 +1210,7 @@ static int ram_multifd_page(RAMState *rs, PageSearchStatus *pss,
>          ram_counters.transferred +=
>              save_page_header(rs, rs->f, block,
>                               offset | RAM_SAVE_FLAG_MULTIFD_PAGE);
> -        fd_num = multifd_send_page(p);
> +        fd_num = multifd_send_page(p, rs->migration_dirty_pages == 1);

I think that belongs in the previous patch and probably answers one of
my questions.

>          qemu_put_be16(rs->f, fd_num);
>          ram_counters.transferred += 2; /* size of fd_num */
>          qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
> @@ -3020,10 +3075,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>  
>          case RAM_SAVE_FLAG_MULTIFD_PAGE:
>              fd_num = qemu_get_be16(f);
> -            if (fd_num != 0) {
> -                /* this is yet an unused variable, changed later */
> -                fd_num = fd_num;
> -            }
> +            multifd_recv_page(host, fd_num);
>              qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
>              break;
>  
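
For the record, my reading of the per-page wire layout after this patch
(conceptual only — the fields are written individually, and the page
data still travels on the main channel at this point):

  save_page_header(...)        existing page header (block id, offset
                               with RAM_SAVE_FLAG_MULTIFD_PAGE set)
  be16 fd_num                  channel id, or UINT16_MAX if the group
                               was not flushed by this page
  TARGET_PAGE_SIZE bytes       the page data itself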
> -- 
> 2.9.4
> 

Dave

--
Dr. David Alan Gilbert / address@hidden / Manchester, UK
