qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [RFC v2 09/11] linux-user: Support CPU work queue


From: Alex Bennée
Subject: Re: [Qemu-devel] [RFC v2 09/11] linux-user: Support CPU work queue
Date: Thu, 14 Jul 2016 16:10:28 +0100
User-agent: mu4e 0.9.17; emacs 25.0.95.9

Sergey Fedorov <address@hidden> writes:

> From: Sergey Fedorov <address@hidden>
>
> Make CPU work core functions common between system and user-mode
> emulation. User-mode does not have BQL, so process_queued_cpu_work() is
> protected by 'exclusive_lock'.
>
> Signed-off-by: Sergey Fedorov <address@hidden>
> Signed-off-by: Sergey Fedorov <address@hidden>
> ---
>
> Changes in v2:
>  - 'qemu_work_cond' definition moved to cpu-exec-common.c
>  - documentation commend for new public API added

Reviewed-by: Alex Bennée <address@hidden>

>
> ---
>  cpu-exec-common.c       | 85 ++++++++++++++++++++++++++++++++++++++++++++++++
>  cpus.c                  | 86 +------------------------------------------------
>  include/exec/exec-all.h | 17 ++++++++++
>  linux-user/main.c       |  8 +++++
>  4 files changed, 111 insertions(+), 85 deletions(-)
>
> diff --git a/cpu-exec-common.c b/cpu-exec-common.c
> index 0cb4ae60eff9..a233f0124559 100644
> --- a/cpu-exec-common.c
> +++ b/cpu-exec-common.c
> @@ -77,3 +77,88 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
>      }
>      siglongjmp(cpu->jmp_env, 1);
>  }
> +
> +QemuCond qemu_work_cond;
> +
> +static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
> +{
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    if (cpu->queued_work_first == NULL) {
> +        cpu->queued_work_first = wi;
> +    } else {
> +        cpu->queued_work_last->next = wi;
> +    }
> +    cpu->queued_work_last = wi;
> +    wi->next = NULL;
> +    wi->done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
> +    qemu_cpu_kick(cpu);
> +}
> +
> +void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> +{
> +    struct qemu_work_item wi;
> +
> +    if (qemu_cpu_is_self(cpu)) {
> +        func(cpu, data);
> +        return;
> +    }
> +
> +    wi.func = func;
> +    wi.data = data;
> +    wi.free = false;
> +
> +    queue_work_on_cpu(cpu, &wi);
> +    while (!atomic_mb_read(&wi.done)) {
> +        CPUState *self_cpu = current_cpu;
> +
> +        qemu_cond_wait(&qemu_work_cond, qemu_get_cpu_work_mutex());
> +        current_cpu = self_cpu;
> +    }
> +}
> +
> +void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> +{
> +    struct qemu_work_item *wi;
> +
> +    if (qemu_cpu_is_self(cpu)) {
> +        func(cpu, data);
> +        return;
> +    }
> +
> +    wi = g_malloc0(sizeof(struct qemu_work_item));
> +    wi->func = func;
> +    wi->data = data;
> +    wi->free = true;
> +
> +    queue_work_on_cpu(cpu, wi);
> +}
> +
> +void process_queued_cpu_work(CPUState *cpu)
> +{
> +    struct qemu_work_item *wi;
> +
> +    if (cpu->queued_work_first == NULL) {
> +        return;
> +    }
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    while (cpu->queued_work_first != NULL) {
> +        wi = cpu->queued_work_first;
> +        cpu->queued_work_first = wi->next;
> +        if (!cpu->queued_work_first) {
> +            cpu->queued_work_last = NULL;
> +        }
> +        qemu_mutex_unlock(&cpu->work_mutex);
> +        wi->func(cpu, wi->data);
> +        qemu_mutex_lock(&cpu->work_mutex);
> +        if (wi->free) {
> +            g_free(wi);
> +        } else {
> +            atomic_mb_set(&wi->done, true);
> +        }
> +    }
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +    qemu_cond_broadcast(&qemu_work_cond);
> +}
> diff --git a/cpus.c b/cpus.c
> index 51fd8c18b4c8..282d7e399902 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -896,7 +896,6 @@ static QemuThread io_thread;
>  static QemuCond qemu_cpu_cond;
>  /* system init */
>  static QemuCond qemu_pause_cond;
> -static QemuCond qemu_work_cond;
>
>  void qemu_init_cpu_loop(void)
>  {
> @@ -910,66 +909,11 @@ void qemu_init_cpu_loop(void)
>      qemu_thread_get_self(&io_thread);
>  }
>
> -static QemuMutex *qemu_get_cpu_work_mutex(void)
> +QemuMutex *qemu_get_cpu_work_mutex(void)
>  {
>      return &qemu_global_mutex;
>  }
>
> -static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
> -{
> -    qemu_mutex_lock(&cpu->work_mutex);
> -    if (cpu->queued_work_first == NULL) {
> -        cpu->queued_work_first = wi;
> -    } else {
> -        cpu->queued_work_last->next = wi;
> -    }
> -    cpu->queued_work_last = wi;
> -    wi->next = NULL;
> -    wi->done = false;
> -    qemu_mutex_unlock(&cpu->work_mutex);
> -
> -    qemu_cpu_kick(cpu);
> -}
> -
> -void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> -{
> -    struct qemu_work_item wi;
> -
> -    if (qemu_cpu_is_self(cpu)) {
> -        func(cpu, data);
> -        return;
> -    }
> -
> -    wi.func = func;
> -    wi.data = data;
> -    wi.free = false;
> -
> -    queue_work_on_cpu(cpu, &wi);
> -    while (!atomic_mb_read(&wi.done)) {
> -        CPUState *self_cpu = current_cpu;
> -
> -        qemu_cond_wait(&qemu_work_cond, qemu_get_cpu_work_mutex());
> -        current_cpu = self_cpu;
> -    }
> -}
> -
> -void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> -{
> -    struct qemu_work_item *wi;
> -
> -    if (qemu_cpu_is_self(cpu)) {
> -        func(cpu, data);
> -        return;
> -    }
> -
> -    wi = g_malloc0(sizeof(struct qemu_work_item));
> -    wi->func = func;
> -    wi->data = data;
> -    wi->free = true;
> -
> -    queue_work_on_cpu(cpu, wi);
> -}
> -
>  static void qemu_kvm_destroy_vcpu(CPUState *cpu)
>  {
>      if (kvm_destroy_vcpu(cpu) < 0) {
> @@ -982,34 +926,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
>  {
>  }
>
> -static void process_queued_cpu_work(CPUState *cpu)
> -{
> -    struct qemu_work_item *wi;
> -
> -    if (cpu->queued_work_first == NULL) {
> -        return;
> -    }
> -
> -    qemu_mutex_lock(&cpu->work_mutex);
> -    while (cpu->queued_work_first != NULL) {
> -        wi = cpu->queued_work_first;
> -        cpu->queued_work_first = wi->next;
> -        if (!cpu->queued_work_first) {
> -            cpu->queued_work_last = NULL;
> -        }
> -        qemu_mutex_unlock(&cpu->work_mutex);
> -        wi->func(cpu, wi->data);
> -        qemu_mutex_lock(&cpu->work_mutex);
> -        if (wi->free) {
> -            g_free(wi);
> -        } else {
> -            atomic_mb_set(&wi->done, true);
> -        }
> -    }
> -    qemu_mutex_unlock(&cpu->work_mutex);
> -    qemu_cond_broadcast(&qemu_work_cond);
> -}
> -
>  static void qemu_wait_io_event_common(CPUState *cpu)
>  {
>      if (cpu->stop) {
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index c1f59fa59d2c..bceecbd5222a 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -407,4 +407,21 @@ extern int singlestep;
>  extern CPUState *tcg_current_cpu;
>  extern bool exit_request;
>
> +/**
> + * qemu_work_cond - condition to wait for CPU work items completion
> + */
> +extern QemuCond qemu_work_cond;
> +
> +/**
> + * qemu_get_cpu_work_mutex() - get the mutex which protects CPU work execution
> + *
> + * Return: A pointer to the mutex.
> + */
> +QemuMutex *qemu_get_cpu_work_mutex(void);
> +/**
> + * process_queued_cpu_work() - process all items on CPU work queue
> + * @cpu: The CPU which work queue to process.
> + */
> +void process_queued_cpu_work(CPUState *cpu);
> +
>  #endif
> diff --git a/linux-user/main.c b/linux-user/main.c
> index a8790ac63f68..fce61d5a35fc 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -121,6 +121,7 @@ void qemu_init_cpu_loop(void)
>      qemu_mutex_init(&exclusive_lock);
>      qemu_cond_init(&exclusive_cond);
>      qemu_cond_init(&exclusive_resume);
> +    qemu_cond_init(&qemu_work_cond);
>  }
>
>  /* Make sure everything is in a consistent state for calling fork().  */
> @@ -149,6 +150,7 @@ void fork_end(int child)
>          qemu_mutex_init(&cpu_list_mutex);
>          qemu_cond_init(&exclusive_cond);
>          qemu_cond_init(&exclusive_resume);
> +        qemu_cond_init(&qemu_work_cond);
>          qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
>          gdbserver_fork(thread_cpu);
>      } else {
> @@ -157,6 +159,11 @@ void fork_end(int child)
>      }
>  }
>
> +QemuMutex *qemu_get_cpu_work_mutex(void)
> +{
> +    return &exclusive_lock;
> +}
> +
>  /* Wait for pending exclusive operations to complete.  The exclusive lock
>     must be held.  */
>  static inline void exclusive_idle(void)
> @@ -215,6 +222,7 @@ static inline void cpu_exec_end(CPUState *cpu)
>          qemu_cond_signal(&exclusive_cond);
>      }
>      exclusive_idle();
> +    process_queued_cpu_work(cpu);
>      qemu_mutex_unlock(&exclusive_lock);
>  }


--
Alex Bennée



reply via email to

[Prev in Thread] Current Thread [Next in Thread]