
Re: [Qemu-devel] [RFC v2 06/11] linux-user: Use QemuMutex and QemuCond


From: Alex Bennée
Subject: Re: [Qemu-devel] [RFC v2 06/11] linux-user: Use QemuMutex and QemuCond
Date: Mon, 11 Jul 2016 13:08:52 +0100
User-agent: mu4e 0.9.17; emacs 25.0.95.9

Sergey Fedorov <address@hidden> writes:

> From: Sergey Fedorov <address@hidden>
>
> Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
> This will allow some locks and condition variables to be shared
> between user and system mode emulation.
>
> Signed-off-by: Sergey Fedorov <address@hidden>
> Signed-off-by: Sergey Fedorov <address@hidden>

Reviewed-by: Alex Bennée <address@hidden>

> ---
>  linux-user/main.c | 53 +++++++++++++++++++++++++++++++----------------------
>  1 file changed, 31 insertions(+), 22 deletions(-)
>
> diff --git a/linux-user/main.c b/linux-user/main.c
> index 617a179f14a4..bdbda693cc5f 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -108,17 +108,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
>     We don't require a full sync, only that no cpus are executing guest code.
>     The alternative is to map target atomic ops onto host equivalents,
>     which requires quite a lot of per host/target work.  */
> -static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
> -static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
> -static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
> -static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
> +static QemuMutex cpu_list_mutex;
> +static QemuMutex exclusive_lock;
> +static QemuCond exclusive_cond;
> +static QemuCond exclusive_resume;
>  static int pending_cpus;
>
> +void qemu_init_cpu_loop(void)
> +{
> +    qemu_mutex_init(&cpu_list_mutex);
> +    qemu_mutex_init(&exclusive_lock);
> +    qemu_cond_init(&exclusive_cond);
> +    qemu_cond_init(&exclusive_resume);
> +}
> +
>  /* Make sure everything is in a consistent state for calling fork().  */
>  void fork_start(void)
>  {
>      qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
> -    pthread_mutex_lock(&exclusive_lock);
> +    qemu_mutex_lock(&exclusive_lock);
>      mmap_fork_start();
>  }
>
> @@ -135,14 +143,14 @@ void fork_end(int child)
>              }
>          }
>          pending_cpus = 0;
> -        pthread_mutex_init(&exclusive_lock, NULL);
> -        pthread_mutex_init(&cpu_list_mutex, NULL);
> -        pthread_cond_init(&exclusive_cond, NULL);
> -        pthread_cond_init(&exclusive_resume, NULL);
> +        qemu_mutex_init(&exclusive_lock);
> +        qemu_mutex_init(&cpu_list_mutex);
> +        qemu_cond_init(&exclusive_cond);
> +        qemu_cond_init(&exclusive_resume);
>          qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
>          gdbserver_fork(thread_cpu);
>      } else {
> -        pthread_mutex_unlock(&exclusive_lock);
> +        qemu_mutex_unlock(&exclusive_lock);
>          qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
>      }
>  }
> @@ -152,7 +160,7 @@ void fork_end(int child)
>  static inline void exclusive_idle(void)
>  {
>      while (pending_cpus) {
> -        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
> +        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
>      }
>  }
>
> @@ -162,7 +170,7 @@ static inline void start_exclusive(void)
>  {
>      CPUState *other_cpu;
>
> -    pthread_mutex_lock(&exclusive_lock);
> +    qemu_mutex_lock(&exclusive_lock);
>      exclusive_idle();
>
>      pending_cpus = 1;
> @@ -174,7 +182,7 @@ static inline void start_exclusive(void)
>          }
>      }
>      if (pending_cpus > 1) {
> -        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
> +        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
>      }
>  }
>
> @@ -182,42 +190,42 @@ static inline void start_exclusive(void)
>  static inline void __attribute__((unused)) end_exclusive(void)
>  {
>      pending_cpus = 0;
> -    pthread_cond_broadcast(&exclusive_resume);
> -    pthread_mutex_unlock(&exclusive_lock);
> +    qemu_cond_broadcast(&exclusive_resume);
> +    qemu_mutex_unlock(&exclusive_lock);
>  }
>
>  /* Wait for exclusive ops to finish, and begin cpu execution.  */
>  static inline void cpu_exec_start(CPUState *cpu)
>  {
> -    pthread_mutex_lock(&exclusive_lock);
> +    qemu_mutex_lock(&exclusive_lock);
>      exclusive_idle();
>      cpu->running = true;
> -    pthread_mutex_unlock(&exclusive_lock);
> +    qemu_mutex_unlock(&exclusive_lock);
>  }
>
>  /* Mark cpu as not executing, and release pending exclusive ops.  */
>  static inline void cpu_exec_end(CPUState *cpu)
>  {
> -    pthread_mutex_lock(&exclusive_lock);
> +    qemu_mutex_lock(&exclusive_lock);
>      cpu->running = false;
>      if (pending_cpus > 1) {
>          pending_cpus--;
>          if (pending_cpus == 1) {
> -            pthread_cond_signal(&exclusive_cond);
> +            qemu_cond_signal(&exclusive_cond);
>          }
>      }
>      exclusive_idle();
> -    pthread_mutex_unlock(&exclusive_lock);
> +    qemu_mutex_unlock(&exclusive_lock);
>  }
>
>  void cpu_list_lock(void)
>  {
> -    pthread_mutex_lock(&cpu_list_mutex);
> +    qemu_mutex_lock(&cpu_list_mutex);
>  }
>
>  void cpu_list_unlock(void)
>  {
> -    pthread_mutex_unlock(&cpu_list_mutex);
> +    qemu_mutex_unlock(&cpu_list_mutex);
>  }
>
>
> @@ -4210,6 +4218,7 @@ int main(int argc, char **argv, char **envp)
>      int ret;
>      int execfd;
>
> +    qemu_init_cpu_loop();
>      module_call_init(MODULE_INIT_QOM);
>
>      if ((envlist = envlist_create()) == NULL) {
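
For anyone skimming the series: the visible change is that the static
PTHREAD_MUTEX_INITIALIZER/PTHREAD_COND_INITIALIZER initialisers go away
and the primitives are now set up at run time, which is why the patch
adds qemu_init_cpu_loop() and calls it from main() before any CPU thread
can exist. A minimal sketch of that init-before-use pattern, assuming
only the QemuMutex/QemuCond API from include/qemu/thread.h (the demo_*
names are illustrative, not from the patch, and this only builds inside
the QEMU tree):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    /* Unlike the pthread primitives, these have no static initialiser
     * and must be initialised at run time before first use. */
    static QemuMutex demo_lock;
    static QemuCond demo_cond;
    static bool demo_ready;

    /* Call once, early in main(), before any other thread can touch the
     * lock -- the same role qemu_init_cpu_loop() plays in the patch. */
    static void demo_init(void)
    {
        qemu_mutex_init(&demo_lock);
        qemu_cond_init(&demo_cond);
    }

    static void demo_wait_ready(void)
    {
        qemu_mutex_lock(&demo_lock);
        while (!demo_ready) {
            /* Drops the mutex while sleeping and re-acquires it before
             * returning, just like pthread_cond_wait(). */
            qemu_cond_wait(&demo_cond, &demo_lock);
        }
        qemu_mutex_unlock(&demo_lock);
    }

    static void demo_set_ready(void)
    {
        qemu_mutex_lock(&demo_lock);
        demo_ready = true;
        qemu_cond_broadcast(&demo_cond);
        qemu_mutex_unlock(&demo_lock);
    }

Same wait-under-lock shape as exclusive_idle()/end_exclusive() above,
just reduced to a single flag.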


--
Alex Bennée


