From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH 08/12] cpus-common: move exclusive work infrastructure from linux-user
Date: Mon, 5 Sep 2016 16:57:33 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.2.0


On 05/09/2016 16:55, Alex Bennée wrote:
>> > -/* These are no-ops because we are not threadsafe.  */
>> > -static inline void cpu_exec_start(CPUState *cpu)
>> > -{
>> > -}
>> > -
>> > -static inline void cpu_exec_end(CPUState *cpu)
>> > -{
>> > -}
>> > -
>> > -static inline void start_exclusive(void)
>> > -{
>> > -}
>> > -
>> > -static inline void end_exclusive(void)
>> > -{
>> > -}
>> > -
> 
> Does this mean BSD user now gets exclusive support as well or is other
> plumbing required for the tb_flush to work?

It does, though I forgot to add the call to process the work items.
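
For context, a minimal sketch of that missing plumbing, assuming
bsd-user's main loop ends up mirroring the linux-user pattern (the
function shape here is illustrative, not taken from this patch):

    /* Illustrative only: bsd-user's cpu_loop would bracket cpu_exec
     * with cpu_exec_start/end, then drain queued work items. */
    static void cpu_loop(CPUState *cs)
    {
        int trapnr;

        for (;;) {
            cpu_exec_start(cs);           /* wait out exclusive sections */
            trapnr = cpu_exec(cs);        /* run guest code */
            cpu_exec_end(cs);             /* allow exclusive ops to start */
            process_queued_cpu_work(cs);  /* the call mentioned above */
            /* ... dispatch trapnr to the guest trap handlers ... */
        }
    }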

>> >  void fork_start(void)
>> >  {
>> >  }
>> > diff --git a/cpus-common.c b/cpus-common.c
>> > index 12729e5..47f7c06 100644
>> > --- a/cpus-common.c
>> > +++ b/cpus-common.c
>> > @@ -23,11 +23,21 @@
>> >  #include "exec/memory-internal.h"
>> >
>> >  static QemuMutex qemu_cpu_list_mutex;
>> > +static QemuCond exclusive_cond;
>> > +static QemuCond exclusive_resume;
>> >  static QemuCond qemu_work_cond;
>> >
>> > +static int pending_cpus;
>> > +
>> >  void qemu_init_cpu_list(void)
>> >  {
>> > +    /* This is needed because qemu_init_cpu_list is also called by the
>> > +     * child process in a fork.  */
>> > +    pending_cpus = 0;
>> > +
>> >      qemu_mutex_init(&qemu_cpu_list_mutex);
>> > +    qemu_cond_init(&exclusive_cond);
>> > +    qemu_cond_init(&exclusive_resume);
>> >      qemu_cond_init(&qemu_work_cond);
>> >  }
>> >
>> > @@ -52,6 +62,12 @@ static int cpu_get_free_index(void)
>> >      return cpu_index;
>> >  }
>> >
>> > +static void finish_safe_work(CPUState *cpu)
>> > +{
>> > +    cpu_exec_start(cpu);
>> > +    cpu_exec_end(cpu);
>> > +}
>> > +
>> >  void cpu_list_add(CPUState *cpu)
>> >  {
>> >      qemu_mutex_lock(&qemu_cpu_list_mutex);
>> > @@ -62,6 +78,8 @@ void cpu_list_add(CPUState *cpu)
>> >
>> >      QTAILQ_INSERT_TAIL(&cpus, cpu, node);
>> >      qemu_mutex_unlock(&qemu_cpu_list_mutex);
>> > +
>> > +    finish_safe_work(cpu);
>> >  }
>> >
>> >  void cpu_list_remove(CPUState *cpu)
>> > @@ -131,6 +149,70 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
>> >      queue_work_on_cpu(cpu, wi);
>> >  }
>> >
>> > +/* Wait for pending exclusive operations to complete.  The exclusive lock
>> > +   must be held.  */
>> > +static inline void exclusive_idle(void)
>> > +{
>> > +    while (pending_cpus) {
>> > +        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_mutex);
>> > +    }
>> > +}
>> > +
>> > +/* Start an exclusive operation.
>> > +   Must only be called from outside cpu_exec, takes
>> > +   qemu_cpu_list_mutex.   */
>> > +void start_exclusive(void)
>> > +{
>> > +    CPUState *other_cpu;
>> > +
>> > +    qemu_mutex_lock(&qemu_cpu_list_mutex);
>> > +    exclusive_idle();
>> > +
>> > +    /* Make all other cpus stop executing.  */
>> > +    pending_cpus = 1;
>> > +    CPU_FOREACH(other_cpu) {
>> > +        if (other_cpu->running) {
>> > +            pending_cpus++;
>> > +            qemu_cpu_kick(other_cpu);
>> > +        }
>> > +    }
>> > +    if (pending_cpus > 1) {
>> > +        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_mutex);
>> > +    }
>> > +}
>> > +
>> > +/* Finish an exclusive operation.  Releases qemu_cpu_list_mutex.  */
>> > +void end_exclusive(void)
>> > +{
>> > +    pending_cpus = 0;
>> > +    qemu_cond_broadcast(&exclusive_resume);
>> > +    qemu_mutex_unlock(&qemu_cpu_list_mutex);
>> > +}
>> > +
>> > +/* Wait for exclusive ops to finish, and begin cpu execution.  */
>> > +void cpu_exec_start(CPUState *cpu)
>> > +{
>> > +    qemu_mutex_lock(&qemu_cpu_list_mutex);
>> > +    exclusive_idle();
>> > +    cpu->running = true;
>> > +    qemu_mutex_unlock(&qemu_cpu_list_mutex);
>> > +}
>> > +
>> > +/* Mark cpu as not executing, and release pending exclusive ops.  */
>> > +void cpu_exec_end(CPUState *cpu)
>> > +{
>> > +    qemu_mutex_lock(&qemu_cpu_list_mutex);
>> > +    cpu->running = false;
>> > +    if (pending_cpus > 1) {
>> > +        pending_cpus--;
>> > +        if (pending_cpus == 1) {
>> > +            qemu_cond_signal(&exclusive_cond);
>> > +        }
>> > +    }
>> > +    exclusive_idle();
>> > +    qemu_mutex_unlock(&qemu_cpu_list_mutex);
>> > +}
>> > +
>> >  void process_queued_cpu_work(CPUState *cpu)
>> >  {
>> >      struct qemu_work_item *wi;
>> > diff --git a/cpus.c b/cpus.c
>> > index e1bdc16..b024604 100644
>> > --- a/cpus.c
>> > +++ b/cpus.c
>> > @@ -1456,7 +1456,9 @@ static int tcg_cpu_exec(CPUState *cpu)
>> >          cpu->icount_decr.u16.low = decr;
>> >          cpu->icount_extra = count;
>> >      }
>> > +    cpu_exec_start(cpu);
>> >      ret = cpu_exec(cpu);
>> > +    cpu_exec_end(cpu);
>> >  #ifdef CONFIG_PROFILER
>> >      tcg_time += profile_getclock() - ti;
>> >  #endif
>> > diff --git a/include/qom/cpu.h b/include/qom/cpu.h
>> > index d7688f6..0e04e8f 100644
>> > --- a/include/qom/cpu.h
>> > +++ b/include/qom/cpu.h
>> > @@ -249,7 +249,8 @@ struct qemu_work_item {
>> >   * @nr_threads: Number of threads within this CPU.
>> >   * @numa_node: NUMA node this CPU is belonging to.
>> >   * @host_tid: Host thread ID.
>> > - * @running: #true if CPU is currently running (usermode).
>> > + * @running: #true if CPU is currently running;
>> > + * valid under cpu_list_lock.
>> >   * @created: Indicates whether the CPU thread has been successfully created.
>> >   * @interrupt_request: Indicates a pending interrupt request.
>> >   * @halted: Nonzero if the CPU is in suspended state.
>> > @@ -826,6 +827,43 @@ void cpu_remove_sync(CPUState *cpu);
>> >  void process_queued_cpu_work(CPUState *cpu);
>> >
>> >  /**
>> > + * cpu_exec_start:
>> > + * @cpu: The CPU for the current thread.
>> > + *
>> > + * Record that a CPU has started execution and can be interrupted with
>> > + * cpu_exit.
>> > + */
>> > +void cpu_exec_start(CPUState *cpu);
>> > +
>> > +/**
>> > + * cpu_exec_end:
>> > + * @cpu: The CPU for the current thread.
>> > + *
>> > + * Record that a CPU has stopped execution and safe work can be executed
>> > + * without interrupting it.
>> > + */
>> > +void cpu_exec_end(CPUState *cpu);
>> > +
>> > +/**
>> > + * start_exclusive:
>> > + * @cpu: The CPU for the current thread.
>> > + *
>> > + * Similar to async_safe_work_run_on_cpu, but the work item is only
>> > + * run on one CPU and is between start_exclusive and end_exclusive.
>> > + * Returns with the CPU list lock taken (which nests outside all
>> > + * other locks except the BQL).
>> > + */
>> > +void start_exclusive(void);
> 
> Hmm, the comment here looks as though it has been transplanted or is
> somehow mangled. The function doesn't take @cpu and isn't like
> async_safe_work_run_on_cpu.

It doesn't take @cpu, but it is kind of like async_safe_run_on_cpu.
However, async_safe_run_on_cpu doesn't exist yet. :)
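
For illustration, the intended pairing (a sketch based on the semantics
in the patch comments, not code from this series) for a caller that must
quiesce all vCPUs, e.g. around a tb_flush:

    start_exclusive();   /* returns with the CPU list lock held and
                          * every other vCPU outside cpu_exec */
    tb_flush(cpu);       /* safe: no other vCPU is executing TBs */
    end_exclusive();     /* wakes the other vCPUs, drops the lock */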

Paolo


