qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections


From: Wen Congyang
Subject: Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
Date: Thu, 23 Jul 2015 19:04:13 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Thunderbird/38.1.0

On 07/23/2015 06:42 PM, Paolo Bonzini wrote:
> 
> 
> On 23/07/2015 12:30, Christian Borntraeger wrote:
>> Am 22.07.2015 um 16:18 schrieb Paolo Bonzini:
>>> Otherwise, grace periods are detected too early!
>>
>> I guess this or Wen's proposal is still necessary for 2.4?
> 
> Yes.  I think this is better for 2.4.  There are threads that do not
> need RCU, for example the thread-pool.c worker threads, so it may just

If the thread doesn't use RCU, rcu_register_thread() is harmless, is that right?

> be simpler to add an assertion in rcu_register_thread.  I'm just a bit
> wary of doing little more than the bare minimum in 2.4, because of the
> OS X failure that I didn't quite understand.

Which problem? I can't find it in the mailing list. Am I missing something?

Thanks
Wen Congyang

> 
> Paolo
> 
>>
>>>
>>> Signed-off-by: Paolo Bonzini <address@hidden>
>>> ---
>>>  cpus.c                | 6 ++++++
>>>  iothread.c            | 5 +++++
>>>  migration/migration.c | 4 ++++
>>>  tests/test-rcu-list.c | 4 ++++
>>>  util/rcu.c            | 2 ++
>>>  5 files changed, 21 insertions(+)
>>>
>>> diff --git a/cpus.c b/cpus.c
>>> index b00a423..a822ce3 100644
>>> --- a/cpus.c
>>> +++ b/cpus.c
>>> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>>>      CPUState *cpu = arg;
>>>      int r;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      qemu_mutex_lock_iothread();
>>>      qemu_thread_get_self(cpu->thread);
>>>      cpu->thread_id = qemu_get_thread_id();
>>> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>>>      sigset_t waitset;
>>>      int r;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      qemu_mutex_lock_iothread();
>>>      qemu_thread_get_self(cpu->thread);
>>>      cpu->thread_id = qemu_get_thread_id();
>>> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>>  {
>>>      CPUState *cpu = arg;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      qemu_mutex_lock_iothread();
>>>      qemu_tcg_init_cpu_signals();
>>>      qemu_thread_get_self(cpu->thread);
>>> diff --git a/iothread.c b/iothread.c
>>> index 6d2a33f..da6ce7b 100644
>>> --- a/iothread.c
>>> +++ b/iothread.c
>>> @@ -18,6 +18,7 @@
>>>  #include "sysemu/iothread.h"
>>>  #include "qmp-commands.h"
>>>  #include "qemu/error-report.h"
>>> +#include "qemu/rcu.h"
>>>
>>>  typedef ObjectClass IOThreadClass;
>>>
>>> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
>>>      IOThread *iothread = opaque;
>>>      bool blocking;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      qemu_mutex_lock(&iothread->init_done_lock);
>>>      iothread->thread_id = qemu_get_thread_id();
>>>      qemu_cond_signal(&iothread->init_done_cond);
>>> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
>>>          }
>>>          aio_context_release(iothread->ctx);
>>>      }
>>> +
>>> +    rcu_unregister_thread();
>>>      return NULL;
>>>  }
>>>
>>> diff --git a/migration/migration.c b/migration/migration.c
>>> index 86ca099..fd4f99b 100644
>>> --- a/migration/migration.c
>>> +++ b/migration/migration.c
>>> @@ -22,6 +22,7 @@
>>>  #include "block/block.h"
>>>  #include "qapi/qmp/qerror.h"
>>>  #include "qemu/sockets.h"
>>> +#include "qemu/rcu.h"
>>>  #include "migration/block.h"
>>>  #include "qemu/thread.h"
>>>  #include "qmp-commands.h"
>>> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
>>>      int64_t start_time = initial_time;
>>>      bool old_vm_running = false;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      qemu_savevm_state_header(s->file);
>>>      qemu_savevm_state_begin(s->file, &s->params);
>>>
>>> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
>>>      qemu_bh_schedule(s->cleanup_bh);
>>>      qemu_mutex_unlock_iothread();
>>>
>>> +    rcu_unregister_thread();
>>>      return NULL;
>>>  }
>>>
>>> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
>>> index 4c5f62e..daa8bf4 100644
>>> --- a/tests/test-rcu-list.c
>>> +++ b/tests/test-rcu-list.c
>>> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
>>>      long long n_reads_local = 0;
>>>      struct list_element *el;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      *(struct rcu_reader_data **)arg = &rcu_reader;
>>>      atomic_inc(&nthreadsrunning);
>>>      while (goflag == GOFLAG_INIT) {
>>> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
>>>      qemu_mutex_lock(&counts_mutex);
>>>      n_reads += n_reads_local;
>>>      qemu_mutex_unlock(&counts_mutex);
>>> +
>>> +    rcu_unregister_thread();
>>>      return NULL;
>>>  }
>>>
>>> diff --git a/util/rcu.c b/util/rcu.c
>>> index 7270151..cdcad67 100644
>>> --- a/util/rcu.c
>>> +++ b/util/rcu.c
>>> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
>>>  {
>>>      struct rcu_head *node;
>>>
>>> +    rcu_register_thread();
>>> +
>>>      for (;;) {
>>>          int tries = 0;
>>>          int n = atomic_read(&rcu_call_count);
>>>
>>
>>
>>
> 
> .
> 




reply via email to

[Prev in Thread] Current Thread [Next in Thread]