From: Sergey Fedorov
Subject: Re: [Qemu-devel] [RFC 4/8] linux-user: Rework exclusive operation mechanism
Date: Wed, 29 Jun 2016 17:57:59 +0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Thunderbird/38.8.0

On 27/06/16 12:02, Alex Bennée wrote:
> Sergey Fedorov <address@hidden> writes:
>
>> From: Sergey Fedorov <address@hidden>
>>
(snip)
>> diff --git a/linux-user/main.c b/linux-user/main.c
>> index b9a4e0ea45ac..485336f78b8f 100644
>> --- a/linux-user/main.c
>> +++ b/linux-user/main.c
>> @@ -111,7 +111,8 @@ static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
>>  static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
>>  static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
>>  static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
>> -static int pending_cpus;
>> +static bool exclusive_pending;
>> +static int tcg_pending_cpus;
> I'm not sure you need to re-name to tcg_pending_cpus as TCG is implied
> for linux-user. Also they are not really CPUs (although we are using the
> CPU structure for each running thread). I'm not sure if there is a
> neater way to make the distinction clear.

How about 'tcg_pending_threads'? It is going to be used in system-mode
soon, so I'd like to keep the "tcg_" prefix.

>
>>  /* Make sure everything is in a consistent state for calling fork().  */
>>  void fork_start(void)
>> @@ -133,7 +134,8 @@ void fork_end(int child)
>>                  QTAILQ_REMOVE(&cpus, cpu, node);
>>              }
>>          }
>> -        pending_cpus = 0;
>> +        tcg_pending_cpus = 0;
>> +        exclusive_pending = false;
>>          pthread_mutex_init(&exclusive_lock, NULL);
>>          pthread_mutex_init(&cpu_list_mutex, NULL);
>>          pthread_cond_init(&exclusive_cond, NULL);
>> @@ -150,7 +152,7 @@ void fork_end(int child)
>>     must be held.  */
>>  static inline void exclusive_idle(void)
>>  {
>> -    while (pending_cpus) {
>> +    while (exclusive_pending) {
>>          pthread_cond_wait(&exclusive_resume, &exclusive_lock);
>>      }
>>  }
>> @@ -164,15 +166,14 @@ static inline void start_exclusive(void)
>>      pthread_mutex_lock(&exclusive_lock);
>>      exclusive_idle();
>>
>> -    pending_cpus = 1;
>> +    exclusive_pending = true;
>>      /* Make all other cpus stop executing.  */
>>      CPU_FOREACH(other_cpu) {
>>          if (other_cpu->running) {
>> -            pending_cpus++;
>>              cpu_exit(other_cpu);
>>          }
>>      }
>> -    if (pending_cpus > 1) {
>> +    while (tcg_pending_cpus) {
>>          pthread_cond_wait(&exclusive_cond, &exclusive_lock);
>>      }
>>  }
>> @@ -180,7 +181,7 @@ static inline void start_exclusive(void)
>>  /* Finish an exclusive operation.  */
>>  static inline void __attribute__((unused)) end_exclusive(void)
>>  {
>> -    pending_cpus = 0;
>> +    exclusive_pending = false;
>>      pthread_cond_broadcast(&exclusive_resume);
>>      pthread_mutex_unlock(&exclusive_lock);
>>  }
>> @@ -191,6 +192,7 @@ static inline void cpu_exec_start(CPUState *cpu)
>>      pthread_mutex_lock(&exclusive_lock);
>>      exclusive_idle();
>>      cpu->running = true;
>> +    tcg_pending_cpus++;
> These aren't TLS variables so shouldn't we be ensuring all access is atomic?

It is protected by 'exclusive_lock': both the increment in
cpu_exec_start() and the decrement in cpu_exec_end() are done with that
mutex held, so plain (non-atomic) accesses are safe.
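
To make that concrete, here is a minimal standalone sketch of the
pattern (hypothetical names, not the actual QEMU code): the counter is
only ever touched with the mutex held, so plain increments and
decrements are enough, and the condition variable is broadcast once the
counter drops to zero:

#include <pthread.h>

/* Sketch only -- hypothetical names, not the QEMU implementation. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;
static int pending;             /* protected by 'lock' */

static void exec_start(void)
{
    pthread_mutex_lock(&lock);
    pending++;                  /* safe: 'lock' is held */
    pthread_mutex_unlock(&lock);
}

static void exec_end(void)
{
    pthread_mutex_lock(&lock);
    pending--;                  /* cannot drop below zero: every
                                 * decrement pairs with an increment
                                 * made under the same lock */
    if (!pending) {
        pthread_cond_broadcast(&all_done);
    }
    pthread_mutex_unlock(&lock);
}

static void wait_until_idle(void)
{
    pthread_mutex_lock(&lock);
    while (pending) {
        pthread_cond_wait(&all_done, &lock);
    }
    pthread_mutex_unlock(&lock);
}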

>
>>      pthread_mutex_unlock(&exclusive_lock);
>>  }
>>
>> @@ -199,11 +201,9 @@ static inline void cpu_exec_end(CPUState *cpu)
>>  {
>>      pthread_mutex_lock(&exclusive_lock);
>>      cpu->running = false;
>> -    if (pending_cpus > 1) {
>> -        pending_cpus--;
>> -        if (pending_cpus == 1) {
>> -            pthread_cond_signal(&exclusive_cond);
>> -        }
>> +    tcg_pending_cpus--;
>> +    if (!tcg_pending_cpus) {
>> +        pthread_cond_broadcast(&exclusive_cond);
>>      }
> Couldn't two threads race to -1 here?

See the comment above: the decrement in cpu_exec_end() also runs under
'exclusive_lock', so two threads cannot race the counter below zero.

Kind regards,
Sergey

>
>>      exclusive_idle();
>>      pthread_mutex_unlock(&exclusive_lock);
>
> --
> Alex Bennée



