From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH] win32: replace custom mutex and condition variable with native primitives
Date: Fri, 24 Mar 2017 18:28:47 -0400 (EDT)

> From: Andrey Shedel <address@hidden>
> 
> The multithreaded TCG implementation exposed deadlocks in the win32
> condition variables: as implemented, qemu_cond_broadcast waited on
> receivers, whereas the pthreads API it was intended to emulate does
> not. This caused a deadlock when broadcast was called while holding
> the IO lock: all potential waiters were blocked on that same lock,
> so none of them could ever consume the wake-up the broadcaster was
> waiting for.
> 
> This patch replaces all the custom synchronisation code for mutexes
> and condition variables with native Windows primitives (SRW locks and
> condition variables) that have the same semantics as their POSIX
> equivalents. To enable that, it requires a Windows Vista or newer
> host OS.
> 
> [AB: edited commit message]
> Signed-off-by: Andrew Baumann <address@hidden>

Oops, just a nit but an important one: there should be a
Signed-off-by for Andrey as well.

Paolo
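
For context (not part of the patch): a minimal standalone sketch of the
pattern the native primitives support, showing that WakeAllConditionVariable
returns immediately and can safely be called while the SRW lock is held,
which is exactly the situation that deadlocked the old hand-rolled broadcast.
All names here (waiter, ready, etc.) are illustrative only.

    #ifndef _WIN32_WINNT
    #define _WIN32_WINNT 0x0600   /* SRW locks and condition variables need Vista+ */
    #endif
    #include <windows.h>
    #include <stdio.h>

    static SRWLOCK lock = SRWLOCK_INIT;
    static CONDITION_VARIABLE cond = CONDITION_VARIABLE_INIT;
    static int ready;

    static DWORD WINAPI waiter(LPVOID arg)
    {
        AcquireSRWLockExclusive(&lock);
        while (!ready) {
            /* Atomically releases the SRW lock, sleeps, re-acquires it on wake-up.
             * The loop also covers spurious wake-ups, as with pthread_cond_wait. */
            SleepConditionVariableSRW(&cond, &lock, INFINITE, 0);
        }
        ReleaseSRWLockExclusive(&lock);
        printf("waiter %u released\n", (unsigned)(UINT_PTR)arg);
        return 0;
    }

    int main(void)
    {
        HANDLE threads[4];
        int i;

        for (i = 0; i < 4; i++) {
            threads[i] = CreateThread(NULL, 0, waiter, (LPVOID)(UINT_PTR)i, 0, NULL);
        }

        Sleep(100);   /* not needed for correctness; just lets the waiters block */

        AcquireSRWLockExclusive(&lock);
        ready = 1;
        /* Returns immediately and does not wait on the receivers, so holding
         * the lock across the broadcast cannot deadlock. */
        WakeAllConditionVariable(&cond);
        ReleaseSRWLockExclusive(&lock);

        WaitForMultipleObjects(4, threads, TRUE, INFINITE);
        for (i = 0; i < 4; i++) {
            CloseHandle(threads[i]);
        }
        return 0;
    }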

> ---
> The major implication of this patch is that it drops support for
> pre-Vista versions of Windows. However, those OSes are past their end
> of life, and other OSS projects have already dropped support for
> them; for example, the last Cygwin release supporting XP was in June
> 2016. It doesn't seem like a good tradeoff to invest effort in fixing
> broken code that exists only to support them, so hopefully this isn't
> too controversial.
> 
>  include/qemu/thread-win32.h |   7 +--
>  util/qemu-thread-win32.c    | 136 +++++---------------------------------------
>  2 files changed, 17 insertions(+), 126 deletions(-)
> 
> diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h
> index 5fb6541..4c4a261 100644
> --- a/include/qemu/thread-win32.h
> +++ b/include/qemu/thread-win32.h
> @@ -4,8 +4,7 @@
>  #include <windows.h>
>  
>  struct QemuMutex {
> -    CRITICAL_SECTION lock;
> -    LONG owner;
> +    SRWLOCK lock;
>  };
>  
>  typedef struct QemuRecMutex QemuRecMutex;
> @@ -19,9 +18,7 @@ int qemu_rec_mutex_trylock(QemuRecMutex *mutex);
>  void qemu_rec_mutex_unlock(QemuRecMutex *mutex);
>  
>  struct QemuCond {
> -    LONG waiters, target;
> -    HANDLE sema;
> -    HANDLE continue_event;
> +    CONDITION_VARIABLE var;
>  };
>  
>  struct QemuSemaphore {
> diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
> index 29c3e4d..59befd5 100644
> --- a/util/qemu-thread-win32.c
> +++ b/util/qemu-thread-win32.c
> @@ -10,6 +10,11 @@
>   * See the COPYING file in the top-level directory.
>   *
>   */
> +
> +#ifndef _WIN32_WINNT
> +#define _WIN32_WINNT 0x0600
> +#endif
> +
>  #include "qemu/osdep.h"
>  #include "qemu-common.h"
>  #include "qemu/thread.h"
> @@ -39,44 +44,30 @@ static void error_exit(int err, const char *msg)
>  
>  void qemu_mutex_init(QemuMutex *mutex)
>  {
> -    mutex->owner = 0;
> -    InitializeCriticalSection(&mutex->lock);
> +    InitializeSRWLock(&mutex->lock);
>  }
>  
>  void qemu_mutex_destroy(QemuMutex *mutex)
>  {
> -    assert(mutex->owner == 0);
> -    DeleteCriticalSection(&mutex->lock);
> +    InitializeSRWLock(&mutex->lock);
>  }
>  
>  void qemu_mutex_lock(QemuMutex *mutex)
>  {
> -    EnterCriticalSection(&mutex->lock);
> -
> -    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
> -     * using them as such.
> -     */
> -    assert(mutex->owner == 0);
> -    mutex->owner = GetCurrentThreadId();
> +    AcquireSRWLockExclusive(&mutex->lock);
>  }
>  
>  int qemu_mutex_trylock(QemuMutex *mutex)
>  {
>      int owned;
>  
> -    owned = TryEnterCriticalSection(&mutex->lock);
> -    if (owned) {
> -        assert(mutex->owner == 0);
> -        mutex->owner = GetCurrentThreadId();
> -    }
> +    owned = TryAcquireSRWLockExclusive(&mutex->lock);
>      return !owned;
>  }
>  
>  void qemu_mutex_unlock(QemuMutex *mutex)
>  {
> -    assert(mutex->owner == GetCurrentThreadId());
> -    mutex->owner = 0;
> -    LeaveCriticalSection(&mutex->lock);
> +    ReleaseSRWLockExclusive(&mutex->lock);
>  }
>  
>  void qemu_rec_mutex_init(QemuRecMutex *mutex)
> @@ -107,124 +98,27 @@ void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
>  void qemu_cond_init(QemuCond *cond)
>  {
>      memset(cond, 0, sizeof(*cond));
> -
> -    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
> -    if (!cond->sema) {
> -        error_exit(GetLastError(), __func__);
> -    }
> -    cond->continue_event = CreateEvent(NULL,    /* security */
> -                                       FALSE,   /* auto-reset */
> -                                       FALSE,   /* not signaled */
> -                                       NULL);   /* name */
> -    if (!cond->continue_event) {
> -        error_exit(GetLastError(), __func__);
> -    }
> +    InitializeConditionVariable(&cond->var);
>  }
>  
>  void qemu_cond_destroy(QemuCond *cond)
>  {
> -    BOOL result;
> -    result = CloseHandle(cond->continue_event);
> -    if (!result) {
> -        error_exit(GetLastError(), __func__);
> -    }
> -    cond->continue_event = 0;
> -    result = CloseHandle(cond->sema);
> -    if (!result) {
> -        error_exit(GetLastError(), __func__);
> -    }
> -    cond->sema = 0;
> +    InitializeConditionVariable(&cond->var);
>  }
>  
>  void qemu_cond_signal(QemuCond *cond)
>  {
> -    DWORD result;
> -
> -    /*
> -     * Signal only when there are waiters.  cond->waiters is
> -     * incremented by pthread_cond_wait under the external lock,
> -     * so we are safe about that.
> -     */
> -    if (cond->waiters == 0) {
> -        return;
> -    }
> -
> -    /*
> -     * Waiting threads decrement it outside the external lock, but
> -     * only if another thread is executing pthread_cond_broadcast and
> -     * has the mutex.  So, it also cannot be decremented concurrently
> -     * with this particular access.
> -     */
> -    cond->target = cond->waiters - 1;
> -    result = SignalObjectAndWait(cond->sema, cond->continue_event,
> -                                 INFINITE, FALSE);
> -    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
> -        error_exit(GetLastError(), __func__);
> -    }
> +    WakeConditionVariable(&cond->var);
>  }
>  
>  void qemu_cond_broadcast(QemuCond *cond)
>  {
> -    BOOLEAN result;
> -    /*
> -     * As in pthread_cond_signal, access to cond->waiters and
> -     * cond->target is locked via the external mutex.
> -     */
> -    if (cond->waiters == 0) {
> -        return;
> -    }
> -
> -    cond->target = 0;
> -    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
> -    if (!result) {
> -        error_exit(GetLastError(), __func__);
> -    }
> -
> -    /*
> -     * At this point all waiters continue. Each one takes its
> -     * slice of the semaphore. Now it's our turn to wait: Since
> -     * the external mutex is held, no thread can leave cond_wait,
> -     * yet. For this reason, we can be sure that no thread gets
> -     * a chance to eat *more* than one slice. OTOH, it means
> -     * that the last waiter must send us a wake-up.
> -     */
> -    WaitForSingleObject(cond->continue_event, INFINITE);
> +    WakeAllConditionVariable(&cond->var);
>  }
>  
>  void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
>  {
> -    /*
> -     * This access is protected under the mutex.
> -     */
> -    cond->waiters++;
> -
> -    /*
> -     * Unlock external mutex and wait for signal.
> -     * NOTE: we've held mutex locked long enough to increment
> -     * waiters count above, so there's no problem with
> -     * leaving mutex unlocked before we wait on semaphore.
> -     */
> -    qemu_mutex_unlock(mutex);
> -    WaitForSingleObject(cond->sema, INFINITE);
> -
> -    /* Now waiters must rendez-vous with the signaling thread and
> -     * let it continue.  For cond_broadcast this has heavy contention
> -     * and triggers thundering herd.  So goes life.
> -     *
> -     * Decrease waiters count.  The mutex is not taken, so we have
> -     * to do this atomically.
> -     *
> -     * All waiters contend for the mutex at the end of this function
> -     * until the signaling thread relinquishes it.  To ensure
> -     * each waiter consumes exactly one slice of the semaphore,
> -     * the signaling thread stops until it is told by the last
> -     * waiter that it can go on.
> -     */
> -    if (InterlockedDecrement(&cond->waiters) == cond->target) {
> -        SetEvent(cond->continue_event);
> -    }
> -
> -    qemu_mutex_lock(mutex);
> +    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
>  }
>  
>  void qemu_sem_init(QemuSemaphore *sem, int init)
> --
> 2.8.3
> 
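
For reference (not part of the patch, names are illustrative): because both
POSIX condition variables and SleepConditionVariableSRW allow spurious
wake-ups, callers of the QEMU wrappers keep the usual POSIX contract of
waiting in a loop around their predicate, and broadcasting with the mutex
held is now safe. A minimal caller-side sketch:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    /* Hypothetical example state; initialise with qemu_mutex_init() and
     * qemu_cond_init() before use. */
    static QemuMutex example_lock;
    static QemuCond example_cond;
    static bool example_done;

    static void example_wait(void)
    {
        qemu_mutex_lock(&example_lock);
        while (!example_done) {
            /* Drops example_lock while sleeping, re-acquires it before
             * returning; the loop guards against spurious wake-ups. */
            qemu_cond_wait(&example_cond, &example_lock);
        }
        qemu_mutex_unlock(&example_lock);
    }

    static void example_finish(void)
    {
        qemu_mutex_lock(&example_lock);
        example_done = true;
        /* Does not wait for the receivers, so the lock can stay held. */
        qemu_cond_broadcast(&example_cond);
        qemu_mutex_unlock(&example_lock);
    }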