From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 16/16] cpus-common: lock-free fast path for cpu_exec_start/end
Date: Mon, 12 Sep 2016 13:12:41 +0200

Set cpu->running without taking the cpu_list lock; only look at it if
there is a concurrent exclusive section.  This requires adding a new
field to CPUState, which records whether a running CPU is being counted
in pending_cpus.  When an exclusive section is started concurrently with
cpu_exec_start, cpu_exec_start can use the new field to wait for the end
of the exclusive section.

This is a separate patch for easier bisection of issues.

Signed-off-by: Paolo Bonzini <address@hidden>
---
 cpus-common.c              | 73 ++++++++++++++++++++++++++++++++++++++++------
 docs/tcg-exclusive.promela | 53 +++++++++++++++++++++++++++++++--
 include/qom/cpu.h          |  5 ++--
 3 files changed, 117 insertions(+), 14 deletions(-)
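
For reviewers who want to see the ordering argument in isolation, here is a
minimal, self-contained C11 sketch of the handshake the commit message
describes.  It is only an illustration, not code from this patch:
atomic_thread_fence() stands in for smp_mb(), a spin loop stands in for
exclusive_cond/exclusive_resume, the qemu_cpu_list_mutex and CPU list are
omitted, and the *_sketch names are made up.

/* Sketch of the Dekker-style pairing used by this patch (not QEMU code).
 * Each side does "write A; full barrier; read B", so at least one of the
 * two threads is guaranteed to observe the other's write.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool running;       /* stands in for cpu->running    */
static atomic_bool has_waiter;    /* stands in for cpu->has_waiter */
static atomic_int  pending_cpus;  /* stands in for pending_cpus    */

static void cpu_exec_start_sketch(void)
{
    atomic_store(&running, true);
    atomic_thread_fence(memory_order_seq_cst);  /* write running, then read pending_cpus */
    if (atomic_load(&pending_cpus) && !atomic_load(&has_waiter)) {
        /* Not counted by start_exclusive(): step aside until the exclusive
         * section finishes (QEMU waits under the mutex; spin loop here).  */
        atomic_store(&running, false);
        while (atomic_load(&pending_cpus)) {
            /* wait for the exclusive section to end */
        }
        atomic_store(&running, true);
    }
}

static void cpu_exec_end_sketch(void)
{
    atomic_store(&running, false);
    atomic_thread_fence(memory_order_seq_cst);  /* write running, then read pending_cpus */
    if (atomic_load(&pending_cpus) && atomic_load(&has_waiter)) {
        /* We were counted: drop out of pending_cpus; in QEMU the last CPU
         * to do this signals exclusive_cond.  */
        atomic_store(&has_waiter, false);
        atomic_fetch_sub(&pending_cpus, 1);
    }
}

static void start_exclusive_sketch(void)
{
    atomic_store(&pending_cpus, 1);
    atomic_thread_fence(memory_order_seq_cst);  /* write pending_cpus, then read running */
    if (atomic_load(&running)) {
        atomic_store(&has_waiter, true);
        atomic_fetch_add(&pending_cpus, 1);
        /* QEMU kicks the CPU here and waits until pending_cpus == 1.  */
    }
}

int main(void)
{
    cpu_exec_start_sketch();   /* fast path: pending_cpus == 0        */
    start_exclusive_sketch();  /* sees running == true, counts the CPU */
    cpu_exec_end_sketch();     /* sees has_waiter, uncounts itself     */
    printf("pending_cpus=%d (exclusive section may proceed)\n",
           atomic_load(&pending_cpus));
    return 0;
}

Because each side issues its store before its barrier and its load after it,
at least one of the two threads must see the other's write; that is what
makes it safe to skip the lock entirely when pending_cpus reads as zero.
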

diff --git a/cpus-common.c b/cpus-common.c
index 50a92dd..67b42c6 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -184,8 +184,12 @@ void start_exclusive(void)
 
     /* Make all other cpus stop executing.  */
     pending_cpus = 1;
+
+    /* Write pending_cpus before reading other_cpu->running.  */
+    smp_mb();
     CPU_FOREACH(other_cpu) {
         if (other_cpu->running) {
+            other_cpu->has_waiter = true;
             pending_cpus++;
             qemu_cpu_kick(other_cpu);
         }
@@ -212,24 +216,75 @@ void end_exclusive(void)
 /* Wait for exclusive ops to finish, and begin cpu execution.  */
 void cpu_exec_start(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_mutex);
-    exclusive_idle();
     cpu->running = true;
-    qemu_mutex_unlock(&qemu_cpu_list_mutex);
+
+    /* Write cpu->running before reading pending_cpus.  */
+    smp_mb();
+
+    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
+     * After taking the lock we'll see cpu->has_waiter == true and run---not
+     * for long because start_exclusive kicked us.  cpu_exec_end will
+     * decrement pending_cpus and signal the waiter.
+     *
+     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
+     * This includes the case when an exclusive item is running now.
+     * Then we'll see cpu->has_waiter == false and wait for the item to
+     * complete.
+     *
+     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
+     * see cpu->running == true, and it will kick the CPU.
+     */
+    if (pending_cpus) {
+        qemu_mutex_lock(&qemu_cpu_list_mutex);
+        if (!cpu->has_waiter) {
+            /* Not counted in pending_cpus, let the exclusive item
+             * run.  Since we have the lock, set cpu->running to true
+             * while holding it instead of retrying.
+             */
+            cpu->running = false;
+            exclusive_idle();
+            /* Now pending_cpus is zero.  */
+            cpu->running = true;
+        } else {
+            /* Counted in pending_cpus, go ahead.  */
+        }
+        qemu_mutex_unlock(&qemu_cpu_list_mutex);
+    }
 }
 
 /* Mark cpu as not executing, and release pending exclusive ops.  */
 void cpu_exec_end(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_mutex);
     cpu->running = false;
-    if (pending_cpus > 1) {
-        pending_cpus--;
-        if (pending_cpus == 1) {
-            qemu_cond_signal(&exclusive_cond);
+
+    /* Write cpu->running before reading pending_cpus.  */
+    smp_mb();
+
+    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
+     * pending_cpus and wait for exclusive_cond.  After taking the lock
+     * we'll see cpu->has_waiter == true.
+     *
+     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
+     * This includes the case when an exclusive item started after setting
+     * cpu->running to false and before we read pending_cpus.  Then we'll see
+     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
+     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
+     * for the item to complete.
+     *
+     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
+     * see cpu->running == false, and it can ignore this CPU until the
+     * next cpu_exec_start.
+     */
+    if (pending_cpus) {
+        qemu_mutex_lock(&qemu_cpu_list_mutex);
+        if (cpu->has_waiter) {
+            cpu->has_waiter = false;
+            if (--pending_cpus == 1) {
+                qemu_cond_signal(&exclusive_cond);
+            }
         }
+        qemu_mutex_unlock(&qemu_cpu_list_mutex);
     }
-    qemu_mutex_unlock(&qemu_cpu_list_mutex);
 }
 
 void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
diff --git a/docs/tcg-exclusive.promela b/docs/tcg-exclusive.promela
index 3ef0f34..f21213f 100644
--- a/docs/tcg-exclusive.promela
+++ b/docs/tcg-exclusive.promela
@@ -12,7 +12,8 @@
  *     spin -a docs/event.promela
  *     ./a.out -a
  *
- * Tunable processor macros: N_CPUS, N_EXCLUSIVE, N_CYCLES, TEST_EXPENSIVE.
+ * Tunable processor macros: N_CPUS, N_EXCLUSIVE, N_CYCLES, USE_MUTEX,
+ *                           TEST_EXPENSIVE.
  */
 
 // Define the missing parameters for the model
@@ -21,8 +22,10 @@
 #warning defaulting to 2 CPU processes
 #endif
 
-// the expensive test is not so expensive for <= 3 CPUs
-#if N_CPUS <= 3
+// the expensive test is not so expensive for <= 2 CPUs
+// If the mutex is used, it's also cheap (300 MB / 4 seconds) for 3 CPUs
+// For 3 CPUs and the lock-free option it needs 1.5 GB of RAM
+#if N_CPUS <= 2 || (N_CPUS <= 3 && defined USE_MUTEX)
 #define TEST_EXPENSIVE
 #endif
 
@@ -106,6 +109,8 @@ byte has_waiter[N_CPUS];
     COND_BROADCAST(exclusive_resume);                             \
     MUTEX_UNLOCK(mutex);
 
+#ifdef USE_MUTEX
+// Simple version using mutexes
 #define cpu_exec_start(id)                                                   \
     MUTEX_LOCK(mutex);                                                       \
     exclusive_idle();                                                        \
@@ -126,6 +131,48 @@ byte has_waiter[N_CPUS];
         :: else -> skip;                                                     \
     fi;                                                                      \
     MUTEX_UNLOCK(mutex);
+#else
+// Wait-free fast path, only needs mutex when concurrent with
+// an exclusive section
+#define cpu_exec_start(id)                                                   \
+    running[id] = 1;                                                         \
+    if                                                                       \
+        :: pending_cpus -> {                                                 \
+            MUTEX_LOCK(mutex);                                               \
+            if                                                               \
+                :: !has_waiter[id] -> {                                      \
+                    running[id] = 0;                                         \
+                    exclusive_idle();                                        \
+                    running[id] = 1;                                         \
+                }                                                            \
+                :: else -> skip;                                             \
+            fi;                                                              \
+            MUTEX_UNLOCK(mutex);                                             \
+        }                                                                    \
+        :: else -> skip;                                                     \
+    fi;
+
+#define cpu_exec_end(id)                                                     \
+    running[id] = 0;                                                         \
+    if                                                                       \
+        :: pending_cpus -> {                                                 \
+            MUTEX_LOCK(mutex);                                               \
+            if                                                               \
+                :: has_waiter[id] -> {                                       \
+                    has_waiter[id] = 0;                                      \
+                    pending_cpus--;                                          \
+                    if                                                       \
+                        :: pending_cpus == 1 -> COND_BROADCAST(exclusive_cond); \
+                        :: else -> skip;                                     \
+                    fi;                                                      \
+                }                                                            \
+                :: else -> skip;                                             \
+            fi;                                                              \
+            MUTEX_UNLOCK(mutex);                                             \
+        }                                                                    \
+        :: else -> skip;                                                     \
+    fi
+#endif
 
 // Promela processes
 
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 3eb595c..7589e46 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -242,7 +242,8 @@ struct qemu_work_item;
  * @nr_threads: Number of threads within this CPU.
  * @numa_node: NUMA node this CPU is belonging to.
  * @host_tid: Host thread ID.
- * @running: #true if CPU is currently running;
+ * @running: #true if CPU is currently running (lockless).
+ * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
  * valid under cpu_list_lock.
  * @created: Indicates whether the CPU thread has been successfully created.
  * @interrupt_request: Indicates a pending interrupt request.
@@ -296,7 +297,7 @@ struct CPUState {
 #endif
     int thread_id;
     uint32_t host_tid;
-    bool running;
+    bool running, has_waiter;
     struct QemuCond *halt_cond;
     bool thread_kicked;
     bool created;
-- 
2.7.4



