qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH 5/8] linux-user: add stop-the-world to be called from CPU loop


From: Emilio G. Cota
Subject: [Qemu-devel] [PATCH 5/8] linux-user: add stop-the-world to be called from CPU loop
Date: Wed, 24 Aug 2016 18:18:00 -0400

Signed-off-by: Emilio G. Cota <address@hidden>
---
 cpu-exec.c              |  1 +
 include/exec/exec-all.h |  5 +++
 linux-user/main.c       | 89 +++++++++++++++++++++++++++++++++++++++++++++++++
 linux-user/syscall.c    |  1 +
 4 files changed, 96 insertions(+)

diff --git a/cpu-exec.c b/cpu-exec.c
index 63d739a..8f1adc4 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -649,6 +649,7 @@ int cpu_exec(CPUState *cpu)
             g_assert(cc == CPU_GET_CLASS(cpu));
 #endif /* buggy compiler */
             cpu->can_do_io = 1;
+            stop_the_world_reset();
             tb_lock_reset();
         }
     } /* for(;;) */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index ec72c5a..c483d80 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -61,6 +61,11 @@ void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
 void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
 void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
 
+void stop_the_world_lock(CPUState *cpu);
+void stop_the_world_unlock(void);
+void stop_the_world_reset(void);
+extern __thread bool stw_held;
+
 #if !defined(CONFIG_USER_ONLY)
 void cpu_reloading_memory_map(void);
 /**
diff --git a/linux-user/main.c b/linux-user/main.c
index 9880505..94c6625 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -114,11 +114,19 @@ static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
 static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
 static int pending_cpus;
 
+static pthread_cond_t stw_sleep_cond   = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t stw_request_cond = PTHREAD_COND_INITIALIZER;
+static pthread_mutex_t stw_lock = PTHREAD_MUTEX_INITIALIZER;
+static int stw_requests;
+static bool stw_ongoing;
+__thread bool stw_held;
+
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     pthread_mutex_lock(&exclusive_lock);
+    pthread_mutex_lock(&stw_lock);
     mmap_fork_start();
 }
 
@@ -137,11 +145,17 @@ void fork_end(int child)
         pending_cpus = 0;
         pthread_mutex_init(&exclusive_lock, NULL);
         pthread_mutex_init(&cpu_list_mutex, NULL);
+        pthread_mutex_init(&stw_lock, NULL);
+        stw_held = false;
+        stw_ongoing = false;
         pthread_cond_init(&exclusive_cond, NULL);
         pthread_cond_init(&exclusive_resume, NULL);
+        pthread_cond_init(&stw_sleep_cond, NULL);
+        pthread_cond_init(&stw_request_cond, NULL);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
+        pthread_mutex_unlock(&stw_lock);
         pthread_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
@@ -198,6 +212,79 @@ static void step_atomic(CPUState *cpu)
     end_exclusive();
 }
 
+void stop_the_world_lock(CPUState *cpu)
+{
+    CPUState *other;
+
+    if (stw_held) {
+        return;
+    }
+    rcu_read_unlock();
+    assert(!rcu_read_lock_held());
+
+    pthread_mutex_lock(&stw_lock);
+    if (stw_ongoing) {
+        stw_requests++;
+        /* wait for ongoing stops to occur */
+        while (stw_ongoing) {
+            pthread_cond_wait(&stw_request_cond, &stw_lock);
+        }
+        stw_requests--;
+    }
+
+    /* it's our turn! */
+    stw_ongoing = true;
+    stw_held = true;
+    CPU_FOREACH(other) {
+        if (other != cpu) {
+            cpu_exit(other);
+        }
+    }
+    synchronize_rcu();
+}
+
+void stop_the_world_unlock(void)
+{
+    if (!stw_held) {
+        return;
+    }
+    assert(stw_ongoing);
+    assert(!rcu_read_lock_held());
+
+    if (stw_requests) {
+        pthread_cond_signal(&stw_request_cond);
+    } else {
+        pthread_cond_broadcast(&stw_sleep_cond);
+    }
+    /*
+     * Make sure the next STW requester (if any) will perceive that we're
+     * in an RCU read critical section
+     */
+    rcu_read_lock();
+    stw_ongoing = false;
+    stw_held = false;
+    pthread_mutex_unlock(&stw_lock);
+}
+
+void stop_the_world_reset(void)
+{
+    if (likely(!stw_held)) {
+        return;
+    }
+    stop_the_world_unlock();
+}
+
+static inline void stop_the_world_sleep(void)
+{
+    pthread_mutex_lock(&stw_lock);
+    if (unlikely(stw_ongoing)) {
+        while (stw_ongoing) {
+            pthread_cond_wait(&stw_sleep_cond, &stw_lock);
+        }
+    }
+    pthread_mutex_unlock(&stw_lock);
+}
+
 /* Wait for exclusive ops to finish, and begin cpu execution.  */
 static inline void cpu_exec_start(CPUState *cpu)
 {
@@ -205,6 +292,8 @@ static inline void cpu_exec_start(CPUState *cpu)
     exclusive_idle();
     cpu->running = true;
     pthread_mutex_unlock(&exclusive_lock);
+
+    stop_the_world_sleep();
 }
 
 /* Mark cpu as not executing, and release pending exclusive ops.  */
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 2911319..740af23 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -5403,6 +5403,7 @@ static void *clone_func(void *arg)
     /* Wait until the parent has finshed initializing the tls state.  */
     pthread_mutex_lock(&clone_lock);
     pthread_mutex_unlock(&clone_lock);
+    stw_held = false;
     cpu_loop(env);
     /* never exits */
     return NULL;
-- 
2.5.0




reply via email to

[Prev in Thread] Current Thread [Next in Thread]