[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v4 06/12] linux-user: Use QemuMutex and QemuCond
From: |
Sergey Fedorov |
Subject: |
[Qemu-devel] [PATCH v4 06/12] linux-user: Use QemuMutex and QemuCond |
Date: |
Fri, 15 Jul 2016 21:57:20 +0300 |
From: Sergey Fedorov <address@hidden>
Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
This will allow some locks and condition variables to be made common
between user and system mode emulation.
Signed-off-by: Sergey Fedorov <address@hidden>
Signed-off-by: Sergey Fedorov <address@hidden>
Reviewed-by: Alex Bennée <address@hidden>
---
linux-user/main.c | 53 +++++++++++++++++++++++++++++++----------------------
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/linux-user/main.c b/linux-user/main.c
index 617a179f14a4..bdbda693cc5f 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -108,17 +108,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
We don't require a full sync, only that no cpus are executing guest code.
The alternative is to map target atomic ops onto host equivalents,
which requires quite a lot of per host/target work. */
-static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static QemuMutex cpu_list_mutex;
+static QemuMutex exclusive_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
static int pending_cpus;
+void qemu_init_cpu_loop(void)
+{
+ qemu_mutex_init(&cpu_list_mutex);
+ qemu_mutex_init(&exclusive_lock);
+ qemu_cond_init(&exclusive_cond);
+ qemu_cond_init(&exclusive_resume);
+}
+
/* Make sure everything is in a consistent state for calling fork(). */
void fork_start(void)
{
qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
mmap_fork_start();
}
@@ -135,14 +143,14 @@ void fork_end(int child)
}
}
pending_cpus = 0;
- pthread_mutex_init(&exclusive_lock, NULL);
- pthread_mutex_init(&cpu_list_mutex, NULL);
- pthread_cond_init(&exclusive_cond, NULL);
- pthread_cond_init(&exclusive_resume, NULL);
+ qemu_mutex_init(&exclusive_lock);
+ qemu_mutex_init(&cpu_list_mutex);
+ qemu_cond_init(&exclusive_cond);
+ qemu_cond_init(&exclusive_resume);
qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
gdbserver_fork(thread_cpu);
} else {
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_mutex_unlock(&exclusive_lock);
qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}
}
@@ -152,7 +160,7 @@ void fork_end(int child)
static inline void exclusive_idle(void)
{
while (pending_cpus) {
- pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+ qemu_cond_wait(&exclusive_resume, &exclusive_lock);
}
}
@@ -162,7 +170,7 @@ static inline void start_exclusive(void)
{
CPUState *other_cpu;
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
exclusive_idle();
pending_cpus = 1;
@@ -174,7 +182,7 @@ static inline void start_exclusive(void)
}
}
if (pending_cpus > 1) {
- pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+ qemu_cond_wait(&exclusive_cond, &exclusive_lock);
}
}
@@ -182,42 +190,42 @@ static inline void start_exclusive(void)
static inline void __attribute__((unused)) end_exclusive(void)
{
pending_cpus = 0;
- pthread_cond_broadcast(&exclusive_resume);
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_cond_broadcast(&exclusive_resume);
+ qemu_mutex_unlock(&exclusive_lock);
}
/* Wait for exclusive ops to finish, and begin cpu execution. */
static inline void cpu_exec_start(CPUState *cpu)
{
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
exclusive_idle();
cpu->running = true;
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops. */
static inline void cpu_exec_end(CPUState *cpu)
{
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
cpu->running = false;
if (pending_cpus > 1) {
pending_cpus--;
if (pending_cpus == 1) {
- pthread_cond_signal(&exclusive_cond);
+ qemu_cond_signal(&exclusive_cond);
}
}
exclusive_idle();
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
- pthread_mutex_lock(&cpu_list_mutex);
+ qemu_mutex_lock(&cpu_list_mutex);
}
void cpu_list_unlock(void)
{
- pthread_mutex_unlock(&cpu_list_mutex);
+ qemu_mutex_unlock(&cpu_list_mutex);
}
@@ -4210,6 +4218,7 @@ int main(int argc, char **argv, char **envp)
int ret;
int execfd;
+ qemu_init_cpu_loop();
module_call_init(MODULE_INIT_QOM);
if ((envlist = envlist_create()) == NULL) {
--
2.9.1
- [Qemu-devel] [PATCH v4 00/12] cpu-exec: Safe work in quiescent state, Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 01/12] atomic: introduce atomic_dec_fetch., Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 11/12] cpu-exec-common: Introduce async_safe_run_on_cpu(), Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 10/12] bsd-user: Support CPU work queue, Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 05/12] cpus: Rename flush_queued_work(), Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 06/12] linux-user: Use QemuMutex and QemuCond,
Sergey Fedorov <=
- [Qemu-devel] [PATCH v4 02/12] cpus: pass CPUState to run_on_cpu helpers, Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 08/12] linux-user: Add qemu_cpu_is_self() and qemu_cpu_kick(), Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 03/12] cpus: Move common code out of {async_, }run_on_cpu(), Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 12/12] tcg: Make tb_flush() thread safe, Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 04/12] cpus: Wrap mutex used to protect CPU work, Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 07/12] linux-user: Rework exclusive operation mechanism, Sergey Fedorov, 2016/07/15
- [Qemu-devel] [PATCH v4 09/12] linux-user: Support CPU work queue, Sergey Fedorov, 2016/07/15