[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [patch 10/11] qemu: make iothread selectable at compile time
From: Marcelo Tosatti
Subject: [Qemu-devel] [patch 10/11] qemu: make iothread selectable at compile time
Date: Thu, 02 Apr 2009 20:33:00 -0300
User-agent: quilt/0.46-1
Turned off by default.
Signed-off-by: Marcelo Tosatti <address@hidden>
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -277,6 +277,7 @@ static QEMUTimer *nographic_timer;
uint8_t qemu_uuid[16];
+#ifdef QEMU_THREADED
static int io_thread_fd = -1;
QemuMutex qemu_global_mutex;
@@ -294,6 +295,7 @@ QemuCond qemu_cpu_cond;
QemuCond qemu_system_cond;
QemuCond qemu_pause_cond;
+#endif
static void pause_all_vcpus(void);
static void resume_all_vcpus(void);
@@ -1367,6 +1369,14 @@ static void host_alarm_handler(int host_
write(alarm_timer_wfd, &byte, sizeof(byte));
#endif
alarm_timer->flags |= ALARM_FLAG_EXPIRED;
+#ifndef QEMU_THREADED
+ {
+ CPUState *env = next_cpu;
+ if (env)
+ cpu_exit(env);
+ event_pending = 1;
+ }
+#endif
}
}
@@ -3343,6 +3353,7 @@ static int ram_load(QEMUFile *f, void *o
void qemu_service_io(void)
{
+ qemu_notify_event();
}
/***********************************************************/
@@ -3560,11 +3571,13 @@ static int qemu_vmstop_requested(void)
return r;
}
+#ifdef QEMU_THREADED
static void qemu_system_vmstop_request(int reason)
{
vmstop_requested = reason;
main_loop_break();
}
+#endif
static void __vm_stop(int reason)
{
@@ -3578,6 +3591,7 @@ static void __vm_stop(int reason)
void vm_stop(int reason)
{
+#ifdef QEMU_THREADED
QemuThread me;
qemu_thread_self(&me);
@@ -3588,6 +3602,7 @@ void vm_stop(int reason)
cpu_single_env->stop = 1;
return;
}
+#endif
__vm_stop(reason);
}
@@ -3671,6 +3686,7 @@ void qemu_system_powerdown_request(void)
main_loop_break();
}
+#ifdef QEMU_THREADED
void qemu_notify_event(void)
{
main_loop_break();
@@ -3725,74 +3741,6 @@ static void io_thread_wakeup(void *opaqu
}
}
-#ifdef _WIN32
-static void host_main_loop_wait(int *timeout)
-{
- int ret, ret2, i;
- PollingEntry *pe;
-
-
- /* XXX: need to suppress polling by better using win32 events */
- ret = 0;
- for(pe = first_polling_entry; pe != NULL; pe = pe->next) {
- ret |= pe->func(pe->opaque);
- }
- if (ret == 0) {
- int err;
- WaitObjects *w = &wait_objects;
-
- ret = WaitForMultipleObjects(w->num, w->events, FALSE, *timeout);
- if (WAIT_OBJECT_0 + 0 <= ret && ret <= WAIT_OBJECT_0 + w->num - 1) {
- if (w->func[ret - WAIT_OBJECT_0])
- w->func[ret - WAIT_OBJECT_0](w->opaque[ret - WAIT_OBJECT_0]);
-
- /* Check for additional signaled events */
- for(i = (ret - WAIT_OBJECT_0 + 1); i < w->num; i++) {
-
- /* Check if event is signaled */
- ret2 = WaitForSingleObject(w->events[i], 0);
- if(ret2 == WAIT_OBJECT_0) {
- if (w->func[i])
- w->func[i](w->opaque[i]);
- } else if (ret2 == WAIT_TIMEOUT) {
- } else {
- err = GetLastError();
- fprintf(stderr, "WaitForSingleObject error %d %d\n", i,
- err);
- }
- }
- } else if (ret == WAIT_TIMEOUT) {
- } else {
- err = GetLastError();
- fprintf(stderr, "WaitForMultipleObjects error %d %d\n", ret, err);
- }
- }
-
- *timeout = 0;
-}
-#else
-static void host_main_loop_wait(int *timeout)
-{
-}
-#endif
-
-/* Q: is it allowed to enter cpu_exec */
-static int cpu_can_run(CPUState *env)
-{
- if (env->stop)
- return 0;
- if (env->stopped)
- return 0;
- if (shutdown_requested)
- return 0;
- if (powerdown_requested)
- return 0;
- if (reset_requested)
- return 0;
- if (vmstop_requested)
- return 0;
- return 1;
-}
-
/* Q: should break out of the wait loop */
static int cpu_has_work(CPUState *env)
{
@@ -3950,6 +3898,187 @@ static void resume_all_vcpus(void)
}
}
+static void *cpu_thread(void *arg);
+
+void qemu_init_vcpu(void *_env)
+{
+ CPUState *env = _env;
+ /* share a single thread for all cpus with TCG */
+ if (!tcg_cpu_thread) {
+ env->thread = qemu_mallocz(sizeof(QemuThread));
+ env->halt_cond = qemu_mallocz(sizeof(QemuCond));
+ qemu_cond_init(env->halt_cond);
+ qemu_thread_create(env->thread, cpu_thread, env);
+ while (env->created == 0)
+ qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+ tcg_cpu_thread = env->thread;
+ tcg_halt_cond = env->halt_cond;
+ } else {
+ env->thread = tcg_cpu_thread;
+ env->halt_cond = tcg_halt_cond;
+ }
+}
+
+static void qemu_init_state(void)
+{
+ qemu_cond_init(&qemu_pause_cond);
+ qemu_mutex_init(&qemu_fair_mutex);
+ qemu_mutex_init(&qemu_global_mutex);
+ qemu_mutex_lock(&qemu_global_mutex);
+}
+
+static void setup_iothread_fd(void);
+static void pre_main_loop(void)
+{
+ qemu_thread_self(&io_thread);
+ setup_iothread_fd();
+
+ unblock_io_signals();
+
+ qemu_system_ready = 1;
+ qemu_cond_broadcast(&qemu_system_cond);
+}
+
+static int qemu_cpu_exec(CPUState *env);
+static void *cpu_thread(void *arg)
+{
+ int r;
+ CPUState *env = arg;
+
+ block_io_signals();
+ qemu_thread_self(env->thread);
+
+ /* signal CPU creation */
+ qemu_mutex_lock(&qemu_global_mutex);
+ env->created = 1;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ /* and wait for machine initialization */
+ while (!qemu_system_ready)
+ qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+
+ while (1) {
+ r = qemu_cpu_exec(env);
+ qemu_wait_io_event(env, r);
+ }
+
+ return NULL;
+}
+
+static void setup_iothread_fd(void)
+{
+ int fds[2];
+
+ if (pipe(fds) == -1) {
+ fprintf(stderr, "failed to create iothread pipe");
+ exit(0);
+ }
+
+ qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
+ (void *)(unsigned long)fds[0]);
+ io_thread_fd = fds[1];
+ fcntl(io_thread_fd, F_SETFL, O_NONBLOCK);
+}
+
+#else /* QEMU_THREADED */
+#define qemu_signal_lock(a) do { } while(0)
+#define qemu_mutex_unlock(a) do { } while(0)
+#define pre_main_loop() do { } while(0)
+#define qemu_init_state() do { } while(0)
+
+void pause_all_vcpus(void)
+{
+ return;
+}
+
+void resume_all_vcpus(void)
+{
+ return;
+}
+
+void qemu_cpu_kick(void *env)
+{
+ return;
+}
+
+void qemu_init_vcpu(void *_env)
+{
+ CPUState *env = _env;
+
+ if (kvm_enabled())
+ kvm_init_vcpu(env);
+ return;
+}
+
+void qemu_notify_event(void)
+{
+ CPUState *env = cpu_single_env;
+
+ if (env)
+ cpu_exit(env);
+}
+
+void main_loop_break(void)
+{
+ qemu_notify_event();
+}
+
+int qemu_cpu_self(void *env)
+{
+ return 1;
+}
+#endif
+
+#ifdef _WIN32
+static void host_main_loop_wait(int *timeout)
+{
+ int ret, ret2, i;
+ PollingEntry *pe;
+
+
+ /* XXX: need to suppress polling by better using win32 events */
+ ret = 0;
+ for(pe = first_polling_entry; pe != NULL; pe = pe->next) {
+ ret |= pe->func(pe->opaque);
+ }
+ if (ret == 0) {
+ int err;
+ WaitObjects *w = &wait_objects;
+
+ ret = WaitForMultipleObjects(w->num, w->events, FALSE, *timeout);
+ if (WAIT_OBJECT_0 + 0 <= ret && ret <= WAIT_OBJECT_0 + w->num - 1) {
+ if (w->func[ret - WAIT_OBJECT_0])
+ w->func[ret - WAIT_OBJECT_0](w->opaque[ret - WAIT_OBJECT_0]);
+
+ /* Check for additional signaled events */
+ for(i = (ret - WAIT_OBJECT_0 + 1); i < w->num; i++) {
+
+ /* Check if event is signaled */
+ ret2 = WaitForSingleObject(w->events[i], 0);
+ if(ret2 == WAIT_OBJECT_0) {
+ if (w->func[i])
+ w->func[i](w->opaque[i]);
+ } else if (ret2 == WAIT_TIMEOUT) {
+ } else {
+ err = GetLastError();
+ fprintf(stderr, "WaitForSingleObject error %d %d\n", i,
+ err);
+ }
+ }
+ } else if (ret == WAIT_TIMEOUT) {
+ } else {
+ err = GetLastError();
+ fprintf(stderr, "WaitForMultipleObjects error %d %d\n", ret, err);
+ }
+ }
+
+ *timeout = 0;
+}
+#else
+static void host_main_loop_wait(int *timeout)
+{
+}
+#endif
+
void main_loop_wait(int timeout)
{
IOHandlerRecord *ioh;
@@ -4051,203 +4180,155 @@ void main_loop_wait(int timeout)
}
-static void setup_iothread_fd(void)
+/* Q: is it allowed to enter cpu_exec */
+static int cpu_can_run(CPUState *env)
{
- int fds[2];
-
- if (pipe(fds) == -1) {
- fprintf(stderr, "failed to create iothread pipe");
- exit(0);
- }
-
- qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
- (void *)(unsigned long)fds[0]);
- io_thread_fd = fds[1];
- fcntl(io_thread_fd, F_SETFL, O_NONBLOCK);
+ if (env->stop)
+ return 0;
+ if (env->stopped)
+ return 0;
+ if (shutdown_requested)
+ return 0;
+ if (powerdown_requested)
+ return 0;
+ if (reset_requested)
+ return 0;
+ if (vmstop_requested)
+ return 0;
+ return 1;
}
-static void *cpu_main_loop(void *arg)
+static int qemu_cpu_exec(CPUState *env)
{
int ret, timeout;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
- CPUState *env = arg;
-
- block_io_signals();
- qemu_thread_self(env->thread);
-
- /* signal CPU creation */
- qemu_mutex_lock(&qemu_global_mutex);
- env->created = 1;
- qemu_cond_signal(&qemu_cpu_cond);
-
- /* and wait for machine initialization */
- while (!qemu_system_ready)
- qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
-
- cur_cpu = env = first_cpu;
- next_cpu = cur_cpu->next_cpu ?: first_cpu;
for(;;) {
- if (vm_running) {
-
- for(;;) {
- /* get next cpu */
- env = next_cpu;
+ /* get next cpu */
+ env = next_cpu;
#ifdef CONFIG_PROFILER
- ti = profile_getclock();
+ ti = profile_getclock();
#endif
- if (use_icount) {
- int64_t count;
- int decr;
- qemu_icount -= (env->icount_decr.u16.low +
- env->icount_extra);
- env->icount_decr.u16.low = 0;
- env->icount_extra = 0;
- count = qemu_next_deadline();
- count = (count + (1 << icount_time_shift) - 1)
- >> icount_time_shift;
- qemu_icount += count;
- decr = (count > 0xffff) ? 0xffff : count;
- count -= decr;
- env->icount_decr.u16.low = decr;
- env->icount_extra = count;
- }
- ret = EXCP_HALTED;
- if (cpu_can_run(env))
- ret = cpu_exec(env);
+ if (use_icount) {
+ int64_t count;
+ int decr;
+ qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+ env->icount_decr.u16.low = 0;
+ env->icount_extra = 0;
+ count = qemu_next_deadline();
+ count = (count + (1 << icount_time_shift) - 1)
+ >> icount_time_shift;
+ qemu_icount += count;
+ decr = (count > 0xffff) ? 0xffff : count;
+ count -= decr;
+ env->icount_decr.u16.low = decr;
+ env->icount_extra = count;
+ }
+ ret = EXCP_HALTED;
+ if (cpu_can_run(env))
+ ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
- qemu_time += profile_getclock() - ti;
+ qemu_time += profile_getclock() - ti;
#endif
- if (use_icount) {
- /* Fold pending instructions back into the
- instruction counter, and clear the interrupt flag. */
- qemu_icount -= (env->icount_decr.u16.low
- + env->icount_extra);
- env->icount_decr.u32 = 0;
- env->icount_extra = 0;
- }
- next_cpu = env->next_cpu ?: first_cpu;
- if (event_pending && likely(ret != EXCP_DEBUG)) {
- ret = EXCP_INTERRUPT;
- event_pending = 0;
- break;
- }
- if (ret == EXCP_HLT) {
- /* Give the next CPU a chance to run. */
- cur_cpu = env;
- continue;
- }
- if (ret != EXCP_HALTED)
- break;
- /* all CPUs are halted ? */
- if (env == cur_cpu)
- break;
- }
+ if (use_icount) {
+ /* Fold pending instructions back into the
+ instruction counter, and clear the interrupt flag. */
+ qemu_icount -= (env->icount_decr.u16.low
+ + env->icount_extra);
+ env->icount_decr.u32 = 0;
+ env->icount_extra = 0;
+ }
+ next_cpu = env->next_cpu ?: first_cpu;
+ if (event_pending && likely(ret != EXCP_DEBUG)) {
+ ret = EXCP_INTERRUPT;
+ event_pending = 0;
+ break;
+ }
+ if (ret == EXCP_HLT) {
+ /* Give the next CPU a chance to run. */
cur_cpu = env;
-
- if (unlikely(ret == EXCP_DEBUG)) {
- gdb_set_stop_cpu(cur_cpu);
- vm_stop(EXCP_DEBUG);
- }
- /* If all cpus are halted then wait until the next IRQ */
- /* XXX: use timeout computed from timers */
- if (ret == EXCP_HALTED) {
- if (use_icount) {
- int64_t add;
- int64_t delta;
- /* Advance virtual time to the next event. */
- if (use_icount == 1) {
- /* When not using an adaptive execution frequency
- we tend to get badly out of sync with real time,
- so just delay for a reasonable amount of time. */
- delta = 0;
- } else {
- delta = cpu_get_icount() - cpu_get_clock();
- }
- if (delta > 0) {
- /* If virtual time is ahead of real time then just
- wait for IO. */
- timeout = (delta / 1000000) + 1;
- } else {
- /* Wait for either IO to occur or the next
- timer event. */
- add = qemu_next_deadline();
- /* We advance the timer before checking for IO.
- Limit the amount we advance so that early IO
- activity won't get the guest too far ahead. */
- if (add > 10000000)
- add = 10000000;
- delta += add;
- add = (add + (1 << icount_time_shift) - 1)
- >> icount_time_shift;
- qemu_icount += add;
- timeout = delta / 1000000;
- if (timeout < 0)
- timeout = 0;
- }
- } else {
- timeout = 5000;
- }
- } else {
- timeout = 0;
- }
- } else {
- env = env->next_cpu ?: first_cpu;
- timeout = 5000;
+ continue;
}
+ if (ret != EXCP_HALTED)
+ break;
+ /* all CPUs are halted ? */
+ if (env == cur_cpu)
+ break;
+ }
+ cur_cpu = env;
+
+ if (unlikely(ret == EXCP_DEBUG)) {
+ gdb_set_stop_cpu(cur_cpu);
+ vm_stop(EXCP_DEBUG);
+ }
+ /* If all cpus are halted then wait until the next IRQ */
+ /* XXX: use timeout computed from timers */
+ if (ret == EXCP_HALTED) {
+ if (use_icount) {
+ int64_t add;
+ int64_t delta;
+ /* Advance virtual time to the next event. */
+ if (use_icount == 1) {
+ /* When not using an adaptive execution frequency
+ we tend to get badly out of sync with real time,
+ so just delay for a reasonable amount of time. */
+ delta = 0;
+ } else {
+ delta = cpu_get_icount() - cpu_get_clock();
+ }
+ if (delta > 0) {
+ /* If virtual time is ahead of real time then just
+ wait for IO. */
+ timeout = (delta / 1000000) + 1;
+ } else {
+ /* Wait for either IO to occur or the next
+ timer event. */
+ add = qemu_next_deadline();
+ /* We advance the timer before checking for IO.
+ Limit the amount we advance so that early IO
+ activity won't get the guest too far ahead. */
+ if (add > 10000000)
+ add = 10000000;
+ delta += add;
+ add = (add + (1 << icount_time_shift) - 1)
+ >> icount_time_shift;
+ qemu_icount += add;
+ timeout = delta / 1000000;
+ if (timeout < 0)
+ timeout = 0;
+ }
+ } else {
+ timeout = 5000;
+ }
+ } else {
+ timeout = 0;
+ }
#ifdef CONFIG_PROFILER
- ti = profile_getclock();
+ ti = profile_getclock();
#endif
- qemu_wait_io_event(env, timeout);
#ifdef CONFIG_PROFILER
- dev_time += profile_getclock() - ti;
+ dev_time += profile_getclock() - ti;
#endif
- }
- cpu_disable_ticks();
- return NULL;
-}
-
-void qemu_init_vcpu(void *_env)
-{
- CPUState *env = _env;
- /* share a single thread for all cpus with TCG */
- if (!tcg_cpu_thread) {
- env->thread = qemu_mallocz(sizeof(QemuThread));
- env->halt_cond = qemu_mallocz(sizeof(QemuCond));
- qemu_cond_init(env->halt_cond);
- qemu_thread_create(env->thread, cpu_main_loop, env);
- while (env->created == 0)
- qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
- tcg_cpu_thread = env->thread;
- tcg_halt_cond = env->halt_cond;
- } else {
- env->thread = tcg_cpu_thread;
- env->halt_cond = tcg_halt_cond;
- }
-}
-
-static void qemu_init_state(void)
-{
- qemu_cond_init(&qemu_pause_cond);
- qemu_mutex_init(&qemu_fair_mutex);
- qemu_mutex_init(&qemu_global_mutex);
- qemu_mutex_lock(&qemu_global_mutex);
+ return timeout;
}
static void main_loop(void)
{
int r;
- qemu_thread_self(&io_thread);
- setup_iothread_fd();
-
- unblock_io_signals();
+ pre_main_loop();
- qemu_system_ready = 1;
- qemu_cond_broadcast(&qemu_system_cond);
+ cur_cpu = first_cpu;
+ next_cpu = cur_cpu->next_cpu ?: first_cpu;
while (1) {
+#ifdef QEMU_THREADED
main_loop_wait(1000);
+#else
+ r = qemu_cpu_exec(next_cpu);
+ main_loop_wait(r);
+#endif
if (qemu_shutdown_requested()) {
pause_all_vcpus();
if (no_shutdown)
--
- Re: [Qemu-devel] [patch 02/11] qemu: mutex/thread/cond wrappers, (continued)
- [Qemu-devel] [patch 03/11] qemu: per-arch cpu_has_work, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 07/11] qemu: handle reset/poweroff/shutdown in iothread, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 05/11] qemu: separate thread for io, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 06/11] qemu: per-cpu thread information, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 04/11] qemu: introduce main_loop_break, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 09/11] qemu: handle vmstop from cpu context, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 08/11] qemu: pause and resume cpu threads, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 11/11] qemu: basic kvm iothread support, Marcelo Tosatti, 2009/04/02
- [Qemu-devel] [patch 10/11] qemu: make iothread selectable at compile time, Marcelo Tosatti <=