From: Stefan Hajnoczi <stefanha@redhat.com>
Subject: [PATCH 3/6] qemu/main-loop: rename qemu_cond_wait_iothread() to qemu_cond_wait_bql()
Date: Wed, 29 Nov 2023 16:26:22 -0500
The name "iothread" is overloaded. Use the term Big QEMU Lock (BQL)
instead, it is already widely used and unambiguous.
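
For illustration, a typical call site after this rename looks like the sketch
below. The ExampleState struct, field names, and function are made up for the
example; only qemu_cond_wait_bql() itself comes from this series (the pattern
mirrors virtio_gpu_reset(), which waits on reset_cond with the BQL held):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"      /* QemuCond */
    #include "qemu/main-loop.h"   /* qemu_cond_wait_bql() */

    /* Hypothetical state for the example only. */
    typedef struct ExampleState {
        QemuCond done_cond;       /* signalled by a worker thread */
        bool done;
    } ExampleState;

    /* Caller holds the BQL. qemu_cond_wait_bql() atomically drops the
     * BQL while blocking on the condition and re-takes it on wakeup,
     * so the loop predicate is always re-checked under the lock. */
    static void example_wait_for_done(ExampleState *s)
    {
        while (!s->done) {
            qemu_cond_wait_bql(&s->done_cond);
        }
    }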
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/main-loop.h | 8 ++++----
accel/tcg/tcg-accel-ops-rr.c | 4 ++--
hw/display/virtio-gpu.c | 2 +-
hw/ppc/spapr_events.c | 2 +-
system/cpu-throttle.c | 2 +-
system/cpus.c | 4 ++--
target/i386/nvmm/nvmm-accel-ops.c | 2 +-
target/i386/whpx/whpx-accel-ops.c | 2 +-
8 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 0b6a3e4824..ec2a70f041 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -373,17 +373,17 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, qemu_bql_auto_unlock)
= qemu_bql_auto_lock(__FILE__, __LINE__)
/*
- * qemu_cond_wait_iothread: Wait on condition for the main loop mutex
+ * qemu_cond_wait_bql: Wait on condition for the main loop mutex
*
* This function atomically releases the main loop mutex and causes
* the calling thread to block on the condition.
*/
-void qemu_cond_wait_iothread(QemuCond *cond);
+void qemu_cond_wait_bql(QemuCond *cond);
/*
- * qemu_cond_timedwait_iothread: like the previous, but with timeout
+ * qemu_cond_timedwait_bql: like the previous, but with timeout
*/
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms);
/* internal interfaces */
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index c21215a094..1e5a688085 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -111,7 +111,7 @@ static void rr_wait_io_event(void)
while (all_cpu_threads_idle()) {
rr_stop_kick_timer();
- qemu_cond_wait_iothread(first_cpu->halt_cond);
+ qemu_cond_wait_bql(first_cpu->halt_cond);
}
rr_start_kick_timer();
@@ -198,7 +198,7 @@ static void *rr_cpu_thread_fn(void *arg)
/* wait for initial kick-off after machine start */
while (first_cpu->stopped) {
- qemu_cond_wait_iothread(first_cpu->halt_cond);
+ qemu_cond_wait_bql(first_cpu->halt_cond);
/* process any pending work */
CPU_FOREACH(cpu) {
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index b016d3bac8..67c5be1a4e 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1512,7 +1512,7 @@ void virtio_gpu_reset(VirtIODevice *vdev)
g->reset_finished = false;
qemu_bh_schedule(g->reset_bh);
while (!g->reset_finished) {
- qemu_cond_wait_iothread(&g->reset_cond);
+ qemu_cond_wait_bql(&g->reset_cond);
}
} else {
virtio_gpu_reset_bh(g);
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index deb4641505..cb0eeee587 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -899,7 +899,7 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
}
return;
}
- qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond);
+ qemu_cond_wait_bql(&spapr->fwnmi_machine_check_interlock_cond);
if (spapr->fwnmi_machine_check_addr == -1) {
/*
* If the machine was reset while waiting for the interlock,
diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c
index e98836311b..1d2b73369e 100644
--- a/system/cpu-throttle.c
+++ b/system/cpu-throttle.c
@@ -54,7 +54,7 @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
while (sleeptime_ns > 0 && !cpu->stop) {
if (sleeptime_ns > SCALE_MS) {
- qemu_cond_timedwait_iothread(cpu->halt_cond,
-                              sleeptime_ns / SCALE_MS);
+ qemu_cond_timedwait_bql(cpu->halt_cond,
+                         sleeptime_ns / SCALE_MS);
} else {
qemu_bql_unlock();
diff --git a/system/cpus.c b/system/cpus.c
index d5b98c11f5..eb24a4db8e 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -513,12 +513,12 @@ void qemu_bql_unlock(void)
qemu_mutex_unlock(&qemu_global_mutex);
}
-void qemu_cond_wait_iothread(QemuCond *cond)
+void qemu_cond_wait_bql(QemuCond *cond)
{
qemu_cond_wait(cond, &qemu_global_mutex);
}
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms)
{
qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
}
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index 387ccfcce5..0fe8a76820 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -48,7 +48,7 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
}
}
while (cpu_thread_is_idle(cpu)) {
- qemu_cond_wait_iothread(cpu->halt_cond);
+ qemu_cond_wait_bql(cpu->halt_cond);
}
qemu_wait_io_event_common(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index 1f29346a88..d8bec46081 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -48,7 +48,7 @@ static void *whpx_cpu_thread_fn(void *arg)
}
}
while (cpu_thread_is_idle(cpu)) {
- qemu_cond_wait_iothread(cpu->halt_cond);
+ qemu_cond_wait_bql(cpu->halt_cond);
}
qemu_wait_io_event_common(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
--
2.42.0