[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [RFC qom-cpu for-next 2/2] cpu: Replace qemu_for_each_cpu()
From: |
Michael S. Tsirkin |
Subject: |
Re: [Qemu-devel] [RFC qom-cpu for-next 2/2] cpu: Replace qemu_for_each_cpu() |
Date: |
Tue, 30 Jul 2013 21:30:15 +0300 |
On Tue, Jul 30, 2013 at 06:55:59PM +0200, Andreas Färber wrote:
> It was introduced to loop over CPUs from target-independent code, but
> since commit 182735efaf956ccab50b6d74a4fed163e0f35660 target-independent
> CPUState is used.
>
> A loop can be considered more efficient than function calls in a loop,
> and CPU_FOREACH() hides implementation details just as well, so use that
> instead.
>
> Suggested-by: Markus Armbruster <address@hidden>
> Signed-off-by: Andreas Färber <address@hidden>
Efficiency is unlikely to be stellar, seeing as there
is a ton of string matching going on in QOM.
But I don't think it matters in any of this code, so
Acked-by: Michael S. Tsirkin <address@hidden>
> ---
> arch_init.c | 11 +++++------
> cpus.c | 11 ++++-------
> exec.c | 9 ---------
> hw/acpi/piix4.c | 20 +++++++++-----------
> include/qom/cpu.h | 9 ---------
> qom/cpu.c | 30 +++++++++---------------------
> 6 files changed, 27 insertions(+), 63 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index 68a7ab7..148f76e 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -1195,15 +1195,14 @@ static void mig_sleep_cpu(void *opq)
> much time in the VM. The migration thread will try to catchup.
> Workload will experience a performance drop.
> */
> -static void mig_throttle_cpu_down(CPUState *cpu, void *data)
> -{
> - async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
> -}
> -
> static void mig_throttle_guest_down(void)
> {
> + CPUState *cpu;
> +
> qemu_mutex_lock_iothread();
> - qemu_for_each_cpu(mig_throttle_cpu_down, NULL);
> + CPU_FOREACH(cpu) {
> + async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
> + }
> qemu_mutex_unlock_iothread();
> }
>
> diff --git a/cpus.c b/cpus.c
> index 1e2fd8a..c1d0f18 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -837,12 +837,6 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>
> static void tcg_exec_all(void);
>
> -static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
> -{
> - cpu->thread_id = qemu_get_thread_id();
> - cpu->created = true;
> -}
> -
> static void *qemu_tcg_cpu_thread_fn(void *arg)
> {
> CPUState *cpu = arg;
> @@ -851,7 +845,10 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> qemu_thread_get_self(cpu->thread);
>
> qemu_mutex_lock(&qemu_global_mutex);
> - qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
> + CPU_FOREACH(cpu) {
> + cpu->thread_id = qemu_get_thread_id();
> + cpu->created = true;
> + }
> qemu_cond_signal(&qemu_cpu_cond);
>
> /* wait for initial kick-off after machine start */
> diff --git a/exec.c b/exec.c
> index 4ee61a6..7d90119 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -362,15 +362,6 @@ CPUState *qemu_get_cpu(int index)
> return NULL;
> }
>
> -void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
> -{
> - CPUState *cpu;
> -
> - CPU_FOREACH(cpu) {
> - func(cpu, data);
> - }
> -}
> -
> void cpu_exec_init(CPUArchState *env)
> {
> CPUState *cpu = ENV_GET_CPU(env);
> diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
> index c885690..12e40b0 100644
> --- a/hw/acpi/piix4.c
> +++ b/hw/acpi/piix4.c
> @@ -667,22 +667,14 @@ static void piix4_cpu_added_req(Notifier *n, void
> *opaque)
> piix4_cpu_hotplug_req(s, CPU(opaque), PLUG);
> }
>
> -static void piix4_init_cpu_status(CPUState *cpu, void *data)
> -{
> - CPUStatus *g = (CPUStatus *)data;
> - CPUClass *k = CPU_GET_CLASS(cpu);
> - int64_t id = k->get_arch_id(cpu);
> -
> - g_assert((id / 8) < PIIX4_PROC_LEN);
> - g->sts[id / 8] |= (1 << (id % 8));
> -}
> -
> static int piix4_device_hotplug(DeviceState *qdev, PCIDevice *dev,
> PCIHotplugState state);
>
> static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
> PCIBus *bus, PIIX4PMState *s)
> {
> + CPUState *cpu;
> +
> memory_region_init_io(&s->io_gpe, OBJECT(s), &piix4_gpe_ops, s,
> "acpi-gpe0", GPE_LEN);
> memory_region_add_subregion(parent, GPE_BASE, &s->io_gpe);
> @@ -693,7 +685,13 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion
> *parent,
> &s->io_pci);
> pci_bus_hotplug(bus, piix4_device_hotplug, DEVICE(s));
>
> - qemu_for_each_cpu(piix4_init_cpu_status, &s->gpe_cpu);
> + CPU_FOREACH(cpu) {
> + CPUClass *cc = CPU_GET_CLASS(cpu);
> + int64_t id = cc->get_arch_id(cpu);
> +
> + g_assert((id / 8) < PIIX4_PROC_LEN);
> + s->gpe_cpu.sts[id / 8] |= (1 << (id % 8));
> + }
> memory_region_init_io(&s->io_cpu, OBJECT(s), &cpu_hotplug_ops, s,
> "acpi-cpu-hotplug", PIIX4_PROC_LEN);
> memory_region_add_subregion(parent, PIIX4_PROC_BASE, &s->io_cpu);
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 6d052ee..3cc28b5 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -401,15 +401,6 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data),
> void *data);
> void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
>
> /**
> - * qemu_for_each_cpu:
> - * @func: The function to be executed.
> - * @data: Data to pass to the function.
> - *
> - * Executes @func for each CPU.
> - */
> -void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data);
> -
> -/**
> * qemu_get_cpu:
> * @index: The address@hidden value of the CPU to obtain.
> *
> diff --git a/qom/cpu.c b/qom/cpu.c
> index aa95108..ecade59 100644
> --- a/qom/cpu.c
> +++ b/qom/cpu.c
> @@ -25,30 +25,18 @@
> #include "qemu/log.h"
> #include "sysemu/sysemu.h"
>
> -typedef struct CPUExistsArgs {
> - int64_t id;
> - bool found;
> -} CPUExistsArgs;
> -
> -static void cpu_exist_cb(CPUState *cpu, void *data)
> -{
> - CPUClass *klass = CPU_GET_CLASS(cpu);
> - CPUExistsArgs *arg = data;
> -
> - if (klass->get_arch_id(cpu) == arg->id) {
> - arg->found = true;
> - }
> -}
> -
> bool cpu_exists(int64_t id)
> {
> - CPUExistsArgs data = {
> - .id = id,
> - .found = false,
> - };
> + CPUState *cpu;
> +
> + CPU_FOREACH(cpu) {
> + CPUClass *cc = CPU_GET_CLASS(cpu);
>
> - qemu_for_each_cpu(cpu_exist_cb, &data);
> - return data.found;
> + if (cc->get_arch_id(cpu) == id) {
> + return true;
> + }
> + }
> + return false;
> }
>
> bool cpu_paging_enabled(const CPUState *cpu)
> --
> 1.8.1.4