
Re: [Qemu-devel] [RFC] ppc: qdev-ify CPU creation


From: Alexander Graf
Subject: Re: [Qemu-devel] [RFC] ppc: qdev-ify CPU creation
Date: Sun, 24 Jul 2011 11:28:45 +0200

On 21.12.2010, at 21:01, Andreas Färber wrote:

> From: Hervé Poussineau <address@hidden>
> 
> v1:
> * Coding style fixes.
> 
> Signed-off-by: Hervé Poussineau <address@hidden>
> Cc: Alexander Graf <address@hidden>
> Signed-off-by: Andreas Färber <address@hidden>
> ---
> 
> Hello Alex,
> 
> Seeing the discussions about Leon3, is this the way to go for ppc? Is
> ppc.[ch] the right place?
> 
> The unconditional use of 6xx looks suspicious to me, no?
> Should we rename cpu_device_irq_request() to cpu_device_irq_request_6xx()?
> 
> Regards,
> Andreas
> 
> hw/ppc.c            |   75 +++++++++++++++++++++++++++++++++++++++++++++++++++
> hw/ppc.h            |    2 +
> target-ppc/cpu.h    |    1 +
> target-ppc/helper.c |   21 +++++++++++---
> 4 files changed, 94 insertions(+), 5 deletions(-)
> 
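Regarding the unconditional 6xx use asked about above: one option would be to
dispatch on the CPU's input-bus model. The bus_model field and the
PPC_FLAGS_INPUT_* values do exist in target-ppc/cpu.h, but the switch below is
only a sketch, not something the patch implements:

static void cpu_device_irq_request(void *opaque, int pin, int level)
{
    CPUPPC *cpu = opaque;
    CPUPPCState *env = &cpu->state;

    /* Pick the set_irq helper matching the CPU's input pin model
     * instead of hardcoding the 6xx one. */
    switch (env->bus_model) {
    case PPC_FLAGS_INPUT_6xx:
        ppc6xx_set_irq(env, pin, level);
        break;
    case PPC_FLAGS_INPUT_970:
        ppc970_set_irq(env, pin, level);
        break;
    default:
        /* BookE, 40x, etc. would need their own cases here. */
        break;
    }
}

The GPIO count passed to qdev_init_gpio_in() would have to vary per model as
well, since PPC6xx_INPUT_NB is 6xx-specific.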
> diff --git a/hw/ppc.c b/hw/ppc.c
> index 968aec1..0927326 100644
> --- a/hw/ppc.c
> +++ b/hw/ppc.c
> @@ -30,6 +30,8 @@
> #include "loader.h"
> #include "kvm.h"
> #include "kvm_ppc.h"
> +#include "hw/qdev.h"
> +#include "hw/sysbus.h"
> 
> //#define PPC_DEBUG_IRQ
> //#define PPC_DEBUG_TB
> @@ -1286,3 +1288,76 @@ int PPC_NVRAM_set_params (nvram_t *nvram, uint16_t NVRAM_size,
> 
>     return 0;
> }
> +
> +DeviceState *cpu_ppc_create_simple(const char *cpu_model)
> +{
> +    DeviceState *dev;
> +
> +    dev = qdev_create(NULL, "cpu-ppc");
> +    if (!dev) {
> +        return NULL;
> +    }
> +    qdev_prop_set_string(dev, "model", qemu_strdup(cpu_model));
> +    if (qdev_init(dev) < 0) {
> +        return NULL;
> +    }
> +    return dev;
> +}
> +
> +typedef struct CPUPPC {
> +    SysBusDevice busdev;

I'm not sure we really want CPUs on the sysbus. They belong on their own CPU 
bus. Basically, I think we should try to model our bus topology so that it 
reflects the bus topology in the device tree 1:1. Then generating a device tree 
from the bus information and some device-specific callbacks would be possible.
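For illustration, a dedicated CPU bus with the current qdev API could start
out like the sketch below; the bus name and the wiring are assumptions, not
anything the patch does, and the "cpu-ppc" device would then have to be
registered against this BusInfo instead of going through
sysbus_register_withprop():

/* Hypothetical dedicated CPU bus, so CPUs no longer hang off the sysbus. */
static struct BusInfo cpu_bus_info = {
    .name = "cpu-bus",
    .size = sizeof(BusState),
};

/* The machine core would create the bus once and parent CPU devices to
 * it, instead of passing NULL to qdev_create(). */
static BusState *ppc_cpu_bus_create(void)
{
    return qbus_create(&cpu_bus_info, NULL /* root */, "cpu-bus");
}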

> +    char *model;
> +    CPUPPCState state;
> +} CPUPPC;
> +
> +static void cpu_device_irq_request(void *opaque, int pin, int level)
> +{
> +    CPUPPC* cpu = opaque;
> +    CPUPPCState* env = &cpu->state;
> +    ppc6xx_set_irq(env, pin, level);
> +}
> +
> +static int cpu_device_init(SysBusDevice *dev)
> +{
> +    CPUPPC* cpu = FROM_SYSBUS(CPUPPC, dev);
> +    CPUPPCState* env = &cpu->state;
> +
> +    if (cpu_ppc_init_inplace(env, cpu->model) < 0) {
> +        return -1;
> +    }
> +
> +    if (env->flags & POWERPC_FLAG_RTC_CLK) {

Where does this flag suddenly come from? Is this related to qdev'ification?

> +        /* POWER / PowerPC 601 RTC clock frequency is 7.8125 MHz */
> +        cpu_ppc_tb_init(env, 7812500UL);
> +    } else {
> +        /* Set time-base frequency to 100 MHz */
> +        cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL);

Usually we hardcode a TB frequency of 400 MHz in our boards/device trees in the 
TCG case. How about a qdev property that the creator could just adjust to its 
needs? We wouldn't need the special 601 flag then either; just move that into 
the PReP code.
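A sketch of such a property, assuming a "tb-frequency" name and a tb_freq
field added to CPUPPC (neither exists in the patch):

typedef struct CPUPPC {
    SysBusDevice busdev;
    char *model;
    uint32_t tb_freq;   /* hypothetical property-backed field */
    CPUPPCState state;
} CPUPPC;

/* in cpu_device_info.qdev.props, defaulting to the usual TCG value: */
DEFINE_PROP_UINT32("tb-frequency", CPUPPC, tb_freq, 400 * 1000 * 1000),

/* in cpu_device_init(), replacing the flag check: */
cpu_ppc_tb_init(env, cpu->tb_freq);

The PReP board would then just call
qdev_prop_set_uint32(dev, "tb-frequency", 7812500); before qdev_init()
to get the 601's RTC-driven timebase.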

> +    }
> +
> +    qdev_init_gpio_in(&dev->qdev, cpu_device_irq_request, PPC6xx_INPUT_NB);
> +    return 0;
> +}
> +
> +static void cpu_device_reset(DeviceState *d)
> +{
> +    CPUPPC *s = FROM_SYSBUS(CPUPPC, sysbus_from_qdev(d));
> +    cpu_reset(&s->state);
> +}
> +
> +static SysBusDeviceInfo cpu_device_info = {
> +    .qdev.name = "cpu-ppc",
> +    .qdev.size = sizeof(CPUPPC),
> +    .qdev.reset = cpu_device_reset,
> +    .init = cpu_device_init,
> +    .qdev.props = (Property[]) {
> +        DEFINE_PROP_STRING("model", CPUPPC, model),
> +        DEFINE_PROP_END_OF_LIST(),
> +    },
> +};
> +
> +static void ppc_register_devices(void)
> +{
> +    sysbus_register_withprop(&cpu_device_info);
> +}
> +
> +device_init(ppc_register_devices)
> diff --git a/hw/ppc.h b/hw/ppc.h
> index 34f54cf..ae8dd97 100644
> --- a/hw/ppc.h
> +++ b/hw/ppc.h
> @@ -37,6 +37,8 @@ void ppce500_irq_init (CPUState *env);
> void ppc6xx_irq_init (CPUState *env);
> void ppc970_irq_init (CPUState *env);
> 
> +DeviceState *cpu_ppc_create_simple(const char *cpu_model);
> +
> /* PPC machines for OpenBIOS */
> enum {
>     ARCH_PREP = 0,
> diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
> index deb8d7c..0f56d45 100644
> --- a/target-ppc/cpu.h
> +++ b/target-ppc/cpu.h
> @@ -721,6 +721,7 @@ struct mmu_ctx_t {
> 
> /*****************************************************************************/
> CPUPPCState *cpu_ppc_init (const char *cpu_model);
> +int cpu_ppc_init_inplace(CPUPPCState *env, const char *cpu_model);
> void ppc_translate_init(void);
> int cpu_ppc_exec (CPUPPCState *s);
> void cpu_ppc_close (CPUPPCState *s);
> diff --git a/target-ppc/helper.c b/target-ppc/helper.c
> index 4b49101..99af1f6 100644
> --- a/target-ppc/helper.c
> +++ b/target-ppc/helper.c
> @@ -2794,22 +2794,33 @@ void cpu_reset(CPUPPCState *env)
>     tlb_flush(env, 1);
> }
> 
> -CPUPPCState *cpu_ppc_init (const char *cpu_model)
> +int cpu_ppc_init_inplace(CPUPPCState *env, const char *cpu_model)
> {
> -    CPUPPCState *env;
>     const ppc_def_t *def;
> 
>     def = cpu_ppc_find_by_name(cpu_model);
> -    if (!def)
> -        return NULL;
> +    if (!def) {
> +        return -1;
> +    }
> 
> -    env = qemu_mallocz(sizeof(CPUPPCState));
>     cpu_exec_init(env);
>     ppc_translate_init();
>     env->cpu_model_str = cpu_model;
>     cpu_ppc_register_internal(env, def);
> 
>     qemu_init_vcpu(env);
> +    return 0;
> +}
> +
> +CPUPPCState *cpu_ppc_init(const char *cpu_model)
> +{
> +    CPUPPCState *env;
> +
> +    env = qemu_mallocz(sizeof(CPUPPCState));
> +    if (cpu_ppc_init_inplace(env, cpu_model) < 0) {

Why would we need this function again if the CPUs are qdev'ified?
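If it stays at all during a transition period, the legacy entry point could be
routed through the qdev path so non-converted boards keep working. This
assumes the wrapper moves next to the CPUPPC definition or gets an accessor;
it is purely a sketch:

CPUPPCState *cpu_ppc_init(const char *cpu_model)
{
    DeviceState *dev = cpu_ppc_create_simple(cpu_model);

    if (!dev) {
        return NULL;
    }
    /* Hand back the embedded CPU state the old callers expect. */
    return &FROM_SYSBUS(CPUPPC, sysbus_from_qdev(dev))->state;
}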


Overall, I really like the idea of moving CPUs to qdev though. Makes things a 
lot more structured.
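For example, converted board code could wire interrupts through the standard
GPIO helpers instead of poking CPUPPCState directly (the pic device and the
pin choice here are hypothetical):

DeviceState *cpu = cpu_ppc_create_simple(cpu_model);

/* Route the interrupt controller's output to the CPU's external
 * interrupt input pin. */
qdev_connect_gpio_out(pic, 0, qdev_get_gpio_in(cpu, PPC6xx_INPUT_INT));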

Alex



