From: David Gibson
Subject: Re: [Qemu-devel] [RFC PATCH v0 4/5] xics: Use arch_id instead of cpu_index in XICS code
Date: Tue, 5 Jul 2016 14:59:20 +1000
User-agent: Mutt/1.6.1 (2016-04-27)
On Tue, Jul 05, 2016 at 10:12:51AM +0530, Bharata B Rao wrote:
> xics maintains an array of ICPState structures which is indexed
> by cpu_index. Change this to index the ICPState array by arch_id
> for pseries-2.7 onwards. This allows migration of the guest to succeed
> when there are holes in the cpu_index range due to CPU hot removal.
>
> Signed-off-by: Bharata B Rao <address@hidden>
> ---
> hw/intc/xics.c | 14 ++++++++++----
> hw/intc/xics_kvm.c | 12 ++++++------
> hw/intc/xics_spapr.c | 33 ++++++++++++++++++++++++++-------
> 3 files changed, 42 insertions(+), 17 deletions(-)
>
> diff --git a/hw/intc/xics.c b/hw/intc/xics.c
> index cd48f42..6b79a8e 100644
> --- a/hw/intc/xics.c
> +++ b/hw/intc/xics.c
> @@ -50,9 +50,12 @@ int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
> void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
> {
> CPUState *cs = CPU(cpu);
> - ICPState *ss = &xics->ss[cs->cpu_index];
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
Requiring this conditional in all the callsites is pretty ugly. I'd
prefer to see a get_arch_id() helper that implements this fallback
internally.
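Something along these lines would keep the fallback in one place (just a
rough sketch to show the shape I mean; the helper name and where it lives
are illustrative, and it relies on the prefer_arch_id_over_cpu_index flag
this series adds):

    /* Sketch only: resolve the XICS server number for a vCPU, preferring
     * arch_id on machine types that ask for it and falling back to
     * cpu_index otherwise. */
    static int xics_get_server(CPUState *cs)
    {
        CPUClass *cc = CPU_GET_CLASS(cs);

        return cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs)
                                                 : cs->cpu_index;
    }

Each call site would then reduce to something like:

    ICPState *ss = &xics->ss[xics_get_server(cs)];
    assert(xics_get_server(cs) < xics->nr_servers);
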
> + ICPState *ss = &xics->ss[server];
>
> - assert(cs->cpu_index < xics->nr_servers);
> + assert(server < xics->nr_servers);
> assert(cs == ss->cs);
>
> ss->output = NULL;
> @@ -63,10 +66,13 @@ void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
> {
> CPUState *cs = CPU(cpu);
> CPUPPCState *env = &cpu->env;
> - ICPState *ss = &xics->ss[cs->cpu_index];
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> + ICPState *ss = &xics->ss[server];
> XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
>
> - assert(cs->cpu_index < xics->nr_servers);
> + assert(server < xics->nr_servers);
>
> ss->cs = cs;
>
> diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
> index edbd62f..2610458 100644
> --- a/hw/intc/xics_kvm.c
> +++ b/hw/intc/xics_kvm.c
> @@ -326,14 +326,14 @@ static const TypeInfo ics_kvm_info = {
> */
> static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
> {
> - CPUState *cs;
> - ICPState *ss;
> + CPUState *cs = CPU(cpu);
> KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> + ICPState *ss = &xics->ss[server];
>
> - cs = CPU(cpu);
> - ss = &xics->ss[cs->cpu_index];
> -
> - assert(cs->cpu_index < xics->nr_servers);
> + assert(server < xics->nr_servers);
> if (xicskvm->kernel_xics_fd == -1) {
> abort();
> }
> diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
> index 618826d..fbc8205 100644
> --- a/hw/intc/xics_spapr.c
> +++ b/hw/intc/xics_spapr.c
> @@ -43,16 +43,21 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> CPUState *cs = CPU(cpu);
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> target_ulong cppr = args[0];
>
> - icp_set_cppr(spapr->xics, cs->cpu_index, cppr);
> + icp_set_cppr(spapr->xics, server, cppr);
> return H_SUCCESS;
> }
>
> static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - target_ulong server = xics_get_cpu_index_by_dt_id(args[0]);
> + CPUState *cs = CPU(cpu);
> + target_ulong server = cs->prefer_arch_id_over_cpu_index ? args[0] :
> + xics_get_cpu_index_by_dt_id(args[0]);
> target_ulong mfrr = args[1];
>
> if (server >= spapr->xics->nr_servers) {
> @@ -67,7 +72,10 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> CPUState *cs = CPU(cpu);
> - uint32_t xirr = icp_accept(spapr->xics->ss + cs->cpu_index);
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> + uint32_t xirr = icp_accept(spapr->xics->ss + server);
>
> args[0] = xirr;
> return H_SUCCESS;
> @@ -77,7 +85,10 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> CPUState *cs = CPU(cpu);
> - ICPState *ss = &spapr->xics->ss[cs->cpu_index];
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> + ICPState *ss = &spapr->xics->ss[server];
> uint32_t xirr = icp_accept(ss);
>
> args[0] = xirr;
> @@ -89,9 +100,12 @@ static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> CPUState *cs = CPU(cpu);
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> target_ulong xirr = args[0];
>
> - icp_eoi(spapr->xics, cs->cpu_index, xirr);
> + icp_eoi(spapr->xics, server, xirr);
> return H_SUCCESS;
> }
>
> @@ -99,8 +113,11 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> CPUState *cs = CPU(cpu);
> + CPUClass *cc = CPU_GET_CLASS(cs);
> + int server = cs->prefer_arch_id_over_cpu_index ? cc->get_arch_id(cs) :
> + cs->cpu_index;
> uint32_t mfrr;
> - uint32_t xirr = icp_ipoll(spapr->xics->ss + cs->cpu_index, &mfrr);
> + uint32_t xirr = icp_ipoll(spapr->xics->ss + server, &mfrr);
>
> args[0] = xirr;
> args[1] = mfrr;
> @@ -113,6 +130,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> uint32_t nargs, target_ulong args,
> uint32_t nret, target_ulong rets)
> {
> + CPUState *cs = CPU(cpu);
> ICSState *ics = spapr->xics->ics;
> uint32_t nr, server, priority;
>
> @@ -122,7 +140,8 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> }
>
> nr = rtas_ld(args, 0);
> - server = xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
> + server = cs->prefer_arch_id_over_cpu_index ? rtas_ld(args, 1) :
> + xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
> priority = rtas_ld(args, 2);
>
> if (!ics_valid_irq(ics, nr) || (server >= ics->xics->nr_servers)
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson