From: Nikunj A Dadhania
Subject: Re: [Qemu-devel] [Qemu-ppc] [PATCH v4 3/9] ppc/xics: Make the ICSState a list
Date: Mon, 19 Sep 2016 12:32:46 +0530
User-agent: Notmuch/0.21 (https://notmuchmail.org) Emacs/25.0.94.1 (x86_64-redhat-linux-gnu)

Cédric Le Goater <address@hidden> writes:

> On 09/19/2016 08:29 AM, Nikunj A Dadhania wrote:
>> From: Benjamin Herrenschmidt <address@hidden>
>> 
>> Instead of an array of fixed-sized blocks, use a list, as we will need
>> to have sources with a variable number of interrupts. SPAPR only uses
>> a single entry. Native will create more. If performance becomes an
>> issue we can add a hashed lookup, but for now this will do fine.
>> 
>> Signed-off-by: Benjamin Herrenschmidt <address@hidden>
>> [ move the initialization of the list to xics_common_initfn,
>>   restore xirr_owner after migration and move the restoration to
>>   icp_post_load ]
>> Signed-off-by: Nikunj A Dadhania <address@hidden>
>
> This looks good to me apart from the change to icp_post_load().
>
>> ---
>>  hw/intc/trace-events  |   5 +-
>>  hw/intc/xics.c        | 130 ++++++++++++++++++++++++++++++++------------------
>>  hw/intc/xics_kvm.c    |  27 +++++++----
>>  hw/intc/xics_spapr.c  |  88 ++++++++++++++++++++++------------
>>  hw/ppc/spapr_events.c |   2 +-
>>  hw/ppc/spapr_pci.c    |   5 +-
>>  hw/ppc/spapr_vio.c    |   2 +-
>>  include/hw/ppc/xics.h |  13 ++---
>>  8 files changed, 173 insertions(+), 99 deletions(-)
>> 
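[Editor's note: a rough sketch of the data-structure change the commit
message describes, for readers of the archive. The field names follow the
QLIST_FOREACH(ics, &xics->ics, list) usage in the hunks below; surrounding
members are elided, so treat this as an illustration rather than the exact
headers from the patch.]

    #include "qemu/queue.h"

    typedef struct XICSState XICSState;
    typedef struct ICSState ICSState;

    struct XICSState {
        /* ... other members elided ... */
        QLIST_HEAD(, ICSState) ics;   /* list head, QLIST_INIT'd in
                                       * xics_common_initfn() below */
    };

    struct ICSState {
        /* ... other members elided ... */
        XICSState *xics;              /* back-pointer, used by icp_irq() */
        QLIST_ENTRY(ICSState) list;   /* linkage on xics->ics */
    };
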
>> diff --git a/hw/intc/trace-events b/hw/intc/trace-events
>> index f12192c..aa342a8 100644
>> --- a/hw/intc/trace-events
>> +++ b/hw/intc/trace-events
>> @@ -56,10 +56,11 @@ xics_set_irq_lsi(int srcno, int nr) "set_irq_lsi: srcno %d [irq %#x]"
>>  xics_ics_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_xive: irq %#x [src %d] server %#x prio %#x"
>>  xics_ics_reject(int nr, int srcno) "reject irq %#x [src %d]"
>>  xics_ics_eoi(int nr) "ics_eoi: irq %#x"
>> -xics_alloc(int src, int irq) "source#%d, irq %d"
>> -xics_alloc_block(int src, int first, int num, bool lsi, int align) "source#%d, first irq %d, %d irqs, lsi=%d, alignnum %d"
>> +xics_alloc(int irq) "irq %d"
>> +xics_alloc_block(int first, int num, bool lsi, int align) "first irq %d, %d irqs, lsi=%d, alignnum %d"
>>  xics_ics_free(int src, int irq, int num) "Source#%d, first irq %d, %d irqs"
>>  xics_ics_free_warn(int src, int irq) "Source#%d, irq %d is already free"
>> +xics_icp_post_load(uint32_t server_no, uint32_t xirr, uint64_t addr, uint8_t pend) "server_no %d, xirr %#x, xirr_owner 0x%" PRIx64 ", pending %d"
>>  
>>  # hw/intc/s390_flic_kvm.c
>>  flic_create_device(int err) "flic: create device failed %d"
>> diff --git a/hw/intc/xics.c b/hw/intc/xics.c
>> index f765b08..05e938f 100644
>> --- a/hw/intc/xics.c
>> +++ b/hw/intc/xics.c
>> @@ -32,6 +32,7 @@
>>  #include "hw/hw.h"
>>  #include "trace.h"
>>  #include "qemu/timer.h"
>> +#include "hw/ppc/spapr.h"
>>  #include "hw/ppc/xics.h"
>>  #include "qemu/error-report.h"
>>  #include "qapi/visitor.h"
>> @@ -96,13 +97,16 @@ void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
>>  static void xics_common_reset(DeviceState *d)
>>  {
>>      XICSState *xics = XICS_COMMON(d);
>> +    ICSState *ics;
>>      int i;
>>  
>>      for (i = 0; i < xics->nr_servers; i++) {
>>          device_reset(DEVICE(&xics->ss[i]));
>>      }
>>  
>> -    device_reset(DEVICE(xics->ics));
>> +    QLIST_FOREACH(ics, &xics->ics, list) {
>> +        device_reset(DEVICE(ics));
>> +    }
>>  }
>>  
>>  static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
>> @@ -134,7 +138,6 @@ static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
>>      }
>>  
>>      assert(info->set_nr_irqs);
>> -    assert(xics->ics);
>>      info->set_nr_irqs(xics, value, errp);
>>  }
>>  
>> @@ -174,6 +177,9 @@ static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
>>  
>>  static void xics_common_initfn(Object *obj)
>>  {
>> +    XICSState *xics = XICS_COMMON(obj);
>> +
>> +    QLIST_INIT(&xics->ics);
>>      object_property_add(obj, "nr_irqs", "int",
>>                          xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
>>                          NULL, NULL, NULL);
>> @@ -212,33 +218,35 @@ static void ics_reject(ICSState *ics, int nr);
>>  static void ics_resend(ICSState *ics, int server);
>>  static void ics_eoi(ICSState *ics, int nr);
>>  
>> -static void icp_check_ipi(XICSState *xics, int server)
>> +static void icp_check_ipi(ICPState *ss)
>>  {
>> -    ICPState *ss = xics->ss + server;
>> -
>>      if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
>>          return;
>>      }
>>  
>> -    trace_xics_icp_check_ipi(server, ss->mfrr);
>> +    trace_xics_icp_check_ipi(ss->cs->cpu_index, ss->mfrr);
>>  
>> -    if (XISR(ss)) {
>> -        ics_reject(xics->ics, XISR(ss));
>> +    if (XISR(ss) && ss->xirr_owner) {
>> +        ics_reject(ss->xirr_owner, XISR(ss));
>>      }
>>  
>>      ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
>>      ss->pending_priority = ss->mfrr;
>> +    ss->xirr_owner = NULL;
>>      qemu_irq_raise(ss->output);
>>  }
>>  
>>  static void icp_resend(XICSState *xics, int server)
>>  {
>>      ICPState *ss = xics->ss + server;
>> +    ICSState *ics;
>>  
>>      if (ss->mfrr < CPPR(ss)) {
>> -        icp_check_ipi(xics, server);
>> +        icp_check_ipi(ss);
>> +    }
>> +    QLIST_FOREACH(ics, &xics->ics, list) {
>> +        ics_resend(ics, server);
>>      }
>> -    ics_resend(xics->ics, server);
>>  }
>>  
>>  void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
>> @@ -256,7 +264,10 @@ void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
>>              ss->xirr &= ~XISR_MASK; /* Clear XISR */
>>              ss->pending_priority = 0xff;
>>              qemu_irq_lower(ss->output);
>> -            ics_reject(xics->ics, old_xisr);
>> +            if (ss->xirr_owner) {
>> +                ics_reject(ss->xirr_owner, old_xisr);
>> +                ss->xirr_owner = NULL;
>> +            }
>>          }
>>      } else {
>>          if (!XISR(ss)) {
>> @@ -271,7 +282,7 @@ void icp_set_mfrr(XICSState *xics, int server, uint8_t mfrr)
>>  
>>      ss->mfrr = mfrr;
>>      if (mfrr < CPPR(ss)) {
>> -        icp_check_ipi(xics, server);
>> +        icp_check_ipi(ss);
>>      }
>>  }
>>  
>> @@ -282,6 +293,7 @@ uint32_t icp_accept(ICPState *ss)
>>      qemu_irq_lower(ss->output);
>>      ss->xirr = ss->pending_priority << 24;
>>      ss->pending_priority = 0xff;
>> +    ss->xirr_owner = NULL;
>>  
>>      trace_xics_icp_accept(xirr, ss->xirr);
>>  
>> @@ -299,30 +311,40 @@ uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr)
>>  void icp_eoi(XICSState *xics, int server, uint32_t xirr)
>>  {
>>      ICPState *ss = xics->ss + server;
>> +    ICSState *ics;
>> +    uint32_t irq;
>>  
>>      /* Send EOI -> ICS */
>>      ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
>>      trace_xics_icp_eoi(server, xirr, ss->xirr);
>> -    ics_eoi(xics->ics, xirr & XISR_MASK);
>> +    irq = xirr & XISR_MASK;
>> +    QLIST_FOREACH(ics, &xics->ics, list) {
>> +        if (ics_valid_irq(ics, irq)) {
>> +            ics_eoi(ics, irq);
>> +        }
>> +    }
>>      if (!XISR(ss)) {
>>          icp_resend(xics, server);
>>      }
>>  }
>>  
>> -static void icp_irq(XICSState *xics, int server, int nr, uint8_t priority)
>> +static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
>>  {
>> +    XICSState *xics = ics->xics;
>>      ICPState *ss = xics->ss + server;
>>  
>>      trace_xics_icp_irq(server, nr, priority);
>>  
>>      if ((priority >= CPPR(ss))
>>          || (XISR(ss) && (ss->pending_priority <= priority))) {
>> -        ics_reject(xics->ics, nr);
>> +        ics_reject(ics, nr);
>>      } else {
>> -        if (XISR(ss)) {
>> -            ics_reject(xics->ics, XISR(ss));
>> +        if (XISR(ss) && ss->xirr_owner) {
>> +            ics_reject(ss->xirr_owner, XISR(ss));
>> +            ss->xirr_owner = NULL;
>>          }
>>          ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
>> +        ss->xirr_owner = ics;
>>          ss->pending_priority = priority;
>>          trace_xics_icp_raise(ss->xirr, ss->pending_priority);
>>          qemu_irq_raise(ss->output);
>> @@ -378,12 +400,45 @@ static void icp_reset(DeviceState *dev)
>>      qemu_set_irq(icp->output, 0);
>>  }
>>  
>> +static int icp_post_load(ICPState *ss, int version_id)
>> +{
>> +    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
>> +    XICSState *xics = spapr->xics;
>> +    uint32_t irq, i;
>> +    static uint32_t server_no;
>> +
>> +    server_no++;
>> +    irq = XISR(ss);
>> +    if (!ss->cs || !irq) {
>> +        goto icpend;
>> +    }
>> +
>> +    /* Restore the xirr_owner */
>> +    ss->xirr_owner = xics_find_source(xics, irq);
>> +
>> + icpend:
>> +    trace_xics_icp_post_load(server_no, ss->xirr, (uint64_t)ss->xirr_owner,
>> +                             ss->pending_priority);
>> +    if (xics->nr_servers != server_no) {
>> +        return 0;
>> +    }
>> +
>> +    /* All the ICPs are processed, now restore all the state */
>> +    for (i = 0; i < xics->nr_servers; i++) {
>> +        icp_resend(xics, i);
>> +    }
>> +    server_no = 0;
>> +    return 0;
>> +}
>
> Is the issue this change is trying to fix related to ICSState becoming
> a list? If not, it should be in another patch, I think.

Yes, and we introduced xirr_owner as an optimization. It needs to be
regenerated at the destination after migration.
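
[Editor's note: for context, the restore boils down to a walk over the ICS
list. Below is a minimal sketch of the lookup icp_post_load() relies on;
the actual xics_find_source() helper is introduced elsewhere in this
series, so this illustrates the idea rather than the committed code.]

    static ICSState *xics_find_source(XICSState *xics, int irq)
    {
        ICSState *ics;

        /* Scan the registered sources for the one owning this global irq. */
        QLIST_FOREACH(ics, &xics->ics, list) {
            if (ics_valid_irq(ics, irq)) {
                return ics;
            }
        }
        return NULL;
    }

With sPAPR's single entry this linear scan is effectively constant-time,
which is why the commit message defers any hashed lookup until a platform
actually registers enough sources for it to matter.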

Regards
Nikunj



