qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH for-2.12 v3 05/11] spapr: introduce an IRQ allocator using a bitmap


From: Cédric Le Goater
Subject: Re: [Qemu-devel] [PATCH for-2.12 v3 05/11] spapr: introduce an IRQ allocator using a bitmap
Date: Wed, 15 Nov 2017 09:47:17 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Thunderbird/52.4.0

On 11/14/2017 04:28 PM, Greg Kurz wrote:
> On Tue, 14 Nov 2017 11:54:53 +0000
> Cédric Le Goater <address@hidden> wrote:
> 
>> On 11/14/2017 09:42 AM, Greg Kurz wrote:
>>> On Fri, 10 Nov 2017 15:20:11 +0000
>>> Cédric Le Goater <address@hidden> wrote:
>>>   
>>>> Let's define a new set of XICSFabric IRQ operations for the latest
>>>> pseries machine. These simply use a bitmap 'irq_map' as an IRQ number
>>>> allocator.
>>>>
>>>> The previous pseries machines keep the old set of IRQ operations using
>>>> the ICSIRQState array.
>>>>
>>>> Signed-off-by: Cédric Le Goater <address@hidden>
>>>> ---
>>>>
>>>>  Changes since v2 :
>>>>
>>>>  - introduced a second set of XICSFabric IRQ operations for older
>>>>    pseries machines
>>>>
>>>>  hw/ppc/spapr.c         | 76 
>>>> ++++++++++++++++++++++++++++++++++++++++++++++----
>>>>  include/hw/ppc/spapr.h |  3 ++
>>>>  2 files changed, 74 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>>>> index 4bdceb45a14f..4ef0b73559ca 100644
>>>> --- a/hw/ppc/spapr.c
>>>> +++ b/hw/ppc/spapr.c
>>>> @@ -1681,6 +1681,22 @@ static const VMStateDescription 
>>>> vmstate_spapr_patb_entry = {
>>>>      },
>>>>  };
>>>>  
>>>> +static bool spapr_irq_map_needed(void *opaque)
>>>> +{
>>>> +    return true;  
>>>
>>> I see that the next patch adds some code to avoid sending the
>>> bitmap if it doesn't contain state, but I guess you should also
>>> explicitly have this function to return false for older machine
>>> types (see remark below).
>>>   
>>>> +}
>>>> +
>>>> +static const VMStateDescription vmstate_spapr_irq_map = {
>>>> +    .name = "spapr_irq_map",
>>>> +    .version_id = 0,
>>>> +    .minimum_version_id = 0,
>>>> +    .needed = spapr_irq_map_needed,
>>>> +    .fields = (VMStateField[]) {
>>>> +        VMSTATE_BITMAP(irq_map, sPAPRMachineState, 0, nr_irqs),
>>>> +        VMSTATE_END_OF_LIST()
>>>> +    },
>>>> +};
>>>> +
>>>>  static const VMStateDescription vmstate_spapr = {
>>>>      .name = "spapr",
>>>>      .version_id = 3,
>>>> @@ -1700,6 +1716,7 @@ static const VMStateDescription vmstate_spapr = {
>>>>          &vmstate_spapr_ov5_cas,
>>>>          &vmstate_spapr_patb_entry,
>>>>          &vmstate_spapr_pending_events,
>>>> +        &vmstate_spapr_irq_map,
>>>>          NULL
>>>>      }
>>>>  };
>>>> @@ -2337,8 +2354,12 @@ static void ppc_spapr_init(MachineState *machine)
>>>>      /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
>>>>      load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
>>>>  
>>>> +    /* Initialize the IRQ allocator */
>>>> +    spapr->nr_irqs  = XICS_IRQS_SPAPR;
> 
> BTW, is this constant for the machine lifetime ? If so, maybe it should go
> to sPAPRMachineClass.

For Xive, we will be increasing the value of 'nr_irqs' with the number 
of max_cpus to handle the IPIs.

C.


> 
>>>> +    spapr->irq_map  = bitmap_new(spapr->nr_irqs);
>>>> +  
>>>
>>> I think you should introduce a sPAPRMachineClass::has_irq_bitmap boolean
>>> so that the bitmap is only allocated for newer machine types. And you should
>>> then use this flag in spapr_irq_map_needed() above.  
>>
>> yes. I can add a bool to be more explicit on the use of the bitmap.
>>
>> Thanks,
>>
>> C. 
>>
>>
>>>
>>> Apart from that, the rest of the patch looks good.
>>>   
>>>>      /* Set up Interrupt Controller before we create the VCPUs */
>>>> -    xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal);
>>>> +    xics_system_init(machine, spapr->nr_irqs, &error_fatal);
>>>>  
>>>>      /* Set up containers for ibm,client-architecture-support negotiated 
>>>> options
>>>>       */
>>>> @@ -3560,7 +3581,7 @@ static int ics_find_free_block(ICSState *ics, int 
>>>> num, int alignnum)
>>>>      return -1;
>>>>  }
>>>>  
>>>> -static bool spapr_irq_test(XICSFabric *xi, int irq)
>>>> +static bool spapr_irq_test_2_11(XICSFabric *xi, int irq)
>>>>  {
>>>>      sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
>>>>      ICSState *ics = spapr->ics;
>>>> @@ -3569,7 +3590,7 @@ static bool spapr_irq_test(XICSFabric *xi, int irq)
>>>>      return !ICS_IRQ_FREE(ics, srcno);
>>>>  }
>>>>  
>>>> -static int spapr_irq_alloc_block(XICSFabric *xi, int count, int align)
>>>> +static int spapr_irq_alloc_block_2_11(XICSFabric *xi, int count, int 
>>>> align)
>>>>  {
>>>>      sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
>>>>      ICSState *ics = spapr->ics;
>>>> @@ -3583,7 +3604,7 @@ static int spapr_irq_alloc_block(XICSFabric *xi, int 
>>>> count, int align)
>>>>      return srcno + ics->offset;
>>>>  }
>>>>  
>>>> -static void spapr_irq_free_block(XICSFabric *xi, int irq, int num)
>>>> +static void spapr_irq_free_block_2_11(XICSFabric *xi, int irq, int num)
>>>>  {
>>>>      sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
>>>>      ICSState *ics = spapr->ics;
>>>> @@ -3601,6 +3622,46 @@ static void spapr_irq_free_block(XICSFabric *xi, 
>>>> int irq, int num)
>>>>      }
>>>>  }
>>>>  
>>>> +static bool spapr_irq_test(XICSFabric *xi, int irq)
>>>> +{
>>>> +    sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
>>>> +    int srcno = irq - spapr->ics->offset;
>>>> +
>>>> +    return test_bit(srcno, spapr->irq_map);
>>>> +}
>>>> +
>>>> +static int spapr_irq_alloc_block(XICSFabric *xi, int count, int align)
>>>> +{
>>>> +    sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
>>>> +    int start = 0;
>>>> +    int srcno;
>>>> +
>>>> +    /*
>>>> +     * The 'align_mask' parameter of bitmap_find_next_zero_area()
>>>> +     * should be one less than a power of 2; 0 means no
>>>> +     * alignment. Adapt the 'align' value of the former allocator to
>>>> +     * fit the requirements of bitmap_find_next_zero_area()
>>>> +     */
>>>> +    align -= 1;
>>>> +
>>>> +    srcno = bitmap_find_next_zero_area(spapr->irq_map, spapr->nr_irqs, 
>>>> start,
>>>> +                                       count, align);
>>>> +    if (srcno == spapr->nr_irqs) {
>>>> +        return -1;
>>>> +    }
>>>> +
>>>> +    bitmap_set(spapr->irq_map, srcno, count);
>>>> +    return srcno + spapr->ics->offset;
>>>> +}
>>>> +
>>>> +static void spapr_irq_free_block(XICSFabric *xi, int irq, int num)
>>>> +{
>>>> +    sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
>>>> +    int srcno = irq - spapr->ics->offset;
>>>> +
>>>> +    bitmap_clear(spapr->irq_map, srcno, num);
>>>> +}
>>>> +
>>>>  static void spapr_pic_print_info(InterruptStatsProvider *obj,
>>>>                                   Monitor *mon)
>>>>  {
>>>> @@ -3778,7 +3839,12 @@ static void 
>>>> spapr_machine_2_11_instance_options(MachineState *machine)
>>>>  
>>>>  static void spapr_machine_2_11_class_options(MachineClass *mc)
>>>>  {
>>>> -    /* Defaults for the latest behaviour inherited from the base class */
>>>> +    XICSFabricClass *xic = XICS_FABRIC_CLASS(mc);
>>>> +
>>>> +    spapr_machine_2_12_class_options(mc);
>>>> +    xic->irq_test = spapr_irq_test_2_11;
>>>> +    xic->irq_alloc_block = spapr_irq_alloc_block_2_11;
>>>> +    xic->irq_free_block = spapr_irq_free_block_2_11;
>>>>  }
>>>>  
>>>>  DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
>>>> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
>>>> index 9d21ca9bde3a..5835c694caff 100644
>>>> --- a/include/hw/ppc/spapr.h
>>>> +++ b/include/hw/ppc/spapr.h
>>>> @@ -7,6 +7,7 @@
>>>>  #include "hw/ppc/spapr_drc.h"
>>>>  #include "hw/mem/pc-dimm.h"
>>>>  #include "hw/ppc/spapr_ovec.h"
>>>> +#include "qemu/bitmap.h"
>>>>  
>>>>  struct VIOsPAPRBus;
>>>>  struct sPAPRPHBState;
>>>> @@ -78,6 +79,8 @@ struct sPAPRMachineState {
>>>>      struct VIOsPAPRBus *vio_bus;
>>>>      QLIST_HEAD(, sPAPRPHBState) phbs;
>>>>      struct sPAPRNVRAM *nvram;
>>>> +    int32_t nr_irqs;
>>>> +    unsigned long *irq_map;
>>>>      ICSState *ics;
>>>>      sPAPRRTCState rtc;
>>>>    
>>>   
>>
> 




reply via email to

[Prev in Thread] Current Thread [Next in Thread]