Re: [Qemu-devel] [PATCH v3] hw/arm/virt: Add high MMIO PCI region


From: Igor Mammedov
Subject: Re: [Qemu-devel] [PATCH v3] hw/arm/virt: Add high MMIO PCI region
Date: Mon, 27 Jul 2015 15:26:58 +0200

On Mon, 27 Jul 2015 14:09:28 +0300
Pavel Fedin <address@hidden> wrote:

> This large region is necessary for some devices like ivshmem and video cards
> 
> Signed-off-by: Pavel Fedin <address@hidden>
> ---
> Changes since v2:
> - Region size increased to 512G
> - Added ACPI description
> Changes since v1:
> - Region address changed to 512G, leaving more space for RAM
> ---
>  hw/arm/virt-acpi-build.c |  8 ++++++++
>  hw/arm/virt.c            | 13 ++++++++++++-
>  include/hw/arm/virt.h    |  1 +
>  3 files changed, 21 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
> index f365140..020aad6 100644
> --- a/hw/arm/virt-acpi-build.c
> +++ b/hw/arm/virt-acpi-build.c
> @@ -169,6 +169,8 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, int irq)
>      hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
>      hwaddr base_ecam = memmap[VIRT_PCIE_ECAM].base;
>      hwaddr size_ecam = memmap[VIRT_PCIE_ECAM].size;
> +    hwaddr base_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].base;
> +    hwaddr size_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].size;
>      int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
>  
>      Aml *dev = aml_device("%s", "PCI0");
> @@ -234,6 +236,12 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, int irq)
>                       AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
>                       size_pio));
>  
> +    aml_append(rbuf,
> +        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
> +                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
> +                         base_mmio_high, base_mmio_high + size_mmio_high - 1,
> +                         0x0000, size_mmio_high));
> +
>      aml_append(method, aml_name_decl("RBUF", rbuf));
>      aml_append(method, aml_return(rbuf));
>      aml_append(dev, method);
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index e53ef4c..c20b3b8 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -124,6 +124,7 @@ static const MemMapEntry a15memmap[] = {
>      [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
>      [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
>      [VIRT_MEM] =                { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
> +    [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000, 0x8000000000 },
I'm not sure, but a fixed hole start/size might become a problem later when
adding memory hotplug, since it wastes address space.

On x86 we do it a little differently; see the call chain:
 acpi_setup -> build_ssdt ->
   i440fx_pcihost_get_pci_hole64_start -> pci_bus_get_w64_range
   ...                          _end -> ...

where acpi_setup() is called from pc_guest_info_machine_done() right before
the guest starts, and then again later, after the guest's BIOS (UEFI) has
initialized the PCI devices.

Perhaps we should do the same for ARM as well; CCing Michael.
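
Roughly, the idea on x86 is to derive the 64-bit window from the BARs the
guest firmware actually programmed, rather than hard-coding it in the memmap.
A rough, untested sketch of that idea (pci_bus_get_w64_range() is the existing
helper used by the i440fx getters; the wrapper name below is made up):

    #include "qemu/range.h"
    #include "hw/pci/pci.h"

    /* Hypothetical helper: compute the high MMIO window from the 64-bit
     * BARs that guest firmware assigned, instead of a fixed memmap entry.
     */
    static void virt_get_high_mmio_window(PCIBus *bus,
                                          hwaddr *base, hwaddr *size)
    {
        Range w64;

        pci_bus_get_w64_range(bus, &w64);   /* scan assigned 64-bit BARs */
        *base = w64.begin;
        *size = w64.end - w64.begin;        /* 0 if nothing was assigned */
    }

The ACPI tables would then be (re)generated with these values at machine-done
time, the same way the x86 pci-hole64 properties are consumed.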

>  };
>  
>  static const int a15irqmap[] = {
> @@ -758,6 +759,8 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic)
>      hwaddr size_pio = vbi->memmap[VIRT_PCIE_PIO].size;
>      hwaddr base_ecam = vbi->memmap[VIRT_PCIE_ECAM].base;
>      hwaddr size_ecam = vbi->memmap[VIRT_PCIE_ECAM].size;
> +    hwaddr base_mmio_high = vbi->memmap[VIRT_PCIE_MMIO_HIGH].base;
> +    hwaddr size_mmio_high = vbi->memmap[VIRT_PCIE_MMIO_HIGH].size;
>      hwaddr base = base_mmio;
>      int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
>      int irq = vbi->irqmap[VIRT_PCIE];
> @@ -793,6 +796,12 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic)
>      /* Map IO port space */
>      sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);
>  
> +    /* High MMIO space */
> +    mmio_alias = g_new0(MemoryRegion, 1);
> +    memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio-high",
> +                             mmio_reg, base_mmio_high, size_mmio_high);
> +    memory_region_add_subregion(get_system_memory(), base_mmio_high, mmio_alias);
Is there any specific reason to have 2 separate regions instead of 1, as in
pc_pci_as_mapping_init(), which uses region priority instead of splitting?
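
For reference, pc_pci_as_mapping_init() simply maps the whole PCI address
space into system memory at a priority below RAM, so RAM and other regions
win wherever they overlap. Something like the following single mapping
(priority -1 copied from the x86 code, untested here) could replace the two
aliases, with mmio_reg being the gpex host's MMIO region already used in
create_pcie():

    /* Map the host bridge's whole MMIO space once, below RAM priority,
     * instead of carving out separate low and high aliases.
     */
    memory_region_add_subregion_overlap(get_system_memory(), 0x0,
                                        mmio_reg, -1);
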

>      for (i = 0; i < GPEX_NUM_IRQS; i++) {
>          sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
>      }
> @@ -818,7 +827,9 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic)
>                                   1, FDT_PCI_RANGE_IOPORT, 2, 0,
>                                   2, base_pio, 2, size_pio,
>                                   1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
> -                                 2, base_mmio, 2, size_mmio);
> +                                 2, base_mmio, 2, size_mmio,
> +                                 1, FDT_PCI_RANGE_MMIO, 2, base_mmio_high,
> +                                 2, base_mmio_high, 2, size_mmio_high);
>  
>      qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>      create_pcie_irq_map(vbi, vbi->gic_phandle, irq, nodename);
> diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
> index 852efb9..1d43598 100644
> --- a/include/hw/arm/virt.h
> +++ b/include/hw/arm/virt.h
> @@ -60,6 +60,7 @@ enum {
>      VIRT_PCIE_PIO,
>      VIRT_PCIE_ECAM,
>      VIRT_PLATFORM_BUS,
> +    VIRT_PCIE_MMIO_HIGH,
>  };
>  
>  typedef struct MemMapEntry {



