[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v2 3/3] hw/riscv/virt: Add ACPI GED and PC-DIMM MHP support
From: |
Björn Töpel |
Subject: |
Re: [PATCH v2 3/3] hw/riscv/virt: Add ACPI GED and PC-DIMM MHP support |
Date: |
Mon, 27 May 2024 14:08:21 +0200 |
Daniel Henrique Barboza <dbarboza@ventanamicro.com> writes:
> On 5/21/24 07:56, Björn Töpel wrote:
>> From: Björn Töpel <bjorn@rivosinc.com>
>>
>> Add ACPI GED for the RISC-V "virt" machine, and wire up PC-DIMM memory
>> hotplugging support. Heavily based/copied from hw/arm/virt.c.
>>
>> Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
>> ---
>> hw/riscv/Kconfig | 3 ++
>> hw/riscv/virt-acpi-build.c | 16 ++++++
>> hw/riscv/virt.c | 104 ++++++++++++++++++++++++++++++++++++-
>> include/hw/riscv/virt.h | 6 ++-
>> 4 files changed, 126 insertions(+), 3 deletions(-)
>>
>> diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
>> index 08f82dbb681a..bebe412f2107 100644
>> --- a/hw/riscv/Kconfig
>> +++ b/hw/riscv/Kconfig
>> @@ -56,6 +56,9 @@ config RISCV_VIRT
>> select PLATFORM_BUS
>> select ACPI
>> select ACPI_PCI
>> + select MEM_DEVICE
>> + select DIMM
>> + select ACPI_HW_REDUCED
>> select VIRTIO_MEM_SUPPORTED
>> select VIRTIO_PMEM_SUPPORTED
>>
>> diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c
>> index 6dc3baa9ec86..61813abdef3f 100644
>> --- a/hw/riscv/virt-acpi-build.c
>> +++ b/hw/riscv/virt-acpi-build.c
>> @@ -27,6 +27,8 @@
>> #include "hw/acpi/acpi-defs.h"
>> #include "hw/acpi/acpi.h"
>> #include "hw/acpi/aml-build.h"
>> +#include "hw/acpi/memory_hotplug.h"
>> +#include "hw/acpi/generic_event_device.h"
>> #include "hw/acpi/pci.h"
>> #include "hw/acpi/utils.h"
>> #include "hw/intc/riscv_aclint.h"
>> @@ -432,6 +434,20 @@ static void build_dsdt(GArray *table_data,
>> acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES
>> * 2);
>> }
>>
>> + if (s->acpi_dev) {
>> + uint32_t event = object_property_get_uint(OBJECT(s->acpi_dev),
>> + "ged-event",
>> &error_abort);
>> +
>> + build_ged_aml(scope, "\\_SB."GED_DEVICE,
>> HOTPLUG_HANDLER(s->acpi_dev),
>> + GED_IRQ, AML_SYSTEM_MEMORY,
>> memmap[VIRT_ACPI_GED].base);
>> +
>> + if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
>> + build_memory_hotplug_aml(scope, ms->ram_slots, "\\_SB", NULL,
>> + AML_SYSTEM_MEMORY,
>> + memmap[VIRT_PCDIMM_ACPI].base);
>> + }
>> + }
>> +
>> aml_append(dsdt, scope);
>>
>> /* copy AML table into ACPI tables blob and patch header there */
>> diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
>> index 443902f919d2..2e35890187f2 100644
>> --- a/hw/riscv/virt.c
>> +++ b/hw/riscv/virt.c
>> @@ -53,10 +53,13 @@
>> #include "hw/pci-host/gpex.h"
>> #include "hw/display/ramfb.h"
>> #include "hw/acpi/aml-build.h"
>> +#include "hw/acpi/generic_event_device.h"
>> +#include "hw/acpi/memory_hotplug.h"
>> #include "hw/mem/memory-device.h"
>> #include "hw/virtio/virtio-mem-pci.h"
>> #include "qapi/qapi-visit-common.h"
>> #include "hw/virtio/virtio-iommu.h"
>> +#include "hw/mem/pc-dimm.h"
>>
>> /* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by
>> QEMU. */
>> static bool virt_use_kvm_aia(RISCVVirtState *s)
>> @@ -84,6 +87,8 @@ static const MemMapEntry virt_memmap[] = {
>> [VIRT_UART0] = { 0x10000000, 0x100 },
>> [VIRT_VIRTIO] = { 0x10001000, 0x1000 },
>> [VIRT_FW_CFG] = { 0x10100000, 0x18 },
>> + [VIRT_PCDIMM_ACPI] = { 0x10200000, MEMORY_HOTPLUG_IO_LEN },
>> + [VIRT_ACPI_GED] = { 0x10210000, ACPI_GED_EVT_SEL_LEN },
>> [VIRT_FLASH] = { 0x20000000, 0x4000000 },
>> [VIRT_IMSIC_M] = { 0x24000000, VIRT_IMSIC_MAX_SIZE },
>> [VIRT_IMSIC_S] = { 0x28000000, VIRT_IMSIC_MAX_SIZE },
>> @@ -1400,6 +1405,28 @@ static void virt_machine_done(Notifier *notifier,
>> void *data)
>> }
>> }
>>
>> +static DeviceState *create_acpi_ged(RISCVVirtState *s)
>> +{
>> + DeviceState *dev;
>> + MachineState *ms = MACHINE(s);
>> + uint32_t event = 0;
>> +
>> + if (ms->ram_slots) {
>> + event |= ACPI_GED_MEM_HOTPLUG_EVT;
>> + }
>> +
>> + dev = qdev_new(TYPE_ACPI_GED);
>> + qdev_prop_set_uint32(dev, "ged-event", event);
>> + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
>> +
>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, s->memmap[VIRT_ACPI_GED].base);
>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1,
>> s->memmap[VIRT_PCDIMM_ACPI].base);
>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
>> qdev_get_gpio_in(s->irqchip[0],
>> + GED_IRQ));
>> +
>> + return dev;
>> +}
>> +
>> static void virt_machine_init(MachineState *machine)
>> {
>> const MemMapEntry *memmap = virt_memmap;
>> @@ -1612,6 +1639,10 @@ static void virt_machine_init(MachineState *machine)
>>
>> gpex_pcie_init(system_memory, pcie_irqchip, s);
>>
>> + if (virt_is_acpi_enabled(s)) {
>> + s->acpi_dev = create_acpi_ged(s);
>> + }
>> +
>> create_platform_bus(s, mmio_irqchip);
>>
>> serial_mm_init(system_memory, memmap[VIRT_UART0].base,
>> @@ -1752,6 +1783,7 @@ static HotplugHandler
>> *virt_machine_get_hotplug_handler(MachineState *machine,
>> MachineClass *mc = MACHINE_GET_CLASS(machine);
>>
>> if (device_is_dynamic_sysbus(mc, dev) ||
>> + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
>> object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
>> object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
>> return HOTPLUG_HANDLER(machine);
>> @@ -1759,14 +1791,42 @@ static HotplugHandler
>> *virt_machine_get_hotplug_handler(MachineState *machine,
>> return NULL;
>> }
>>
>> +static void virt_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState
>> *dev,
>> + Error **errp)
>> +{
>> + RISCVVirtState *s = RISCV_VIRT_MACHINE(hotplug_dev);
>> +
>> + if (!s->acpi_dev) {
>> + error_setg(errp,
>> + "memory hotplug is not enabled: missing acpi-ged
>> device");
>> + return;
>> + }
>> +
>> + pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), NULL, errp);
>> +}
>> +
>
> Note that we're not doing any alignment checks in this pre_plug(), meaning that
> we're kind of accepting whatever the pc-dimm device throws at us.
>
> Testing in an AMD x86 machine will force the pc-dimm to be 2MB aligned:
>
> $ ./build/qemu-system-riscv64 -M virt -m 2G,slots=4,maxmem=8G -nographic
> (...)
> (qemu) object_add memory-backend-ram,id=ram0,size=111M
> (qemu) device_add pc-dimm,id=dimm0,memdev=ram0
> Error: backend memory size must be multiple of 0x200000
> (qemu) object_del ram0
>
> This happens because the DIMM must be aligned with its own backend, in this
> case
> the host memory itself (backends/hostmem.c).
>
> There's no guarantee that we'll always run on a host that is memory-aligned
> with the board,
> so it would be nice to add alignment checks in virt_memory_pre_plug().
Indeed! I'll look into that.
>> static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
>> DeviceState *dev, Error **errp)
>> {
>> + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
>> + virt_memory_pre_plug(hotplug_dev, dev, errp);
>> + }
>> +
>> if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
>> virtio_md_pci_pre_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev),
>> errp);
>> }
>> }
>>
>> +static void virt_memory_plug(HotplugHandler *hotplug_dev,
>> + DeviceState *dev, Error **errp)
>> +{
>> + RISCVVirtState *s = RISCV_VIRT_MACHINE(hotplug_dev);
>> +
>> + pc_dimm_plug(PC_DIMM(dev), MACHINE(s));
>> +
>> + hotplug_handler_plug(HOTPLUG_HANDLER(s->acpi_dev), dev, &error_abort);
>> +}
>> +
>> static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
>> DeviceState *dev, Error **errp)
>> {
>> @@ -1785,16 +1845,36 @@ static void
>> virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
>> create_fdt_virtio_iommu(s, pci_get_bdf(PCI_DEVICE(dev)));
>> }
>>
>> + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
>> + virt_memory_plug(hotplug_dev, dev, errp);
>> + }
>> +
>> if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
>> virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
>> }
>> }
>>
>> +static void virt_dimm_unplug_request(HotplugHandler *hotplug_dev,
>> + DeviceState *dev, Error **errp)
>> +{
>> + RISCVVirtState *s = RISCV_VIRT_MACHINE(hotplug_dev);
>> +
>> + if (!s->acpi_dev) {
>> + error_setg(errp,
>> + "memory hotplug is not enabled: missing acpi-ged
>> device");
>> + return;
>> + }
>> +
>> + hotplug_handler_unplug_request(HOTPLUG_HANDLER(s->acpi_dev), dev, errp);
>> +}
>> +
>
> I'm unsure if we're ready to support both hotplug and hot-unplug, but I
> tested anyway.
> Hotplug seems to work but hot-unplug doesn't:
>
> $ ./build/qemu-system-riscv64 -M virt -m 2G,slots=4,maxmem=8G -nographic
> (...)
> (qemu) object_add memory-backend-ram,id=ram0,size=112M
> (qemu) device_add pc-dimm,id=dimm0,memdev=ram0
> (qemu)
> (qemu) info memory-devices
> Memory device [dimm]: "dimm0"
> addr: 0x100000000
> slot: 0
> node: 0
> size: 117440512
> memdev: /objects/ram0
> hotplugged: true
> hotpluggable: true
> (qemu)
> (qemu) device_del dimm0
> (qemu) object_del ram0
> Error: object 'ram0' is in use, can not be deleted
> (qemu) info memory-devices
> Memory device [dimm]: "dimm0"
> addr: 0x100000000
> slot: 0
> node: 0
> size: 117440512
> memdev: /objects/ram0
> hotplugged: true
> hotpluggable: true
> (qemu)
>
> In short: I hotplugged a 112MB DIMM, then tried to remove it. 'device_del'
> doesn't error
> out, but it doesn't let me remove the memory backing that was created, i.e.
> the dimm0 device is
> still around.
>
> In a quick digging I see that we're hitting virt_dimm_unplug_request() all
> the way
> down to acpi_memory_unplug_request_cb(), where an ACPI_MEMORY_HOTPLUG_STATUS
> is being
> sent. We never reach virt_dimm_unplug() afterwards, so the PC_DIMM is never
> removed.
>
> I'm not acquainted enough with ACPI to say whether we're missing something in
> QEMU, whether we
> need software to be aware of this ACPI hotplug event to trigger the release of
> the DIMM, or
> anything in between.
>
> I consider this more as a FYI. If we're up to the point of hotplugging
> pc-dimms it's
> already an improvement worth having. Hot-unplugging can come later.
I'll debug this as well!
Thanks for all the input, Daniel!
Björn
- Re: [PATCH v2 1/3] hw/riscv/virt: Add memory hotplugging and virtio-md-pci support, (continued)
[PATCH v2 2/3] hw/riscv/virt-acpi-build: Expose device memory in ACPI SRAT, Björn Töpel, 2024/05/21
[PATCH v2 3/3] hw/riscv/virt: Add ACPI GED and PC-DIMM MHP support, Björn Töpel, 2024/05/21