[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v4 06/11] hmat acpi: Build System Locality Laten
From: |
Igor Mammedov |
Subject: |
Re: [Qemu-devel] [PATCH v4 06/11] hmat acpi: Build System Locality Latency and Bandwidth Information Structure(s) in ACPI HMAT |
Date: |
Tue, 4 Jun 2019 16:43:39 +0200 |
On Wed, 8 May 2019 14:17:21 +0800
Tao Xu <address@hidden> wrote:
> From: Liu Jingqi <address@hidden>
>
> This structure describes the memory access latency and bandwidth
> information from various memory access initiator proximity domains.
> The latency and bandwidth numbers represented in this structure
> correspond to rated latency and bandwidth for the platform.
> The software could use this information as a hint for optimization.
>
> Signed-off-by: Liu Jingqi <address@hidden>
> Signed-off-by: Tao Xu <address@hidden>
> ---
>
> Changes in v3 -> v4:
> - use build_append_int_noprefix() to build System Locality Latency
> and Bandwidth Information Structure(s) tables (Igor)
> - move globals (hmat_lb_info) into MachineState (Igor)
> - move hmat_build_lb() inside of hmat_build_hma() (Igor)
> ---
> hw/acpi/hmat.c | 97 ++++++++++++++++++++++++++++++++++++++++-
> hw/acpi/hmat.h | 39 +++++++++++++++++
> include/hw/boards.h | 3 ++
> include/qemu/typedefs.h | 1 +
> include/sysemu/sysemu.h | 22 ++++++++++
> 5 files changed, 161 insertions(+), 1 deletion(-)
>
> diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c
> index bffe453280..54aabf77eb 100644
> --- a/hw/acpi/hmat.c
> +++ b/hw/acpi/hmat.c
> @@ -29,6 +29,9 @@
> #include "hw/acpi/hmat.h"
> #include "hw/nvram/fw_cfg.h"
>
> +static uint32_t initiator_pxm[MAX_NODES], target_pxm[MAX_NODES];
> +static uint32_t num_initiator, num_target;
> +
> /* Build Memory Subsystem Address Range Structure */
> static void build_hmat_spa(GArray *table_data, MachineState *ms,
> uint64_t base, uint64_t length, int node)
> @@ -77,6 +80,20 @@ static int pc_dimm_device_list(Object *obj, void *opaque)
> return 0;
> }
>
> +static void classify_proximity_domains(MachineState *ms)
> +{
> + int node;
> +
> + for (node = 0; node < ms->numa_state->num_nodes; node++) {
> + if (ms->numa_state->nodes[node].is_initiator) {
> + initiator_pxm[num_initiator++] = node;
> + }
> + if (ms->numa_state->nodes[node].is_target) {
> + target_pxm[num_target++] = node;
> + }
> + }
> +}
> +
> /*
> * The Proximity Domain of System Physical Address ranges defined
> * in the HMAT, NFIT and SRAT tables shall match each other.
> @@ -85,9 +102,10 @@ static void hmat_build_hma(GArray *table_data,
> MachineState *ms)
> {
> GSList *device_list = NULL;
> uint64_t mem_base, mem_len;
> - int i;
> + int i, j, hrchy, type;
> uint32_t mem_ranges_num = ms->numa_state->mem_ranges_num;
> NumaMemRange *mem_ranges = ms->numa_state->mem_ranges;
> + HMAT_LB_Info *numa_hmat_lb;
>
> PCMachineState *pcms = PC_MACHINE(ms);
> AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
> @@ -117,6 +135,83 @@ static void hmat_build_hma(GArray *table_data,
> MachineState *ms)
> i = object_property_get_uint(OBJECT(dimm), PC_DIMM_NODE_PROP, NULL);
> build_hmat_spa(table_data, ms, mem_base, mem_len, i);
> }
>
Considering that the part below is sufficiently big, I'd move it into a separate function,
e.g. build_hmat_lb()
> + if (!num_initiator && !num_target) {
> + classify_proximity_domains(ms);
This part I'd just inline instead of keeping it as a separate function,
and make initiator_pxm, target_pxm, num_initiator, num_target local variables
instead of globals. (Is there a reason why they weren't made locals?)
> + }
> +
> + /* Build HMAT System Locality Latency and Bandwidth Information. */
> + for (hrchy = HMAT_LB_MEM_MEMORY;
> + hrchy <= HMAT_LB_MEM_CACHE_3RD_LEVEL; hrchy++) {
> + for (type = HMAT_LB_DATA_ACCESS_LATENCY;
> + type <= HMAT_LB_DATA_WRITE_BANDWIDTH; type++) {
> + numa_hmat_lb = ms->numa_state->hmat_lb[hrchy][type];
> +
> + if (numa_hmat_lb) {
> + uint32_t s = num_initiator;
> + uint32_t t = num_target;
> + uint8_t m, n;
> +
> + /* Type */
> + build_append_int_noprefix(table_data, 1, 2);
> + /* Reserved */
> + build_append_int_noprefix(table_data, 0, 2);
> + /* Length */
> + build_append_int_noprefix(table_data,
> + 32 + 4 * s + 4 * t + 2 * s * t, 4);
> + /* Flags */
> + build_append_int_noprefix(table_data,
> + numa_hmat_lb->hierarchy, 1);
> + /* Data Type */
> + build_append_int_noprefix(table_data,
> + numa_hmat_lb->data_type, 1);
> + /* Reserved */
> + build_append_int_noprefix(table_data, 0, 2);
> + /* Number of Initiator Proximity Domains (s) */
> + build_append_int_noprefix(table_data, s, 4);
> + /* Number of Target Proximity Domains (t) */
> + build_append_int_noprefix(table_data, t, 4);
> + /* Reserved */
> + build_append_int_noprefix(table_data, 0, 4);
> +
> + /* Entry Base Unit */
> + if (type <= HMAT_LB_DATA_WRITE_LATENCY) {
> + build_append_int_noprefix(table_data,
> + numa_hmat_lb->base_lat, 8);
> + } else {
> + build_append_int_noprefix(table_data,
> + numa_hmat_lb->base_bw, 8);
> + }
> +
> + /* Initiator Proximity Domain List */
> + for (i = 0; i < s; i++) {
> + build_append_int_noprefix(table_data, initiator_pxm[i],
> 4);
> + }
> +
> + /* Target Proximity Domain List */
> + for (i = 0; i < t; i++) {
> + build_append_int_noprefix(table_data, target_pxm[i], 4);
> + }
> +
> + /* Latency or Bandwidth Entries */
> + for (i = 0; i < s; i++) {
> + m = initiator_pxm[i];
> + for (j = 0; j < t; j++) {
> + n = target_pxm[j];
> + uint16_t entry;
> +
> + if (type <= HMAT_LB_DATA_WRITE_LATENCY) {
> + entry = numa_hmat_lb->latency[m][n];
> + } else {
> + entry = numa_hmat_lb->bandwidth[m][n];
> + }
> +
> + build_append_int_noprefix(table_data, entry, 2);
> + }
> + }
> + }
> + }
> + }
> }
>
> void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState
> *ms)
> diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h
> index 4f480c1e43..f37e30e533 100644
> --- a/hw/acpi/hmat.h
> +++ b/hw/acpi/hmat.h
> @@ -38,6 +38,45 @@ enum {
> HMAT_SPA_RESERVATION_HINT = 0x4,
> };
>
> +struct HMAT_LB_Info {
> + /*
> + * Indicates total number of Proximity Domains
> + * that can initiate memory access requests.
> + */
> + uint32_t num_initiator;
> + /*
> + * Indicates total number of Proximity Domains
> + * that can act as target.
> + */
> + uint32_t num_target;
> + /*
> + * Indicates whether it is memory or
> + * the specified level of memory-side cache.
> + */
> + uint8_t hierarchy;
> + /*
> + * Represents the type of data:
> + * access/read/write latency or bandwidth.
> + */
> + uint8_t data_type;
> + /* The base unit for latency in nanoseconds. */
> + uint64_t base_lat;
> + /* The base unit for bandwidth in megabytes per second(MB/s). */
> + uint64_t base_bw;
> + /*
> + * latency[i][j]:
> + * Indicates the latency based on base_lat
> + * from Initiator Proximity Domain i to Target Proximity Domain j.
> + */
> + uint16_t latency[MAX_NODES][MAX_NODES];
> + /*
> + * bandwidth[i][j]:
> + * Indicates the bandwidth based on base_bw
> + * from Initiator Proximity Domain i to Target Proximity Domain j.
> + */
> + uint16_t bandwidth[MAX_NODES][MAX_NODES];
> +};
> +
> void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState
> *ms);
>
> #endif
> diff --git a/include/hw/boards.h b/include/hw/boards.h
> index d392634e08..e0169b0a64 100644
> --- a/include/hw/boards.h
> +++ b/include/hw/boards.h
> @@ -263,6 +263,9 @@ typedef struct NumaState {
>
> /* NUMA memory ranges */
> NumaMemRange mem_ranges[MAX_NODES + 2];
> +
> + /* NUMA nodes' HMAT Locality Latency and Bandwidth Information */
> + HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
> } NumaState;
>
> /**
> diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
> index fcdaae58c4..c0257e936b 100644
> --- a/include/qemu/typedefs.h
> +++ b/include/qemu/typedefs.h
> @@ -33,6 +33,7 @@ typedef struct FWCfgEntry FWCfgEntry;
> typedef struct FWCfgIoState FWCfgIoState;
> typedef struct FWCfgMemState FWCfgMemState;
> typedef struct FWCfgState FWCfgState;
> +typedef struct HMAT_LB_Info HMAT_LB_Info;
> typedef struct HVFX86EmulatorState HVFX86EmulatorState;
> typedef struct I2CBus I2CBus;
> typedef struct I2SCodec I2SCodec;
> diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
> index 5f133cae83..da51a9bc26 100644
> --- a/include/sysemu/sysemu.h
> +++ b/include/sysemu/sysemu.h
> @@ -124,6 +124,28 @@ extern int mem_prealloc;
> #define NUMA_DISTANCE_MAX 254
> #define NUMA_DISTANCE_UNREACHABLE 255
>
> +/* the value of AcpiHmatLBInfo flags */
> +enum {
> + HMAT_LB_MEM_MEMORY = 0,
> + HMAT_LB_MEM_CACHE_LAST_LEVEL = 1,
> + HMAT_LB_MEM_CACHE_1ST_LEVEL = 2,
> + HMAT_LB_MEM_CACHE_2ND_LEVEL = 3,
> + HMAT_LB_MEM_CACHE_3RD_LEVEL = 4,
> +};
> +
> +/* the value of AcpiHmatLBInfo data type */
> +enum {
> + HMAT_LB_DATA_ACCESS_LATENCY = 0,
> + HMAT_LB_DATA_READ_LATENCY = 1,
> + HMAT_LB_DATA_WRITE_LATENCY = 2,
> + HMAT_LB_DATA_ACCESS_BANDWIDTH = 3,
> + HMAT_LB_DATA_READ_BANDWIDTH = 4,
> + HMAT_LB_DATA_WRITE_BANDWIDTH = 5,
> +};
> +
> +#define HMAT_LB_LEVELS (HMAT_LB_MEM_CACHE_3RD_LEVEL + 1)
> +#define HMAT_LB_TYPES (HMAT_LB_DATA_WRITE_BANDWIDTH + 1)
> #define MAX_OPTION_ROMS 16
> typedef struct QEMUOptionRom {
> const char *name;
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- Re: [Qemu-devel] [PATCH v4 06/11] hmat acpi: Build System Locality Latency and Bandwidth Information Structure(s) in ACPI HMAT,
Igor Mammedov <=