From: Igor Mammedov
Subject: Re: [Qemu-devel] [PATCH v4 07/11] hmat acpi: Build Memory Side Cache Information Structure(s) in ACPI HMAT
Date: Tue, 4 Jun 2019 17:04:56 +0200

On Wed,  8 May 2019 14:17:22 +0800
Tao Xu <address@hidden> wrote:

> From: Liu Jingqi <address@hidden>
> 
> This structure describes memory side cache information for memory
> proximity domains if the memory side cache is present and the
> physical device (SMBIOS handle) forms the memory side cache.
> Software can use this information to place data effectively in
> memory and maximize performance of the system memory that uses
> the memory side cache.
> 
> Signed-off-by: Liu Jingqi <address@hidden>
> Signed-off-by: Tao Xu <address@hidden>
> ---
> 
> Changes in v3 -> v4:
>     - use build_append_int_noprefix() to build Memory Side Cache
>     Information Structure(s) tables (Igor)
>     - move globals (hmat_cache_info) into MachineState (Igor)
>     - move hmat_build_cache() inside of hmat_build_hma() (Igor)
> ---
>  hw/acpi/hmat.c          | 50 ++++++++++++++++++++++++++++++++++++++++-
>  hw/acpi/hmat.h          | 25 +++++++++++++++++++++
>  include/hw/boards.h     |  3 +++
>  include/qemu/typedefs.h |  1 +
>  include/sysemu/sysemu.h |  8 +++++++
>  5 files changed, 86 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c
> index 54aabf77eb..3a8c41162d 100644
> --- a/hw/acpi/hmat.c
> +++ b/hw/acpi/hmat.c
> @@ -102,10 +102,11 @@ static void hmat_build_hma(GArray *table_data, MachineState *ms)
>  {
>      GSList *device_list = NULL;
>      uint64_t mem_base, mem_len;
> -    int i, j, hrchy, type;
> +    int i, j, hrchy, type, level;
>      uint32_t mem_ranges_num = ms->numa_state->mem_ranges_num;
>      NumaMemRange *mem_ranges = ms->numa_state->mem_ranges;
>      HMAT_LB_Info *numa_hmat_lb;
> +    HMAT_Cache_Info *numa_hmat_cache = NULL;
>  
>      PCMachineState *pcms = PC_MACHINE(ms);
>      AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
> @@ -212,6 +213,53 @@ static void hmat_build_hma(GArray *table_data, MachineState *ms)
>              }
>          }
>      }
> +
> +    /* Build HMAT Memory Side Cache Information. */
> +    for (i = 0; i < ms->numa_state->num_nodes; i++) {
> +        for (level = 0; level <= MAX_HMAT_CACHE_LEVEL; level++) {
> +            numa_hmat_cache = ms->numa_state->hmat_cache[i][level];
> +            if (numa_hmat_cache) {
> +                uint16_t n = numa_hmat_cache->num_smbios_handles;
> +                uint32_t cache_attr = HMAT_CACHE_TOTAL_LEVEL(
> +                                      numa_hmat_cache->total_levels);
> +                cache_attr |= HMAT_CACHE_CURRENT_LEVEL(
> +                              numa_hmat_cache->level);
> +                cache_attr |= HMAT_CACHE_ASSOC(
> +                                          numa_hmat_cache->associativity);
> +                cache_attr |= HMAT_CACHE_WRITE_POLICY(
> +                                          numa_hmat_cache->write_policy);
> +                cache_attr |= HMAT_CACHE_LINE_SIZE(
> +                                          numa_hmat_cache->line_size);
I don't see the merit of hiding the bitfield manipulation behind macros.
I'd suggest dropping the macros and open-coding the mask+shift here.
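
A minimal sketch of the open-coded form being suggested, assuming the
field layout from the macros defined later in this patch (the helper
name hmat_cache_attrs is made up for illustration, not part of the
patch):

    #include <stdint.h>

    /*
     * Pack the Cache Attributes DWORD by hand: bits 3:0 total cache
     * levels, bits 7:4 level described by this structure, bits 11:8
     * associativity, bits 15:12 write policy, bits 31:16 cache line
     * size in bytes.
     */
    static uint32_t hmat_cache_attrs(uint8_t total_levels, uint8_t level,
                                     uint8_t assoc, uint8_t write_policy,
                                     uint16_t line_size)
    {
        return (total_levels & 0xF) |
               ((level & 0xF) << 4) |
               ((assoc & 0xF) << 8) |
               ((write_policy & 0xF) << 12) |
               ((uint32_t)line_size << 16);
    }

The cast on line_size keeps the shift from being done on a promoted
signed int, which would overflow for line sizes >= 0x8000.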

> +                cache_attr = cpu_to_le32(cache_attr);
> +
> +                /* Memory Side Cache Information Structure */
> +                /* Type */
> +                build_append_int_noprefix(table_data, 2, 2);
> +                /* Reserved */
> +                build_append_int_noprefix(table_data, 0, 2);
> +                /* Length */
> +                build_append_int_noprefix(table_data, 32 + 2 * n, 4);
> +                /* Proximity Domain for the Memory */
> +                build_append_int_noprefix(table_data,
> +                                          numa_hmat_cache->mem_proximity, 4);
> +                /* Reserved */
> +                build_append_int_noprefix(table_data, 0, 4);
> +                /* Memory Side Cache Size */
> +                build_append_int_noprefix(table_data,
> +                                          numa_hmat_cache->size, 8);
> +                /* Cache Attributes */
> +                build_append_int_noprefix(table_data, cache_attr, 4);
> +                /* Reserved */
> +                build_append_int_noprefix(table_data, 0, 2);
> +                /* Number of SMBIOS handles (n) */
> +                build_append_int_noprefix(table_data, n, 2);
> +
> +                /* SMBIOS Handles */
> +                /* TBD: set smbios handles */
> +                build_append_int_noprefix(table_data, 0, 2 * n);
Is the memory side cache structure useful at all without pointing to SMBIOS entries?
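
If/when QEMU tracks the backing devices, the zero padding above could
become real handles. A hypothetical sketch (smbios_handles is an
invented field, not in this patch):

    /* Append each 2-byte SMBIOS handle instead of zero padding. */
    for (j = 0; j < n; j++) {
        build_append_int_noprefix(table_data,
                                  numa_hmat_cache->smbios_handles[j], 2);
    }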

> +            }
> +        }
> +    }
>  }
>  
>  void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState *ms)
> diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h
> index f37e30e533..8f563f19dd 100644
> --- a/hw/acpi/hmat.h
> +++ b/hw/acpi/hmat.h
> @@ -77,6 +77,31 @@ struct HMAT_LB_Info {
>      uint16_t    bandwidth[MAX_NODES][MAX_NODES];
>  };
>  
> +struct HMAT_Cache_Info {
> +    /* The memory proximity domain to which the memory belongs. */
> +    uint32_t    mem_proximity;
> +    /* Size of memory side cache in bytes. */
> +    uint64_t    size;
> +    /*
> +     * Total cache levels for this memory
> +     * proximity domain.
> +     */
> +    uint8_t     total_levels;
> +    /* Cache level described in this structure. */
> +    uint8_t     level;
> +    /* Cache Associativity: None/Direct Mapped/Complex Cache Indexing */
> +    uint8_t     associativity;
> +    /* Write Policy: None/Write Back(WB)/Write Through(WT) */
> +    uint8_t     write_policy;
> +    /* Cache Line size in bytes. */
> +    uint16_t    line_size;
> +    /*
> +     * Number of SMBIOS handles for the physical devices
> +     * that form the memory side cache.
> +     */
> +    uint16_t    num_smbios_handles;
> +};
> +
>  void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState *ms);
>  
>  #endif
> diff --git a/include/hw/boards.h b/include/hw/boards.h
> index e0169b0a64..8609f923d9 100644
> --- a/include/hw/boards.h
> +++ b/include/hw/boards.h
> @@ -266,6 +266,9 @@ typedef struct NumaState {
>  
>      /* NUMA modes HMAT Locality Latency and Bandwidth Information */
>      HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
> +
> +    /* Memory Side Cache Information Structure */
> +    HMAT_Cache_Info *hmat_cache[MAX_NODES][MAX_HMAT_CACHE_LEVEL + 1];
>  } NumaState;
>  
>  /**
> diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
> index c0257e936b..d971f5109e 100644
> --- a/include/qemu/typedefs.h
> +++ b/include/qemu/typedefs.h
> @@ -33,6 +33,7 @@ typedef struct FWCfgEntry FWCfgEntry;
>  typedef struct FWCfgIoState FWCfgIoState;
>  typedef struct FWCfgMemState FWCfgMemState;
>  typedef struct FWCfgState FWCfgState;
> +typedef struct HMAT_Cache_Info HMAT_Cache_Info;
>  typedef struct HMAT_LB_Info HMAT_LB_Info;
>  typedef struct HVFX86EmulatorState HVFX86EmulatorState;
>  typedef struct I2CBus I2CBus;
> diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
> index da51a9bc26..0cfb387887 100644
> --- a/include/sysemu/sysemu.h
> +++ b/include/sysemu/sysemu.h
> @@ -143,9 +143,17 @@ enum {
>      HMAT_LB_DATA_WRITE_BANDWIDTH  = 5,
>  };
>  
> +#define MAX_HMAT_CACHE_LEVEL        3
> +
>  #define HMAT_LB_LEVELS    (HMAT_LB_MEM_CACHE_3RD_LEVEL + 1)
>  #define HMAT_LB_TYPES     (HMAT_LB_DATA_WRITE_BANDWIDTH + 1)
>  
> +#define HMAT_CACHE_TOTAL_LEVEL(level)      (level & 0xF)
> +#define HMAT_CACHE_CURRENT_LEVEL(level)    ((level & 0xF) << 4)
> +#define HMAT_CACHE_ASSOC(assoc)            ((assoc & 0xF) << 8)
> +#define HMAT_CACHE_WRITE_POLICY(policy)    ((policy & 0xF) << 12)
> +#define HMAT_CACHE_LINE_SIZE(size)         ((size & 0xFFFF) << 16)
> +
>  #define MAX_OPTION_ROMS 16
>  typedef struct QEMUOptionRom {
>      const char *name;
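
For reference, a self-contained worked example of the Cache Attributes
encoding these macros produce. The sample values are hypothetical
(direct mapped and write back are assumed to encode as 1, following
the None/Direct Mapped/... ordering in the struct comments), and
parentheses plus a cast are added around the macro arguments for
hygiene:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HMAT_CACHE_TOTAL_LEVEL(level)   ((level) & 0xF)
    #define HMAT_CACHE_CURRENT_LEVEL(level) (((level) & 0xF) << 4)
    #define HMAT_CACHE_ASSOC(assoc)         (((assoc) & 0xF) << 8)
    #define HMAT_CACHE_WRITE_POLICY(policy) (((policy) & 0xF) << 12)
    #define HMAT_CACHE_LINE_SIZE(size)      (((uint32_t)(size) & 0xFFFF) << 16)

    int main(void)
    {
        /*
         * One cache level total, describing level 1, direct mapped,
         * write back, 64-byte cache line.
         */
        uint32_t attr = HMAT_CACHE_TOTAL_LEVEL(1)
                      | HMAT_CACHE_CURRENT_LEVEL(1)
                      | HMAT_CACHE_ASSOC(1)
                      | HMAT_CACHE_WRITE_POLICY(1)
                      | HMAT_CACHE_LINE_SIZE(64);

        printf("cache_attr = 0x%08" PRIx32 "\n", attr); /* 0x00401111 */
        return 0;
    }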