[Qemu-devel] [PATCH v5] numa: make 'info numa' take into account hotplugged memory


From: zhanghailiang
Subject: [Qemu-devel] [PATCH v5] numa: make 'info numa' take into account hotplugged memory
Date: Thu, 16 Oct 2014 20:02:23 +0800

When memory is hotplugged and NUMA nodes are configured, the
hotplugged memory size should be added to the corresponding node's
memory size.

For now, this mainly affects the result of the HMP command "info numa".

Reviewed-by: Igor Mammedov <address@hidden>
Signed-off-by: zhanghailiang <address@hidden>
---
 v5:
- reword the subject (Igor Mammedov)
- make query_numa_node_mem return void (Igor Mammedov)
 v4:
- s/pc_dimm_stat_node_mem/numa_stat_memory_devices/ (Igor Mammedov)
- rewrite numa_stat_memory_devices; this also fixes a compile error for
  targets that don't support memory hotplug
 v3:
- cold-plugged memory should not be excluded (Igor Mammedov)
 v2:
- Don't modify numa_info.node_mem directly when handling hotplugged memory;
  fix the "info numa" output instead (Igor Mammedov)

Thanks for the review! ;)
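
For reference, one way to exercise this from the HMP monitor (the ids and
sizes below are only illustrative, and it assumes a guest started with two
1G NUMA nodes plus -m ...,slots=...,maxmem=... so hotplug is possible):

  (qemu) object_add memory-backend-ram,id=mem1,size=1G
  (qemu) device_add pc-dimm,id=dimm1,memdev=mem1,node=1
  (qemu) info numa
  2 nodes
  node 0 cpus: 0
  node 0 size: 1024 MB
  node 1 cpus: 1
  node 1 size: 2048 MB

With this patch the hotplugged 1G DIMM is counted in node 1's total;
previously only the initial (cold-plugged) 1024 MB would be reported.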
---
 include/sysemu/sysemu.h |  1 +
 monitor.c               |  6 +++++-
 numa.c                  | 41 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 0037a69..ef5eaf4 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -161,6 +161,7 @@ typedef struct node_info {
 extern NodeInfo numa_info[MAX_NODES];
 void set_numa_nodes(void);
 void set_numa_modes(void);
+void query_numa_node_mem(uint64_t *node_mem);
 extern QemuOptsList qemu_numa_opts;
 int numa_init_func(QemuOpts *opts, void *opaque);
 
diff --git a/monitor.c b/monitor.c
index 2d14f39..d45b0a3 100644
--- a/monitor.c
+++ b/monitor.c
@@ -1949,7 +1949,10 @@ static void do_info_numa(Monitor *mon, const QDict *qdict)
 {
     int i;
     CPUState *cpu;
+    uint64_t *node_mem;
 
+    node_mem = g_new0(uint64_t, nb_numa_nodes);
+    query_numa_node_mem(node_mem);
     monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
     for (i = 0; i < nb_numa_nodes; i++) {
         monitor_printf(mon, "node %d cpus:", i);
@@ -1960,8 +1963,9 @@ static void do_info_numa(Monitor *mon, const QDict *qdict)
         }
         monitor_printf(mon, "\n");
         monitor_printf(mon, "node %d size: %" PRId64 " MB\n", i,
-            numa_info[i].node_mem >> 20);
+                       node_mem[i] >> 20);
     }
+    g_free(node_mem);
 }
 
 #ifdef CONFIG_PROFILER
diff --git a/numa.c b/numa.c
index 3b98135..5b84ffb 100644
--- a/numa.c
+++ b/numa.c
@@ -35,6 +35,7 @@
 #include "hw/boards.h"
 #include "sysemu/hostmem.h"
 #include "qmp-commands.h"
+#include "hw/mem/pc-dimm.h"
 
 QemuOptsList qemu_numa_opts = {
     .name = "numa",
@@ -315,6 +316,46 @@ void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
     }
 }
 
+static void numa_stat_memory_devices(uint64_t *node_mem)
+{
+    MemoryDeviceInfoList *info_list = NULL;
+    MemoryDeviceInfoList **prev = &info_list;
+    MemoryDeviceInfoList *info;
+
+    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
+    for (info = info_list; info; info = info->next) {
+        MemoryDeviceInfo *value = info->value;
+
+        if (value) {
+            switch (value->kind) {
+            case MEMORY_DEVICE_INFO_KIND_DIMM: {
+                PCDIMMDeviceInfo *di = value->dimm;
+
+                node_mem[di->node] += di->size;
+                break;
+            }
+            default:
+                break;
+            }
+        }
+    }
+    qapi_free_MemoryDeviceInfoList(info_list);
+}
+
+void query_numa_node_mem(uint64_t *node_mem)
+{
+    int i;
+
+    if (nb_numa_nodes <= 0) {
+        return;
+    }
+
+    numa_stat_memory_devices(node_mem);
+    for (i = 0; i < nb_numa_nodes; i++) {
+        node_mem[i] += numa_info[i].node_mem;
+    }
+}
+
 static int query_memdev(Object *obj, void *opaque)
 {
     MemdevList **list = opaque;
-- 
1.7.12.4