qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 10/13] contrib/plugins/cflow: fix 32-bit build


From: Richard Henderson
Subject: Re: [PATCH 10/13] contrib/plugins/cflow: fix 32-bit build
Date: Tue, 17 Dec 2024 09:35:59 -0600
User-agent: Mozilla Thunderbird

On 12/16/24 19:07, Pierrick Bouvier wrote:
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
  contrib/plugins/cflow.c | 17 +++++++++++------
  1 file changed, 11 insertions(+), 6 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~


diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c
index b39974d1cf3..930ecb46fcd 100644
--- a/contrib/plugins/cflow.c
+++ b/contrib/plugins/cflow.c
@@ -76,6 +76,8 @@ typedef struct {
/* We use this to track the current execution state */
  typedef struct {
+    /* address of current translated block */
+    uint64_t tb_pc;
      /* address of end of block */
      uint64_t end_block;
      /* next pc after end of block */
@@ -85,6 +87,7 @@ typedef struct {
  } VCPUScoreBoard;
/* descriptors for accessing the above scoreboard */
+static qemu_plugin_u64 tb_pc;
  static qemu_plugin_u64 end_block;
  static qemu_plugin_u64 pc_after_block;
  static qemu_plugin_u64 last_pc;
@@ -189,10 +192,11 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
  static void plugin_init(void)
  {
      g_mutex_init(&node_lock);
-    nodes = g_hash_table_new(NULL, g_direct_equal);
+    nodes = g_hash_table_new(g_int64_hash, g_int64_equal);
      state = qemu_plugin_scoreboard_new(sizeof(VCPUScoreBoard));
/* score board declarations */
+    tb_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, tb_pc);
      end_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard,
                                                       end_block);
    pc_after_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard,
@@ -215,10 +219,10 @@ static NodeData *fetch_node(uint64_t addr, bool create_if_not_found)
      NodeData *node = NULL;
g_mutex_lock(&node_lock);
-    node = (NodeData *) g_hash_table_lookup(nodes, (gconstpointer) addr);
+    node = (NodeData *) g_hash_table_lookup(nodes, &addr);
      if (!node && create_if_not_found) {
          node = create_node(addr);
-        g_hash_table_insert(nodes, (gpointer) addr, (gpointer) node);
+        g_hash_table_insert(nodes, &node->addr, node);
      }
      g_mutex_unlock(&node_lock);
      return node;
@@ -234,7 +238,7 @@ static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
      uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index);
      uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index);
      uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index);
-    uint64_t pc = GPOINTER_TO_UINT(udata);
+    uint64_t pc = qemu_plugin_u64_get(tb_pc, cpu_index);
/* return early for address 0 */
      if (!lpc) {
@@ -305,10 +309,11 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
       * handle both early block exits and normal branches in the
       * callback if we hit it.
       */
-    gpointer udata = GUINT_TO_POINTER(pc);
+    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+        tb, QEMU_PLUGIN_INLINE_STORE_U64, tb_pc, pc);
      qemu_plugin_register_vcpu_tb_exec_cond_cb(
          tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,
-        QEMU_PLUGIN_COND_NE, pc_after_block, pc, udata);
+        QEMU_PLUGIN_COND_NE, pc_after_block, pc, NULL);
/*
       * Now we can set start/end for this block so the next block can




reply via email to

[Prev in Thread] Current Thread [Next in Thread]