[Qemu-devel] [PATCH 01/17] cpu-exec: unify icount_decr and tcg_exit_req


From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 01/17] cpu-exec: unify icount_decr and tcg_exit_req
Date: Mon, 27 Feb 2017 13:45:35 +0100

The icount interrupt flag and tcg_exit_req serve almost the same
purpose; make them completely the same.

The former TB_EXIT_REQUESTED and TB_EXIT_ICOUNT_EXPIRED cases are
unified, since we can distinguish them by the value of the
interrupt flag.

Signed-off-by: Paolo Bonzini <address@hidden>
---
 cpu-exec.c                | 80 ++++++++++++++++++++++-------------------------
 include/exec/gen-icount.h | 53 +++++++++++++++----------------
 include/qom/cpu.h         | 15 +++++----
 qom/cpu.c                 |  2 +-
 tcg/tcg.h                 |  1 -
 translate-all.c           |  2 +-
 translate-common.c        | 13 +++-----
 7 files changed, 76 insertions(+), 90 deletions(-)
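
Not part of the patch, but as a minimal self-contained sketch of the
resulting mechanism: the IcountDecr and tb_should_exit names below are
invented for illustration, a little-endian host is assumed, and the
atomics and memory barriers of the real code are omitted.  It compiles
standalone and shows how a single signed compare covers both the icount
budget and the exit request.

/* A 16/16 split of one 32-bit word, mirroring CPUState::icount_decr
 * after this patch.  Little-endian only; the real icount_decr_u16
 * handles both host endiannesses.
 */
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint32_t u32;
    struct {
        uint16_t low;   /* icount budget for the current slice */
        uint16_t high;  /* set to 0xffff to request an exit */
    } u16;
} IcountDecr;

/* Roughly what gen_tb_start() emits: subtract the TB's instruction
 * count and take the slow path if the result went negative.  With
 * high == 0 this fires only when the icount budget runs out; with
 * high == 0xffff the whole word is already negative, so the TB exits
 * immediately, replacing the old tcg_exit_req check.
 */
static int tb_should_exit(IcountDecr *d, int num_insns)
{
    int32_t count = (int32_t)d->u32 - num_insns;
    if (count < 0) {
        return 1;                     /* exit to the main loop */
    }
    d->u16.low = (uint16_t)count;     /* commit the new budget */
    return 0;
}

int main(void)
{
    IcountDecr d = { .u32 = 100 };

    /* Asynchronous exit request, as in cpu_exit(): flip the high half. */
    d.u16.high = (uint16_t)-1;
    if (tb_should_exit(&d, 5) && (int32_t)d.u32 < 0) {
        /* cpu_loop_exec_tb(): negative insns_left means "exit requested". */
        d.u16.high = 0;               /* acknowledge, budget preserved */
        printf("exit request, %d insns of budget left\n", (int)d.u16.low);
    }

    /* icount expiry: 3 instructions of budget, a 5-instruction TB. */
    d.u32 = 3;
    if (tb_should_exit(&d, 5) && (int32_t)d.u32 >= 0) {
        /* Non-negative insns_left: the counter really expired; the
         * remaining budget would go to cpu_exec_nocache(). */
        printf("icount expired, %d insns still allowed\n", (int)d.u32);
    }
    return 0;
}

With this layout cpu_exit() and cpu_interrupt() only need to store -1
into the high half, and the TB prologue's existing signed comparison
doubles as the exit-request check, which is what allows
TB_EXIT_ICOUNT_EXPIRED to be dropped.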

diff --git a/cpu-exec.c b/cpu-exec.c
index 1a5ad48..6fd3f47 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -186,12 +186,6 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
             cc->set_pc(cpu, last_tb->pc);
         }
     }
-    if (tb_exit == TB_EXIT_REQUESTED) {
-        /* We were asked to stop executing TBs (probably a pending
-         * interrupt. We've now stopped, so clear the flag.
-         */
-        atomic_set(&cpu->tcg_exit_req, 0);
-    }
     return ret;
 }
 
@@ -575,6 +569,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                     SyncClocks *sc)
 {
     uintptr_t ret;
+    int32_t insns_left;
 
     if (unlikely(atomic_read(&cpu->exit_request))) {
         return;
@@ -584,49 +579,48 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     ret = cpu_tb_exec(cpu, tb);
     tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
     *tb_exit = ret & TB_EXIT_MASK;
-    switch (*tb_exit) {
-    case TB_EXIT_REQUESTED:
+    if (*tb_exit != TB_EXIT_REQUESTED) {
+        *last_tb = tb;
+        return;
+    }
+
+    *last_tb = NULL;
+    insns_left = atomic_read(&cpu->icount_decr.u32);
+    atomic_set(&cpu->icount_decr.u16.high, 0);
+    if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
-         * will also have set something else (eg interrupt_request)
-         * which we will handle next time around the loop.  But we
-         * need to ensure the tcg_exit_req read in generated code
-         * comes before the next read of cpu->exit_request or
-         * cpu->interrupt_request.
+         * will also have set something else (eg exit_request or
+         * interrupt_request) which we will handle next time around
+         * the loop.  But we need to ensure the zeroing of icount_decr
+         * comes before the next read of cpu->exit_request
+         * or cpu->interrupt_request.
          */
         smp_mb();
-        *last_tb = NULL;
-        break;
-    case TB_EXIT_ICOUNT_EXPIRED:
-    {
-        /* Instruction counter expired.  */
-#ifdef CONFIG_USER_ONLY
-        abort();
-#else
-        int insns_left = cpu->icount_decr.u32;
-        *last_tb = NULL;
-        if (cpu->icount_extra && insns_left >= 0) {
-            /* Refill decrementer and continue execution.  */
-            cpu->icount_extra += insns_left;
-            insns_left = MIN(0xffff, cpu->icount_extra);
-            cpu->icount_extra -= insns_left;
-            cpu->icount_decr.u16.low = insns_left;
-        } else {
-            if (insns_left > 0) {
-                /* Execute remaining instructions.  */
-                cpu_exec_nocache(cpu, insns_left, tb, false);
-                align_clocks(sc, cpu);
-            }
-            cpu->exception_index = EXCP_INTERRUPT;
-            cpu_loop_exit(cpu);
-        }
-        break;
-#endif
+        return;
     }
-    default:
-        *last_tb = tb;
-        break;
+
+    /* Instruction counter expired.  */
+    assert(use_icount);
+#ifndef CONFIG_USER_ONLY
+    if (cpu->icount_extra) {
+        /* Refill decrementer and continue execution.  */
+        cpu->icount_extra += insns_left;
+        insns_left = MIN(0xffff, cpu->icount_extra);
+        cpu->icount_extra -= insns_left;
+        cpu->icount_decr.u16.low = insns_left;
+    } else {
+        /* Execute any remaining instructions, then let the main loop
+         * handle the next event.
+         */
+        if (insns_left > 0) {
+            cpu_exec_nocache(cpu, insns_left, tb, false);
+            align_clocks(sc, cpu);
+        }
+        cpu->exception_index = EXCP_INTERRUPT;
+        cpu_loop_exit(cpu);
     }
+#endif
 }
 
 /* main execution loop */
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 050de59..62d462e 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -6,58 +6,55 @@
 /* Helpers for instruction counting code generation.  */
 
 static int icount_start_insn_idx;
-static TCGLabel *icount_label;
 static TCGLabel *exitreq_label;
 
 static inline void gen_tb_start(TranslationBlock *tb)
 {
-    TCGv_i32 count, flag, imm;
+    TCGv_i32 count, imm;
 
     exitreq_label = gen_new_label();
-    flag = tcg_temp_new_i32();
-    tcg_gen_ld_i32(flag, cpu_env,
-                   offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
-    tcg_gen_brcondi_i32(TCG_COND_NE, flag, 0, exitreq_label);
-    tcg_temp_free_i32(flag);
-
-    if (!(tb->cflags & CF_USE_ICOUNT)) {
-        return;
+    if (tb->cflags & CF_USE_ICOUNT) {
+        count = tcg_temp_local_new_i32();
+    } else {
+        count = tcg_temp_new_i32();
     }
 
-    icount_label = gen_new_label();
-    count = tcg_temp_local_new_i32();
     tcg_gen_ld_i32(count, cpu_env,
                    -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
 
-    imm = tcg_temp_new_i32();
-    /* We emit a movi with a dummy immediate argument. Keep the insn index
-     * of the movi so that we later (when we know the actual insn count)
-     * can update the immediate argument with the actual insn count.  */
-    icount_start_insn_idx = tcg_op_buf_count();
-    tcg_gen_movi_i32(imm, 0xdeadbeef);
+    if (tb->cflags & CF_USE_ICOUNT) {
+        imm = tcg_temp_new_i32();
+        /* We emit a movi with a dummy immediate argument. Keep the insn index
+         * of the movi so that we later (when we know the actual insn count)
+         * can update the immediate argument with the actual insn count.  */
+        icount_start_insn_idx = tcg_op_buf_count();
+        tcg_gen_movi_i32(imm, 0xdeadbeef);
+
+        tcg_gen_sub_i32(count, count, imm);
+        tcg_temp_free_i32(imm);
+    }
+
+    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, exitreq_label);
 
-    tcg_gen_sub_i32(count, count, imm);
-    tcg_temp_free_i32(imm);
+    if (tb->cflags & CF_USE_ICOUNT) {
+        tcg_gen_st16_i32(count, cpu_env,
+                         -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
+    }
 
-    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
-    tcg_gen_st16_i32(count, cpu_env,
-                     -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
     tcg_temp_free_i32(count);
 }
 
 static void gen_tb_end(TranslationBlock *tb, int num_insns)
 {
-    gen_set_label(exitreq_label);
-    tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
-
     if (tb->cflags & CF_USE_ICOUNT) {
         /* Update the num_insn immediate parameter now that we know
          * the actual insn count.  */
         tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);
-        gen_set_label(icount_label);
-        tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
     }
 
+    gen_set_label(exitreq_label);
+    tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
+
     /* Terminate the linked list.  */
     tcg_ctx.gen_op_buf[tcg_ctx.gen_op_buf[0].prev].next = 0;
 }
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 3e61c88..c3292ef 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -275,11 +275,11 @@ struct qemu_work_item;
  * @stopped: Indicates the CPU has been artificially stopped.
  * @unplug: Indicates a pending CPU unplug request.
  * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
- * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
- *           CPU and return to its top level loop.
  * @singlestep_enabled: Flags for single-stepping.
  * @icount_extra: Instructions until next timer event.
- * @icount_decr: Number of cycles left, with interrupt flag in high bit.
+ * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
+ * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
+ * CPU and return to its top level loop (even in non-icount mode).
  * This allows a single read-compare-cbranch-write sequence to test
  * for both decrementer underflow and exceptions.
  * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
@@ -382,10 +382,6 @@ struct CPUState {
     /* TODO Move common fields from CPUArchState here. */
     int cpu_index; /* used by alpha TCG */
     uint32_t halted; /* used by alpha, cris, ppc TCG */
-    union {
-        uint32_t u32;
-        icount_decr_u16 u16;
-    } icount_decr;
     uint32_t can_do_io;
     int32_t exception_index; /* used by m68k TCG */
 
@@ -398,7 +394,10 @@ struct CPUState {
        offset from AREG0.  Leave this field at the end so as to make the
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
-    uint32_t tcg_exit_req;
+    union {
+        uint32_t u32;
+        icount_decr_u16 u16;
+    } icount_decr;
 
     bool hax_vcpu_dirty;
     struct hax_vcpu_state *hax_vcpu;
diff --git a/qom/cpu.c b/qom/cpu.c
index 58784bc..f02e9c0 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -133,7 +133,7 @@ void cpu_exit(CPUState *cpu)
     atomic_set(&cpu->exit_request, 1);
     /* Ensure cpu_exec will see the exit request after TCG has exited.  */
     smp_wmb();
-    atomic_set(&cpu->tcg_exit_req, 1);
+    atomic_set(&cpu->icount_decr.u16.high, -1);
 }
 
 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 4c7f258..6c216bb 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -1101,7 +1101,6 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
 #define TB_EXIT_MASK 3
 #define TB_EXIT_IDX0 0
 #define TB_EXIT_IDX1 1
-#define TB_EXIT_ICOUNT_EXPIRED 2
 #define TB_EXIT_REQUESTED 3
 
 #ifdef HAVE_TCG_QEMU_TB_EXEC
diff --git a/translate-all.c b/translate-all.c
index 9bac061..d42d003 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1930,7 +1930,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
     cpu->interrupt_request |= mask;
-    cpu->tcg_exit_req = 1;
+    cpu->icount_decr.u16.high = -1;
 }
 
 /*
diff --git a/translate-common.c b/translate-common.c
index d504dd0..40fe5a1 100644
--- a/translate-common.c
+++ b/translate-common.c
@@ -43,14 +43,11 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        if (use_icount) {
-            cpu->icount_decr.u16.high = 0xffff;
-            if (!cpu->can_do_io
-                && (mask & ~old_mask) != 0) {
-                cpu_abort(cpu, "Raised interrupt while not in I/O function");
-            }
-        } else {
-            cpu->tcg_exit_req = 1;
+        cpu->icount_decr.u16.high = -1;
+        if (use_icount &&
+            !cpu->can_do_io
+            && (mask & ~old_mask) != 0) {
+            cpu_abort(cpu, "Raised interrupt while not in I/O function");
         }
     }
 }
-- 
2.9.3




