From: Richard Henderson
Subject: [Qemu-devel] [PULL 31/39] cpu-exec: Move TB chaining into tb_find_fast()
Date: Thu, 12 May 2016 14:13:32 -1000

From: Sergey Fedorov <address@hidden>

Move the tb_add_jump() call and the surrounding code from cpu_exec()
into tb_find_fast(). This simplifies cpu_exec() a little by hiding the
details of the direct chaining optimization inside tb_find_fast(). It
also allows moving the tb_lock()/tb_unlock() pair into tb_find_fast(),
putting it closer to tb_find_slow(), which also manipulates the lock.
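For orientation, the resulting tb_find_fast() looks roughly as follows.
This is only a sketch reconstructed from the hunks below (declarations
and exact wrapping abbreviated); the diff itself is authoritative:

    static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                                                 TranslationBlock **last_tb,
                                                 int tb_exit)
    {
        CPUArchState *env = (CPUArchState *)cpu->env_ptr;
        TranslationBlock *tb;
        target_ulong cs_base, pc;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        tb_lock();
        /* Fast path: per-CPU jump cache lookup. */
        tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
        if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                     tb->flags != flags)) {
            /* Slow path: full lookup, possibly translating a new TB. */
            tb = tb_find_slow(cpu, pc, cs_base, flags);
        }
        if (cpu->tb_flushed) {
            /* The translation buffer was flushed, so the previous TB may
             * be gone; do not patch a jump into it. */
            *last_tb = NULL;
            cpu->tb_flushed = false;
        }
        /* Chain the previous TB to this one unless chaining is disabled. */
        if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
            tb_add_jump(*last_tb, tb_exit, tb);
        }
        tb_unlock();
        return tb;
    }

The call site in cpu_exec() then collapses to a single
tb = tb_find_fast(cpu, &last_tb, tb_exit), as the last hunk shows.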

Suggested-by: Alex Bennée <address@hidden>
Signed-off-by: Sergey Fedorov <address@hidden>
Signed-off-by: Sergey Fedorov <address@hidden>
Signed-off-by: Richard Henderson <address@hidden>
[rth: Fixed rebase typo in nochain test.]
---
 cpu-exec.c | 35 +++++++++++++++++++----------------
 1 file changed, 19 insertions(+), 16 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index f49a436..7380b1e 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -320,7 +320,9 @@ found:
     return tb;
 }
 
-static inline TranslationBlock *tb_find_fast(CPUState *cpu)
+static inline TranslationBlock *tb_find_fast(CPUState *cpu,
+                                             TranslationBlock **last_tb,
+                                             int tb_exit)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
@@ -331,11 +333,24 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu)
        always be the same before a given translated block
        is executed. */
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+    tb_lock();
     tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
+    if (cpu->tb_flushed) {
+        /* Ensure that no TB jump will be modified as the
+         * translation buffer has been flushed.
+         */
+        *last_tb = NULL;
+        cpu->tb_flushed = false;
+    }
+    /* See if we can patch the calling TB. */
+    if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+        tb_add_jump(*last_tb, tb_exit, tb);
+    }
+    tb_unlock();
     return tb;
 }
 
@@ -441,7 +456,8 @@ int cpu_exec(CPUState *cpu)
             } else if (replay_has_exception()
                        && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                 /* try to cause an exception pending in the log */
-                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
+                last_tb = NULL; /* Avoid chaining TBs */
+                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
                 ret = -1;
                 break;
 #endif
@@ -511,20 +527,7 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
-                tb_lock();
-                tb = tb_find_fast(cpu);
-                if (cpu->tb_flushed) {
-                    /* Ensure that no TB jump will be modified as the
-                     * translation buffer has been flushed.
-                     */
-                    last_tb = NULL;
-                    cpu->tb_flushed = false;
-                }
-                /* See if we can patch the calling TB. */
-                if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-                    tb_add_jump(last_tb, tb_exit, tb);
-                }
-                tb_unlock();
+                tb = tb_find_fast(cpu, &last_tb, tb_exit);
                 if (likely(!cpu->exit_request)) {
                     uintptr_t ret;
                     trace_exec_tb(tb, tb->pc);
-- 
2.5.5