diff --git a/cputlb.c b/cputlb.c
index 7bd3573..98ccc86 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -331,8 +331,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 }
 
 #define MMUSUFFIX _cmmu
-#undef GETPC
-#define GETPC() ((uintptr_t)0)
+#undef GETRA
+#define GETRA() ((uintptr_t)0)
 #define SOFTMMU_CODE_ACCESS
 
 #define SHIFT 0
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index f9ac332..9a806a0 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -301,7 +301,8 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
    defined here for simplicity of defining the follow-up macros.  */
 #if defined(CONFIG_TCG_INTERPRETER)
 extern uintptr_t tci_tb_ptr;
-# define GETRA() tci_tb_ptr
+extern uintptr_t GETRA(void);
+//~ # define GETRA() tci_tb_ptr
 #else
 # define GETRA() \
     ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
@@ -314,11 +315,7 @@ extern uintptr_t tci_tb_ptr;
    to indicate the compressed mode; subtracting two works around that.  It
    is also the case that there are no host isas that contain a call insn
    smaller than 4 bytes, so we don't worry about special-casing this.  */
-#if defined(CONFIG_TCG_INTERPRETER)
-# define GETPC_ADJ 0
-#else
-# define GETPC_ADJ 2
-#endif
+#define GETPC_ADJ 2
 
 #define GETPC()  (GETRA() - GETPC_ADJ)
 
diff --git a/tci.c b/tci.c
index 0202ed9..2637cc8 100644
--- a/tci.c
+++ b/tci.c
@@ -53,9 +53,19 @@ typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
 
 /* Targets which don't use GETPC also don't need tci_tb_ptr
    which makes them a little faster. */
-#if defined(GETPC)
 uintptr_t tci_tb_ptr;
+uintptr_t GETRA(void)
+{
+    assert(tci_tb_ptr != 0);
+    return tci_tb_ptr;
+}
+
+static inline void savepc(void *tb_ptr)
+{
+#ifdef CONFIG_SOFTMMU
+    tci_tb_ptr = (uintptr_t)tb_ptr;
 #endif
+}
 
 static tcg_target_ulong tci_reg[TCG_TARGET_NB_REGS];
 
@@ -467,9 +477,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
     uint64_t v64;
 #endif
 
-#if defined(GETPC)
-    tci_tb_ptr = (uintptr_t)tb_ptr;
-#endif
+    tci_tb_ptr = 0;
 
     /* Skip opcode and size entry. */
     tb_ptr += 2;
@@ -489,6 +497,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             TODO();
             break;
         case INDEX_op_call:
+            tci_tb_ptr = (uintptr_t)tb_ptr;
             t0 = tci_read_ri(&tb_ptr);
 #if TCG_TARGET_REG_BITS == 32
             tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0),
@@ -1087,6 +1096,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             tb_ptr += (int32_t)t0;
             continue;
         case INDEX_op_qemu_ld8u:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1098,6 +1108,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             tci_write_reg8(t0, tmp8);
             break;
         case INDEX_op_qemu_ld8s:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1109,6 +1120,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             tci_write_reg8s(t0, tmp8);
             break;
         case INDEX_op_qemu_ld16u:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1120,6 +1132,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             tci_write_reg16(t0, tmp16);
             break;
         case INDEX_op_qemu_ld16s:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1132,6 +1145,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             break;
 #if TCG_TARGET_REG_BITS == 64
         case INDEX_op_qemu_ld32u:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1143,6 +1157,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             tci_write_reg32(t0, tmp32);
             break;
         case INDEX_op_qemu_ld32s:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1155,6 +1170,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             break;
 #endif /* TCG_TARGET_REG_BITS == 64 */
         case INDEX_op_qemu_ld32:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1166,6 +1182,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             tci_write_reg32(t0, tmp32);
             break;
         case INDEX_op_qemu_ld64:
+            savepc(tb_ptr);
             t0 = *tb_ptr++;
 #if TCG_TARGET_REG_BITS == 32
             t1 = *tb_ptr++;
@@ -1183,6 +1200,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 #endif
             break;
         case INDEX_op_qemu_st8:
+            savepc(tb_ptr);
             t0 = tci_read_r8(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1194,6 +1212,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 #endif
             break;
         case INDEX_op_qemu_st16:
+            savepc(tb_ptr);
             t0 = tci_read_r16(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1205,6 +1224,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 #endif
             break;
         case INDEX_op_qemu_st32:
+            savepc(tb_ptr);
             t0 = tci_read_r32(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
@@ -1216,6 +1236,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 #endif
             break;
         case INDEX_op_qemu_st64:
+            savepc(tb_ptr);
             tmp64 = tci_read_r64(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
 #ifdef CONFIG_SOFTMMU
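
Note (illustration only, not part of the patch): the sketch below is a minimal, self-contained rendering of the mechanism the tci.c hunks introduce. The interpreter clears tci_tb_ptr on translation-block entry, each qemu_ld/st (and call) opcode records its bytecode position via savepc(), and GETRA() hands that position to the softmmu helpers in place of a host return address. The names tci_tb_ptr, GETRA and savepc mirror the patch; the surrounding scaffolding (fake_qemu_ld, main) is hypothetical.

/* Standalone sketch of the savepc()/GETRA() cooperation, assuming a
 * softmmu build as in the patch. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CONFIG_SOFTMMU          /* assumption: softmmu configuration */

static uintptr_t tci_tb_ptr;

uintptr_t GETRA(void)
{
    /* Zero means no qemu_ld/st or call opcode has saved its position yet. */
    assert(tci_tb_ptr != 0);
    return tci_tb_ptr;
}

static inline void savepc(void *tb_ptr)
{
#ifdef CONFIG_SOFTMMU
    tci_tb_ptr = (uintptr_t)tb_ptr;
#endif
}

/* Hypothetical stand-in for a softmmu load helper that uses the saved
 * position to attribute a fault to the right translation-block location. */
static void fake_qemu_ld(void)
{
    printf("helper sees retaddr 0x%" PRIxPTR "\n", GETRA());
}

int main(void)
{
    uint8_t bytecode[8] = { 0 };
    uint8_t *tb_ptr = bytecode;

    tci_tb_ptr = 0;     /* reset at TB entry, as in tcg_qemu_tb_exec() */
    savepc(tb_ptr);     /* each qemu_ld/st opcode saves its position */
    fake_qemu_ld();
    return 0;
}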