From: Blue Swirl
Subject: [Qemu-devel] [PATCH 25/26] softmmu templates: optionally pass CPUState to memory access functions
Date: Sat, 24 Sep 2011 18:31:08 +0000
Optionally, make memory access helpers take a parameter for CPUState
instead of relying on global env.
Signed-off-by: Blue Swirl <address@hidden>
---
Only x86_64 is converted so far, and it is still buggy: the helper is
called with the correct arguments, but the generated code crashes
afterwards.
Some of the tcg-target.c changes should be extracted into a separate
cleanup patch (e.g. replacing RDX with tcg_target_call_iarg_regs[1]).
The idea is that the ifdeffery and the legacy code will be removed once
all guests have been converted.
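For review purposes, here is a minimal standalone sketch (not the actual
QEMU code: CPUState is reduced to a toy struct and the TLB/MMU lookup is
omitted) of the signature change that the ENV_PARAM/HELPER_PREFIX macros
and the softmmu_defs.h prototypes implement, i.e. the legacy helper that
relies on the global env versus the new helper that takes env as an
explicit first argument:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the real CPUState; only guest "memory" is modelled. */
typedef struct CPUState {
    uint8_t mem[16];
} CPUState;

#ifndef CONFIG_TCG_PASS_AREG0
/* Legacy style: the helper reaches for a global env. */
static CPUState *env;

static uint8_t __ldb_mmu(uint32_t addr, int mmu_idx)
{
    (void)mmu_idx;                     /* toy helper ignores the MMU index */
    return env->mem[addr & 0xf];
}
#else
/* New style: env is an explicit first parameter, no global needed. */
static uint8_t helper_ldb_mmu(CPUState *env, uint32_t addr, int mmu_idx)
{
    (void)mmu_idx;
    return env->mem[addr & 0xf];
}
#endif

int main(void)
{
    CPUState cpu = { .mem = { [5] = 42 } };

#ifndef CONFIG_TCG_PASS_AREG0
    env = &cpu;
    printf("%d\n", __ldb_mmu(5, 0));            /* prints 42 */
#else
    printf("%d\n", helper_ldb_mmu(&cpu, 5, 0)); /* prints 42 */
#endif
    return 0;
}

On the TCG backend side the same switch simply shifts every existing call
argument up by one register (the ARG_OFFSET changes below) and loads
TCG_AREG0 into the first argument register before calling the helper.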
---
cpu-all.h | 9 +++++
exec-all.h | 6 +++
softmmu_defs.h | 20 +++++++++++
softmmu_header.h | 60 ++++++++++++++++++++++++++--------
softmmu_template.h | 82 +++++++++++++++++++++++++++++++---------------
tcg/i386/tcg-target.c | 85 +++++++++++++++++++++++++++++++++++++++----------
6 files changed, 204 insertions(+), 58 deletions(-)
diff --git a/cpu-all.h b/cpu-all.h
index 42a5fa0..12cb4f2 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -258,12 +258,21 @@ extern unsigned long reserved_va;
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
+#ifndef CONFIG_TCG_PASS_AREG0
#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)
+#else
+#define cpu_ldub_code(env1, p) ldub_raw(p)
+#define cpu_ldsb_code(env1, p) ldsb_raw(p)
+#define cpu_lduw_code(env1, p) lduw_raw(p)
+#define cpu_ldsw_code(env1, p) ldsw_raw(p)
+#define cpu_ldl_code(env1, p) ldl_raw(p)
+#define cpu_ldq_code(env1, p) ldq_raw(p)
+#endif
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
diff --git a/exec-all.h b/exec-all.h
index 1120f84..8b5fdf4 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -292,7 +292,9 @@ void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
+#ifndef CONFIG_TCG_PASS_AREG0
#define env cpu_single_env
+#endif
#define DATA_SIZE 1
#include "softmmu_header.h"
@@ -330,7 +332,11 @@ static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
mmu_idx = cpu_mmu_index(env1);
if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
(addr & TARGET_PAGE_MASK))) {
+#ifdef CONFIG_TCG_PASS_AREG0
+ cpu_ldub_code(env1, addr);
+#else
ldub_code(addr);
+#endif
}
pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
diff --git a/softmmu_defs.h b/softmmu_defs.h
index c5a2bcd..ba949e3 100644
--- a/softmmu_defs.h
+++ b/softmmu_defs.h
@@ -9,6 +9,7 @@
#ifndef SOFTMMU_DEFS_H
#define SOFTMMU_DEFS_H
+#ifndef CONFIG_TCG_PASS_AREG0
uint8_t REGPARM __ldb_mmu(target_ulong addr, int mmu_idx);
void REGPARM __stb_mmu(target_ulong addr, uint8_t val, int mmu_idx);
uint16_t REGPARM __ldw_mmu(target_ulong addr, int mmu_idx);
@@ -26,5 +27,24 @@ uint32_t REGPARM __ldl_cmmu(target_ulong addr, int mmu_idx);
void REGPARM __stl_cmmu(target_ulong addr, uint32_t val, int mmu_idx);
uint64_t REGPARM __ldq_cmmu(target_ulong addr, int mmu_idx);
void REGPARM __stq_cmmu(target_ulong addr, uint64_t val, int mmu_idx);
+#else
+uint8_t REGPARM helper_ldb_mmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stb_mmu(CPUState *env, target_ulong addr, uint8_t val, int mmu_idx);
+uint16_t REGPARM helper_ldw_mmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stw_mmu(CPUState *env, target_ulong addr, uint16_t val, int mmu_idx);
+uint32_t REGPARM helper_ldl_mmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stl_mmu(CPUState *env, target_ulong addr, uint32_t val, int mmu_idx);
+uint64_t REGPARM helper_ldq_mmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stq_mmu(CPUState *env, target_ulong addr, uint64_t val, int mmu_idx);
+
+uint8_t REGPARM helper_ldb_cmmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stb_cmmu(CPUState *env, target_ulong addr, uint8_t val, int mmu_idx);
+uint16_t REGPARM helper_ldw_cmmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stw_cmmu(CPUState *env, target_ulong addr, uint16_t val, int mmu_idx);
+uint32_t REGPARM helper_ldl_cmmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stl_cmmu(CPUState *env, target_ulong addr, uint32_t val, int mmu_idx);
+uint64_t REGPARM helper_ldq_cmmu(CPUState *env, target_ulong addr, int mmu_idx);
+void REGPARM helper_stq_cmmu(CPUState *env, target_ulong addr, uint64_t val, int mmu_idx);
+#endif
#endif
diff --git a/softmmu_header.h b/softmmu_header.h
index 818d7b6..3c0b298 100644
--- a/softmmu_header.h
+++ b/softmmu_header.h
@@ -78,9 +78,23 @@
#define ADDR_READ addr_read
#endif
+#ifndef CONFIG_TCG_PASS_AREG0
+#define ENV_PARAM
+#define ENV_VAR
+#define CPU_PREFIX
+#define HELPER_PREFIX __
+#else
+#define ENV_PARAM CPUState *env,
+#define ENV_VAR env,
+#define CPU_PREFIX cpu_
+#define HELPER_PREFIX helper_
+#endif
+
/* generic load/store macros */
-static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
+static inline RES_TYPE
+glue(glue(glue(CPU_PREFIX, ld), USUFFIX), MEMSUFFIX)(ENV_PARAM
+ target_ulong ptr)
{
int page_index;
RES_TYPE res;
@@ -93,7 +107,9 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
+ res = glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_VAR
+ addr,
+ mmu_idx);
} else {
physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
@@ -102,7 +118,9 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
}
#if DATA_SIZE <= 2
-static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
+static inline int
+glue(glue(glue(CPU_PREFIX, lds), SUFFIX), MEMSUFFIX)(ENV_PARAM
+ target_ulong ptr)
{
int res, page_index;
target_ulong addr;
@@ -114,7 +132,8 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
+ res = (DATA_STYPE)glue(glue(glue(HELPER_PREFIX, ld), SUFFIX),
+ MMUSUFFIX)(ENV_VAR addr, mmu_idx);
} else {
physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
@@ -127,7 +146,9 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
/* generic store macro */
-static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
+static inline void
+glue(glue(glue(CPU_PREFIX, st), SUFFIX), MEMSUFFIX)(ENV_PARAM target_ulong ptr,
+ RES_TYPE v)
{
int page_index;
target_ulong addr;
@@ -139,7 +160,8 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
+ glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_VAR addr, v,
+ mmu_idx);
} else {
physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
@@ -151,46 +173,52 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
#if ACCESS_TYPE != (NB_MMU_MODES + 1)
#if DATA_SIZE == 8
-static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
+static inline float64 glue(glue(CPU_PREFIX, ldfq), MEMSUFFIX)(ENV_PARAM
+ target_ulong ptr)
{
union {
float64 d;
uint64_t i;
} u;
- u.i = glue(ldq, MEMSUFFIX)(ptr);
+ u.i = glue(glue(CPU_PREFIX, ldq), MEMSUFFIX)(ENV_VAR ptr);
return u.d;
}
-static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v)
+static inline void glue(glue(CPU_PREFIX, stfq), MEMSUFFIX)(ENV_PARAM
+ target_ulong ptr,
+ float64 v)
{
union {
float64 d;
uint64_t i;
} u;
u.d = v;
- glue(stq, MEMSUFFIX)(ptr, u.i);
+ glue(glue(CPU_PREFIX, stq), MEMSUFFIX)(ENV_VAR ptr, u.i);
}
#endif /* DATA_SIZE == 8 */
#if DATA_SIZE == 4
-static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr)
+static inline float32 glue(glue(CPU_PREFIX, ldfl), MEMSUFFIX)(ENV_PARAM
+ target_ulong ptr)
{
union {
float32 f;
uint32_t i;
} u;
- u.i = glue(ldl, MEMSUFFIX)(ptr);
+ u.i = glue(glue(CPU_PREFIX, ldl), MEMSUFFIX)(ENV_VAR ptr);
return u.f;
}
-static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
+static inline void glue(glue(CPU_PREFIX, stfl), MEMSUFFIX)(ENV_PARAM
+ target_ulong ptr,
+ float32 v)
{
union {
float32 f;
uint32_t i;
} u;
u.f = v;
- glue(stl, MEMSUFFIX)(ptr, u.i);
+ glue(glue(CPU_PREFIX, stl), MEMSUFFIX)(ENV_VAR ptr, u.i);
}
#endif /* DATA_SIZE == 4 */
@@ -205,3 +233,7 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ
+#undef ENV_PARAM
+#undef ENV_VAR
+#undef CPU_PREFIX
+#undef HELPER_PREFIX
diff --git a/softmmu_template.h b/softmmu_template.h
index 36eb2e8..b748c1c 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -53,10 +53,24 @@
#define ADDR_READ addr_read
#endif
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
+#ifndef CONFIG_TCG_PASS_AREG0
+#define ENV_PARAM
+#define ENV_VAR
+#define CPU_PREFIX
+#define HELPER_PREFIX __
+#else
+#define ENV_PARAM CPUState *env,
+#define ENV_VAR env,
+#define CPU_PREFIX cpu_
+#define HELPER_PREFIX helper_
+#endif
+
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
+ target_ulong addr,
int mmu_idx,
void *retaddr);
-static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
+static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
+ target_phys_addr_t physaddr,
target_ulong addr,
void *retaddr)
{
@@ -86,8 +100,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
}
/* handle all cases except unaligned access which span two pages */
-DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
- int mmu_idx)
+DATA_TYPE REGPARM
+glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
+ target_ulong addr,
+ int mmu_idx)
{
DATA_TYPE res;
int index;
@@ -108,22 +124,22 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
goto do_unaligned_access;
retaddr = GETPC();
ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
+ res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
- do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
- res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
+ res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
mmu_idx, retaddr);
} else {
/* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
retaddr = GETPC();
- do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
addend = env->tlb_table[mmu_idx][index].addend;
@@ -134,7 +150,7 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
retaddr = GETPC();
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
goto redo;
@@ -143,7 +159,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
}
/* handle all unaligned cases */
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
+ target_ulong addr,
int mmu_idx,
void *retaddr)
{
@@ -162,15 +179,15 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
+ res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
TARGET_PAGE_SIZE) {
do_unaligned_access:
/* slow unaligned access (it spans two pages) */
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
+ res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
mmu_idx, retaddr);
- res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
+ res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
mmu_idx, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
@@ -194,12 +211,14 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
#ifndef SOFTMMU_CODE_ACCESS
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
+ target_ulong addr,
DATA_TYPE val,
int mmu_idx,
void *retaddr);
-static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
+static inline void glue(io_write, SUFFIX)(ENV_PARAM
+ target_phys_addr_t physaddr,
DATA_TYPE val,
target_ulong addr,
void *retaddr)
@@ -227,9 +246,11 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
#endif /* SHIFT > 2 */
}
-void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
- DATA_TYPE val,
- int mmu_idx)
+void REGPARM
+glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
+ target_ulong addr,
+ DATA_TYPE val,
+ int mmu_idx)
{
target_phys_addr_t ioaddr;
unsigned long addend;
@@ -247,21 +268,21 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
goto do_unaligned_access;
retaddr = GETPC();
ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
+ glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
TARGET_PAGE_SIZE) {
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
- do_unaligned_access(addr, 1, mmu_idx, retaddr);
+ do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
- glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
+ glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
mmu_idx, retaddr);
} else {
/* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
retaddr = GETPC();
- do_unaligned_access(addr, 1, mmu_idx, retaddr);
+ do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
}
#endif
addend = env->tlb_table[mmu_idx][index].addend;
@@ -272,7 +293,7 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
retaddr = GETPC();
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(addr, 1, mmu_idx, retaddr);
+ do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
tlb_fill(env, addr, 1, mmu_idx, retaddr);
goto redo;
@@ -280,7 +301,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
}
/* handles all unaligned cases */
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
+ target_ulong addr,
DATA_TYPE val,
int mmu_idx,
void *retaddr)
@@ -299,7 +321,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
+ glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
@@ -307,10 +329,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
* previous page from the TLB cache. */
for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
- glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
+ glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
+ val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
mmu_idx, retaddr);
#else
- glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
+ glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
+ val >> (i * 8),
mmu_idx, retaddr);
#endif
}
@@ -335,3 +359,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
+#undef ENV_PARAM
+#undef ENV_VAR
+#undef CPU_PREFIX
+#undef HELPER_PREFIX
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 281f87d..c9fc440 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -184,10 +184,16 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set32(ct->u.regs, 0, 0xffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDX);
+#endif
} else {
tcg_regset_set32(ct->u.regs, 0, 0xff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_ECX);
+#endif
}
break;
@@ -962,6 +968,29 @@ static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+#define ARG_OFFSET 1
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void *qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void *qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+#define ARG_OFFSET 0
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -969,12 +998,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
+#endif
/* Perform the TLB load and compare.
@@ -1004,8 +1036,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
uint8_t **label_ptr, int which)
{
const int addrlo = args[addrlo_idx];
- const int r0 = tcg_target_call_iarg_regs[0];
- const int r1 = tcg_target_call_iarg_regs[1];
+ const int r0 = tcg_target_call_iarg_regs[ARG_OFFSET + 0];
+ const int r1 = tcg_target_call_iarg_regs[ARG_OFFSET + 1];
TCGType type = TCG_TYPE_I32;
int rexw = 0;
@@ -1163,7 +1195,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
/* TLB Hit. */
tcg_out_qemu_ld_direct(s, data_reg, data_reg2,
- tcg_target_call_iarg_regs[0], 0, opc);
+ tcg_target_call_iarg_regs[ARG_OFFSET + 0], 0, opc);
/* jmp label2 */
tcg_out8(s, OPC_JMP_short);
@@ -1179,13 +1211,17 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
}
/* XXX: move that code at the end of the TB */
- /* The first argument is already loaded with addrlo. */
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+#endif
+ /* The second (or first for legacy helper) argument is already
+ loaded with addrlo. */
arg_idx = 1;
if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx++],
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[ARG_OFFSET + arg_idx++],
args[addrlo_idx + 1]);
}
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx],
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[ARG_OFFSET + arg_idx],
mem_index);
tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
@@ -1338,7 +1374,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
/* TLB Hit. */
tcg_out_qemu_st_direct(s, data_reg, data_reg2,
- tcg_target_call_iarg_regs[0], 0, opc);
+ tcg_target_call_iarg_regs[ARG_OFFSET + 0], 0, opc);
/* jmp label2 */
tcg_out8(s, OPC_JMP_short);
@@ -1354,39 +1390,54 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
}
/* XXX: move that code at the end of the TB */
+#ifdef CONFIG_TCG_PASS_AREG0
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+#endif
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- TCG_REG_RSI, data_reg);
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
+ tcg_target_call_iarg_regs[ARG_OFFSET + 1], data_reg);
+ tcg_out_movi(s, TCG_TYPE_I32,
+ tcg_target_call_iarg_regs[ARG_OFFSET + 2], mem_index);
stack_adjust = 0;
} else if (TARGET_LONG_BITS == 32) {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, data_reg);
+ tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[ARG_OFFSET + 1],
+ data_reg);
if (opc == 3) {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg2);
+ tcg_out_mov(s, TCG_TYPE_I32,
+ tcg_target_call_iarg_regs[ARG_OFFSET + 2], data_reg2);
tcg_out_pushi(s, mem_index);
stack_adjust = 4;
} else {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
+ tcg_out_movi(s, TCG_TYPE_I32,
+ tcg_target_call_iarg_regs[ARG_OFFSET + 2], mem_index);
stack_adjust = 0;
}
} else {
if (opc == 3) {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]);
+ tcg_out_mov(s, TCG_TYPE_I32,
+ tcg_target_call_iarg_regs[ARG_OFFSET + 1],
+ args[addrlo_idx + 1]);
tcg_out_pushi(s, mem_index);
tcg_out_push(s, data_reg2);
tcg_out_push(s, data_reg);
stack_adjust = 12;
} else {
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]);
+ tcg_out_mov(s, TCG_TYPE_I32,
+ tcg_target_call_iarg_regs[ARG_OFFSET + 1],
+ args[addrlo_idx + 1]);
switch(opc) {
case 0:
- tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
+ tcg_out_ext8u(s, tcg_target_call_iarg_regs[ARG_OFFSET + 2],
+ data_reg);
break;
case 1:
- tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
+ tcg_out_ext16u(s, tcg_target_call_iarg_regs[ARG_OFFSET + 2],
+ data_reg);
break;
case 2:
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg);
+ tcg_out_mov(s, TCG_TYPE_I32,
+ tcg_target_call_iarg_regs[ARG_OFFSET + 2],
+ data_reg);
break;
}
tcg_out_pushi(s, mem_index);
--
1.6.2.4