[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 37/62] tcg-s390: Define TCG_TMP0.
From: Richard Henderson
Subject: [Qemu-devel] [PATCH 37/62] tcg-s390: Define TCG_TMP0.
Date: Thu, 27 May 2010 13:46:19 -0700
Use a define for the temp register instead of hard-coding it.
Signed-off-by: Richard Henderson <address@hidden>
---
tcg/s390/tcg-target.c | 49 ++++++++++++++++++++++++++-----------------------
1 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index ec8c84d..ee2e879 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -36,6 +36,9 @@
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_N32 0x200
+#define TCG_TMP0 TCG_REG_R13
+
+
/* All of the following instructions are prefixed with their instruction
format, and are defined as 8- or 16-bit quantities, even when the two
halves of the 16-bit quantity may appear 32 bits apart in the insn.
@@ -491,7 +494,7 @@ static void tcg_out_ldst(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
if (ofs < -0x80000 || ofs >= 0x80000) {
/* Combine the low 16 bits of the offset with the actual load insn;
the high 48 bits must come from an immediate load. */
- index = TCG_REG_R13;
+ index = TCG_TMP0;
tcg_out_movi(s, TCG_TYPE_PTR, index, ofs & ~0xffff);
ofs &= 0xffff;
}
@@ -658,8 +661,8 @@ static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
for (i = 0; i < 4; i++) {
tcg_target_ulong mask = ~(0xffffull << i*16);
if ((val & mask) == 0) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, val);
- tcg_out_insn(s, RRE, NGR, dest, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
+ tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
return;
}
}
@@ -667,8 +670,8 @@ static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
for (i = 0; i < 2; i++) {
tcg_target_ulong mask = ~(0xffffffffull << i*32);
if ((val & mask) == 0) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, val);
- tcg_out_insn(s, RRE, NGR, dest, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
+ tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
return;
}
}
@@ -734,8 +737,8 @@ static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
value first and perform the xor via registers. This is true for
any 32-bit negative value, where the high 32-bits get flipped too. */
if (sval < 0 && sval == (int32_t)sval) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, sval);
- tcg_out_insn(s, RRE, XGR, dest, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, sval);
+ tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
return;
}
@@ -792,8 +795,8 @@ static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
} else if (off == (int32_t)off) {
tcg_out_insn(s, RIL, BRCL, cc, off);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
- tcg_out_insn(s, RR, BCR, cc, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
+ tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
}
}
@@ -815,8 +818,8 @@ static void tgen_calli(TCGContext *s, tcg_target_long dest)
if (off == (int32_t)off) {
tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
- tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_REG_R13);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
+ tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
}
}
@@ -852,13 +855,13 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
tgen64_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
if (is_store) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
offsetof(CPUState, tlb_table[mem_index][0].addr_write));
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
offsetof(CPUState, tlb_table[mem_index][0].addr_read));
}
- tcg_out_insn(s, RRE, AGR, arg1, TCG_REG_R13);
+ tcg_out_insn(s, RRE, AGR, arg1, TCG_TMP0);
tcg_out_insn(s, RRE, AGR, arg1, TCG_AREG0);
@@ -1103,16 +1106,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
(tcg_target_long)s->code_ptr) >> 1;
if (off == (int32_t)off) {
/* load address relative to PC */
- tcg_out_insn(s, RIL, LARL, TCG_REG_R13, off);
+ tcg_out_insn(s, RIL, LARL, TCG_TMP0, off);
} else {
/* too far for larl */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
(tcg_target_long)(s->tb_next + args[0]));
}
/* load address stored at s->tb_next + args[0] */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R13, TCG_REG_R13, 0);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_TMP0, 0);
/* and go there */
- tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R13);
+ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
}
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
break;
@@ -1353,8 +1356,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_sh64(s, RSY_RLL, args[0], args[1],
SH32_REG_NONE, (32 - args[2]) & 31);
} else {
- tcg_out_insn(s, RR, LCR, TCG_REG_R13, args[2]);
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_R13, 0);
+ tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
+ tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
}
break;
@@ -1373,8 +1376,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
} else {
/* We can use the smaller 32-bit negate because only the
low 6 bits are examined for the rotate. */
- tcg_out_insn(s, RR, LCR, TCG_REG_R13, args[2]);
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_REG_R13, 0);
+ tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
+ tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
}
break;
@@ -1638,7 +1641,7 @@ void tcg_target_init(TCGContext *s)
tcg_regset_clear(s->reserved_regs);
/* frequently used as a temporary */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
+ tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
/* another temporary */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12);
/* XXX many insns can't be used with R0, so we better avoid it for now */
--
1.7.0.1
- [Qemu-devel] [PATCH 26/62] tcg-s390: Implement sign and zero-extension operations., (continued)
- [Qemu-devel] [PATCH 26/62] tcg-s390: Implement sign and zero-extension operations., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 27/62] tcg-s390: Implement bswap operations., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 28/62] tcg-s390: Implement rotates., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 29/62] tcg-s390: Use LOAD COMPLIMENT for negate., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 30/62] tcg-s390: Tidy unimplemented opcodes., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 31/62] tcg-s390: Use the extended-immediate facility for add/sub., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 32/62] tcg-s390: Implement immediate ANDs., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 33/62] tcg-s390: Implement immediate ORs., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 34/62] tcg-s390: Implement immediate MULs., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 35/62] tcg-s390: Implement immediate XORs., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 37/62] tcg-s390: Define TCG_TMP0.,
Richard Henderson <=
- [Qemu-devel] [PATCH 38/62] tcg-s390: Tidy regset initialization; use R14 as temporary., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 39/62] tcg-s390: Rearrange register allocation order., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 36/62] tcg-s390: Icache flush is a no-op., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 40/62] tcg-s390: Tidy goto_tb., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 41/62] tcg-s390: Allocate the code_gen_buffer near the main program., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 42/62] tcg-s390: Rearrange qemu_ld/st to avoid register copy., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 44/62] tcg-s390: Tidy user qemu_ld/st., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 45/62] tcg-s390: Implement GUEST_BASE., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 43/62] tcg-s390: Tidy tcg_prepare_qemu_ldst., Richard Henderson, 2010/05/27
- [Qemu-devel] [PATCH 47/62] tcg-s390: Conditionalize general-instruction-extension insns., Richard Henderson, 2010/05/27