
Re: [Qemu-devel] [PATCH 13/15] tcg-sparc: Use defines for temporaries.


From: Blue Swirl
Subject: Re: [Qemu-devel] [PATCH 13/15] tcg-sparc: Use defines for temporaries.
Date: Mon, 26 Mar 2012 16:38:40 +0000

On Sun, Mar 25, 2012 at 22:27, Richard Henderson <address@hidden> wrote:
> And change from %i4 to %g1 to remove a v8plus fixme.
>
> Signed-off-by: Richard Henderson <address@hidden>
> ---
>  tcg/sparc/tcg-target.c |  110 ++++++++++++++++++++++++-----------------------
>  1 files changed, 56 insertions(+), 54 deletions(-)
>
> diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
> index 181ba26..896fab1 100644
> --- a/tcg/sparc/tcg-target.c
> +++ b/tcg/sparc/tcg-target.c
> @@ -59,8 +59,11 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
>  };
>  #endif
>
> +#define TCG_REG_TMP  TCG_REG_G1
> +#define TCG_REG_TMP2 TCG_REG_I5
> +
>  #ifdef CONFIG_USE_GUEST_BASE
> -# define TCG_GUEST_BASE_REG TCG_REG_I3
> +# define TCG_GUEST_BASE_REG TCG_REG_I4
>  #else
>  # define TCG_GUEST_BASE_REG TCG_REG_G0
>  #endif
> @@ -372,10 +375,10 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type,
>         tcg_out_sethi(s, ret, ~arg);
>         tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
>     } else {
> -        tcg_out_movi_imm32(s, TCG_REG_I4, arg >> (TCG_TARGET_REG_BITS / 2));
> -        tcg_out_arithi(s, TCG_REG_I4, TCG_REG_I4, 32, SHIFT_SLLX);
> -        tcg_out_movi_imm32(s, ret, arg);
> -        tcg_out_arith(s, ret, ret, TCG_REG_I4, ARITH_OR);
> +        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
> +        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
> +        tcg_out_movi_imm32(s, TCG_REG_TMP2, arg);
> +        tcg_out_arith(s, ret, ret, TCG_REG_TMP2, ARITH_OR);
>     }
>  }
>
> @@ -392,8 +395,8 @@ static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
>         tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
>                   INSN_IMM13(offset));
>     } else {
> -        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
> -        tcg_out_ldst_rr(s, ret, addr, TCG_REG_I5, op);
> +        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
> +        tcg_out_ldst_rr(s, ret, addr, TCG_REG_TMP, op);
>     }
>  }
>
> @@ -435,8 +438,8 @@ static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
>         if (check_fit_tl(val, 13))
>             tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
>         else {
> -            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, val);
> -            tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_ADD);
> +            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, val);
> +            tcg_out_arith(s, reg, reg, TCG_REG_TMP, ARITH_ADD);
>         }
>     }
>  }
> @@ -448,8 +451,8 @@ static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
>         if (check_fit_tl(val, 13))
>             tcg_out_arithi(s, rd, rs, val, ARITH_AND);
>         else {
> -            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, val);
> -            tcg_out_arith(s, rd, rs, TCG_REG_I5, ARITH_AND);
> +            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, val);
> +            tcg_out_arith(s, rd, rs, TCG_REG_TMP, ARITH_AND);
>         }
>     }
>  }
> @@ -461,8 +464,8 @@ static void tcg_out_div32(TCGContext *s, int rd, int rs1,
>     if (uns) {
>         tcg_out_sety(s, TCG_REG_G0);
>     } else {
> -        tcg_out_arithi(s, TCG_REG_I5, rs1, 31, SHIFT_SRA);
> -        tcg_out_sety(s, TCG_REG_I5);
> +        tcg_out_arithi(s, TCG_REG_TMP, rs1, 31, SHIFT_SRA);
> +        tcg_out_sety(s, TCG_REG_TMP);
>     }
>
>     tcg_out_arithc(s, rd, rs1, val2, val2const,
> @@ -608,8 +611,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
>     case TCG_COND_GTU:
>     case TCG_COND_GEU:
>         if (c2const && c2 != 0) {
> -            tcg_out_movi_imm13(s, TCG_REG_I5, c2);
> -            c2 = TCG_REG_I5;
> +            tcg_out_movi_imm13(s, TCG_REG_TMP, c2);
> +            c2 = TCG_REG_TMP;
>         }
>         t = c1, c1 = c2, c2 = t, c2const = 0;
>         cond = tcg_swap_cond(cond);
> @@ -656,15 +659,15 @@ static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
>
>     switch (cond) {
>     case TCG_COND_EQ:
> -        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_I5, al, bl, blconst);
> +        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_TMP, al, bl, blconst);
>         tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
> -        tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_AND);
> +        tcg_out_arith(s, ret, ret, TCG_REG_TMP, ARITH_AND);
>         break;
>
>     case TCG_COND_NE:
> -        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_I5, al, al, blconst);
> +        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_TMP, al, al, blconst);
>         tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
> -        tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_OR);
> +        tcg_out_arith(s, ret, ret, TCG_REG_TMP, ARITH_OR);
>         break;
>
>     default:
> @@ -964,8 +967,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
>  #else
>     addr_reg = args[addrlo_idx];
>     if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
> -        tcg_out_arithi(s, TCG_REG_I5, addr_reg, 0, SHIFT_SRL);
> -        addr_reg = TCG_REG_I5;
> +        tcg_out_arithi(s, TCG_REG_TMP, addr_reg, 0, SHIFT_SRL);
> +        addr_reg = TCG_REG_TMP;
>     }
>     if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
>         int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
> @@ -1008,12 +1011,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
>                                 offsetof(CPUTLBEntry, addr_write));
>
>     if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
> -        /* Reconstruct the full 64-bit value in %g1, using %o2 as temp.  */
> -        /* ??? Redefine the temps from %i4/%i5 so that we have a o/g temp. */
> -        tcg_out_arithi(s, TCG_REG_G1, datalo, 0, SHIFT_SRL);
> +        /* Reconstruct the full 64-bit value.  */
> +        tcg_out_arithi(s, TCG_REG_TMP, datalo, 0, SHIFT_SRL);
>         tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
> -        tcg_out_arith(s, TCG_REG_G1, TCG_REG_G1, TCG_REG_O2, ARITH_OR);
> -        datalo = TCG_REG_G1;
> +        tcg_out_arith(s, TCG_REG_O2, TCG_REG_TMP, TCG_REG_O2, ARITH_OR);
> +        datalo = TCG_REG_O2;
>     }
>
>     /* The fast path is exactly one insn.  Thus we can perform the entire
> @@ -1054,16 +1056,14 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
>  #else
>     addr_reg = args[addrlo_idx];
>     if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
> -        tcg_out_arithi(s, TCG_REG_I5, addr_reg, 0, SHIFT_SRL);
> -        addr_reg = TCG_REG_I5;
> +        tcg_out_arithi(s, TCG_REG_TMP, addr_reg, 0, SHIFT_SRL);
> +        addr_reg = TCG_REG_TMP;
>     }
>     if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
> -        /* Reconstruct the full 64-bit value in %g1, using %o2 as temp.  */
> -        /* ??? Redefine the temps from %i4/%i5 so that we have a o/g temp. */
> -        tcg_out_arithi(s, TCG_REG_G1, datalo, 0, SHIFT_SRL);
> +        tcg_out_arithi(s, TCG_REG_TMP, datalo, 0, SHIFT_SRL);
>         tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
> -        tcg_out_arith(s, TCG_REG_G1, TCG_REG_G1, TCG_REG_O2, ARITH_OR);
> -        datalo = TCG_REG_G1;
> +        tcg_out_arith(s, TCG_REG_O2, TCG_REG_TMP, TCG_REG_O2, ARITH_OR);
> +        datalo = TCG_REG_O2;
>     }
>     tcg_out_ldst_rr(s, datalo, addr_reg,
>                     (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
> @@ -1087,14 +1087,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>     case INDEX_op_goto_tb:
>         if (s->tb_jmp_offset) {
>             /* direct jump method */
> -            tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000);
> -            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
> +            tcg_out_sethi(s, TCG_REG_TMP, args[0] & 0xffffe000);
> +            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_TMP) |
>                       INSN_IMM13((args[0] & 0x1fff)));
>             s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
>         } else {
>             /* indirect jump method */
> -            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
> -            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
> +            tcg_out_ld_ptr(s, TCG_REG_TMP, (tcg_target_long)(s->tb_next + args[0]));
> +            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_TMP) |
>                       INSN_RS2(TCG_REG_G0));
>         }
>         tcg_out_nop(s);
> @@ -1106,9 +1106,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>                                    - (tcg_target_ulong)s->code_ptr) >> 2)
>                                  & 0x3fffffff));
>         else {
> -            tcg_out_ld_ptr(s, TCG_REG_I5,
> +            tcg_out_ld_ptr(s, TCG_REG_TMP,
>                            (tcg_target_long)(s->tb_next + args[0]));
> -            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
> +            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_TMP) |
>                       INSN_RS2(TCG_REG_G0));
>         }
>         /* delay slot */
> @@ -1214,11 +1214,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>
>     case INDEX_op_rem_i32:
>     case INDEX_op_remu_i32:
> -        tcg_out_div32(s, TCG_REG_I5, args[1], args[2], const_args[2],
> +        tcg_out_div32(s, TCG_REG_TMP, args[1], args[2], const_args[2],
>                       opc == INDEX_op_remu_i32);
> -        tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
> +        tcg_out_arithc(s, TCG_REG_TMP, TCG_REG_TMP, args[2], const_args[2],
>                        ARITH_UMUL);
> -        tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
> +        tcg_out_arith(s, args[0], args[1], TCG_REG_TMP, ARITH_SUB);
>         break;
>
>     case INDEX_op_brcond_i32:
> @@ -1335,11 +1335,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>         goto gen_arith;
>     case INDEX_op_rem_i64:
>     case INDEX_op_remu_i64:
> -        tcg_out_arithc(s, TCG_REG_I5, args[1], args[2], const_args[2],
> +        tcg_out_arithc(s, TCG_REG_TMP, args[1], args[2], const_args[2],
>                        opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
> -        tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
> +        tcg_out_arithc(s, TCG_REG_TMP, TCG_REG_TMP, args[2], const_args[2],
>                        ARITH_MULX);
> -        tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
> +        tcg_out_arith(s, args[0], args[1], TCG_REG_TMP, ARITH_SUB);
>         break;
>     case INDEX_op_ext32s_i64:
>         if (const_args[1]) {
> @@ -1537,15 +1537,17 @@ static void tcg_target_init(TCGContext *s)
>                      (1 << TCG_REG_O7));
>
>     tcg_regset_clear(s->reserved_regs);
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
> -#if TCG_TARGET_REG_BITS == 64
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use
> -#endif
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
> -    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); // zero
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); // reserved for os
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); // thread pointer
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); // frame pointer
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); // return address
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); // stack pointer
> +    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); // for internal use
> +    if (TCG_TARGET_REG_BITS == 64) {
> +        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); // for internal use

Please fix the comment style above; QEMU code uses /* */ comments rather than //.

> +    }
> +
>     tcg_add_target_add_op_defs(sparc_op_defs);
>  }
>
> --
> 1.7.7.6
>
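For reference, a sketch of how the reserved-register setup from the last hunk might look once the comment style is addressed. The registers and comments are taken verbatim from the patch above; only the // comments are rewritten in the usual /* */ form:

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);  /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6);  /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7);  /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);  /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); /* for internal use */
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); /* for internal use */
    }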
