qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v4 08/33] tcg-aarch64: Introduce tcg_fmt_Rdnm an


From: Claudio Fontana
Subject: Re: [Qemu-devel] [PATCH v4 08/33] tcg-aarch64: Introduce tcg_fmt_Rdnm and tcg_fmt_Rdnm_lsl
Date: Mon, 16 Sep 2013 10:41:48 +0200
User-agent: Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20130801 Thunderbird/17.0.8

On 14.09.2013 23:54, Richard Henderson wrote:
> Now that we've converted opcode fields to pre-shifted insns, we
> can merge the implementation of arithmetic and shift insns.
> 
> Simplify the left/right shift parameter to just the left shift
> needed by tcg_out_tlb_read.
> 
> Signed-off-by: Richard Henderson <address@hidden>
> ---
>  tcg/aarch64/tcg-target.c | 78 
> +++++++++++++++++++++++-------------------------
>  1 file changed, 38 insertions(+), 40 deletions(-)
> 
> diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
> index cc56fe5..0e7b67b 100644
> --- a/tcg/aarch64/tcg-target.c
> +++ b/tcg/aarch64/tcg-target.c
> @@ -302,6 +302,30 @@ static inline uint32_t tcg_in32(TCGContext *s)
>      return v;
>  }
>  
> +/*
> + * Encode various formats.
> + */
> +
> +/* This function can be used for both Arithmetic and Logical (shifted 
> register)
> +   type insns.  Since we don't actually use the other available shifts, we 
> only
> +   support LSL here.  */
> +static inline void tcg_fmt_Rdnm_lsl(TCGContext *s, AArch64Insn insn,
> +                                    TCGType sf, TCGReg rd, TCGReg rn,
> +                                    TCGReg rm, int imm6)
> +{
> +    /* Note that LSL is bits {23,22} = 0.  */
> +    tcg_out32(s, insn | sf << 31 | imm6 << 10 | rm << 16 | rn << 5 | rd);
> +}
> +
> +/* This function can be used for most insns with 2 input registers and one
> +   output register.  This includes Arithmetic (shifted register, sans shift),
> +   Logical, Shift, Multiply, Divide, and Bit operation.  */
> +static inline void tcg_fmt_Rdnm(TCGContext *s, AArch64Insn insn, TCGType sf,
> +                                TCGReg rd, TCGReg rn, TCGReg rm)
> +{
> +    tcg_out32(s, insn | sf << 31 | rm << 16 | rn << 5 | rd);
> +}
> +

The name of the function should reflect the fact that we are actually emitting 
instructions,
not only formatting them. Also I despise mixed case in function names.
So theoretically, tcg_out_rdnm.

I'd still rather have a name of the function that expresses the meaning of what 
we are trying to do
(tcg_out_arith seems a good name, you can merge with shiftrot_reg if you want), 
rather than how we are doing it, if the model fits.

I guess the question would be: are all instructions formatted exactly that way — 
arithmetic and logical shifted-register instructions of some sort?
If so, I'd go with tcg_out_arith or similar. If not, we can say tcg_out_rdnm.

Also we lose a couple of things here.
The previous implementation made it impossible to pass wrong opcodes to the 
function, since the opcodes for the arithmetic instructions were a separate type.
It made it obvious to the reader in which cases the function can be used.
We would lose this with this change here (combined with the INSN change).

Also we lose the ability to do right-shifted arithmetic operations, which I 
feel we should provide for completeness and to reduce the pain for the 
programmer who will eventually need them.

>  static inline void tcg_out_ldst_9(TCGContext *s,
>                                    enum aarch64_ldst_op_data op_data,
>                                    enum aarch64_ldst_op_type op_type,
> @@ -445,23 +469,6 @@ static inline void tcg_out_st(TCGContext *s, TCGType 
> type, TCGReg arg,
>                   arg, arg1, arg2);
>  }
>  
> -static inline void tcg_out_arith(TCGContext *s, AArch64Insn insn,
> -                                 TCGType ext, TCGReg rd, TCGReg rn, TCGReg 
> rm,
> -                                 int shift_imm)
> -{
> -    /* Using shifted register arithmetic operations */
> -    /* if extended register operation (64bit) just OR with 0x80 << 24 */
> -    unsigned int shift, base = insn | (ext ? 0x80000000 : 0);
> -    if (shift_imm == 0) {
> -        shift = 0;
> -    } else if (shift_imm > 0) {
> -        shift = shift_imm << 10 | 1 << 22;
> -    } else /* (shift_imm < 0) */ {
> -        shift = (-shift_imm) << 10;
> -    }
> -    tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
> -}
> -
>  static inline void tcg_out_mul(TCGContext *s, TCGType ext,
>                                 TCGReg rd, TCGReg rn, TCGReg rm)
>  {
> @@ -470,15 +477,6 @@ static inline void tcg_out_mul(TCGContext *s, TCGType 
> ext,
>      tcg_out32(s, base | rm << 16 | rn << 5 | rd);
>  }
>  
> -static inline void tcg_out_shiftrot_reg(TCGContext *s,
> -                                        AArch64Insn insn, TCGType ext,
> -                                        TCGReg rd, TCGReg rn, TCGReg rm)
> -{
> -    /* using 2-source data processing instructions 0x1ac02000 */
> -    unsigned int base = insn | (ext ? 0x80000000 : 0);
> -    tcg_out32(s, base | rm << 16 | rn << 5 | rd);
> -}
> -
>  static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
>                                  TCGReg rn, unsigned int a, unsigned int b)
>  {
> @@ -546,7 +544,7 @@ static inline void tcg_out_cmp(TCGContext *s, TCGType 
> ext, TCGReg rn,
>                                 TCGReg rm)
>  {
>      /* Using CMP alias SUBS wzr, Wn, Wm */
> -    tcg_out_arith(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, 0);
> +    tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm);
>  }
>  
>  static inline void tcg_out_cset(TCGContext *s, TCGType ext,
> @@ -906,8 +904,8 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg 
> addr_reg,
>      tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
>      /* Merge the tlb index contribution into X2.
>         X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
> -    tcg_out_arith(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
> -                  TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
> +    tcg_fmt_Rdnm_lsl(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
> +                     TCG_REG_X0, CPU_TLB_ENTRY_BITS);
>      /* Merge "low bits" from tlb offset, load the tlb comparator into X0.
>         X0 = load [X2 + (tlb_offset & 0x000fff)] */
>      tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? LDST_64 : LDST_32,
> @@ -1183,27 +1181,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>  
>      case INDEX_op_add_i64:
>      case INDEX_op_add_i32:
> -        tcg_out_arith(s, INSN_ADD, ext, a0, a1, a2, 0);
> +        tcg_fmt_Rdnm(s, INSN_ADD, ext, a0, a1, a2);
>          break;
>  
>      case INDEX_op_sub_i64:
>      case INDEX_op_sub_i32:
> -        tcg_out_arith(s, INSN_SUB, ext, a0, a1, a2, 0);
> +        tcg_fmt_Rdnm(s, INSN_SUB, ext, a0, a1, a2);
>          break;
>  
>      case INDEX_op_and_i64:
>      case INDEX_op_and_i32:
> -        tcg_out_arith(s, INSN_AND, ext, a0, a1, a2, 0);
> +        tcg_fmt_Rdnm(s, INSN_AND, ext, a0, a1, a2);
>          break;
>  
>      case INDEX_op_or_i64:
>      case INDEX_op_or_i32:
> -        tcg_out_arith(s, INSN_ORR, ext, a0, a1, a2, 0);
> +        tcg_fmt_Rdnm(s, INSN_ORR, ext, a0, a1, a2);
>          break;
>  
>      case INDEX_op_xor_i64:
>      case INDEX_op_xor_i32:
> -        tcg_out_arith(s, INSN_EOR, ext, a0, a1, a2, 0);
> +        tcg_fmt_Rdnm(s, INSN_EOR, ext, a0, a1, a2);
>          break;
>  
>      case INDEX_op_mul_i64:
> @@ -1216,7 +1214,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          if (c2) {
>              tcg_out_shl(s, ext, a0, a1, a2);
>          } else {
> -            tcg_out_shiftrot_reg(s, INSN_LSLV, ext, a0, a1, a2);
> +            tcg_fmt_Rdnm(s, INSN_LSLV, ext, a0, a1, a2);
>          }
>          break;
>  
> @@ -1225,7 +1223,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          if (c2) {
>              tcg_out_shr(s, ext, a0, a1, a2);
>          } else {
> -            tcg_out_shiftrot_reg(s, INSN_LSRV, ext, a0, a1, a2);
> +            tcg_fmt_Rdnm(s, INSN_LSRV, ext, a0, a1, a2);
>          }
>          break;
>  
> @@ -1234,7 +1232,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          if (c2) {
>              tcg_out_sar(s, ext, a0, a1, a2);
>          } else {
> -            tcg_out_shiftrot_reg(s, INSN_ASRV, ext, a0, a1, a2);
> +            tcg_fmt_Rdnm(s, INSN_ASRV, ext, a0, a1, a2);
>          }
>          break;
>  
> @@ -1243,7 +1241,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          if (c2) {
>              tcg_out_rotr(s, ext, a0, a1, a2);
>          } else {
> -            tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, a2);
> +            tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, a2);
>          }
>          break;
>  
> @@ -1252,8 +1250,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          if (c2) {
>              tcg_out_rotl(s, ext, a0, a1, a2);
>          } else {
> -            tcg_out_arith(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
> -            tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
> +            tcg_fmt_Rdnm(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
> +            tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
>          }
>          break;
>  
> 





reply via email to

[Prev in Thread] Current Thread [Next in Thread]