Re: [Qemu-devel] [PATCH] tcg: Use macro instead of hard code number 0xffffffff for tcg_target_ulong using


From: Chen Gang S
Subject: Re: [Qemu-devel] [PATCH] tcg: Use macro instead of hard code number 0xffffffff for tcg_target_ulong using
Date: Mon, 19 Jan 2015 00:25:21 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:31.0) Gecko/20100101 Thunderbird/31.4.0

Excuse me: Gmail has been broken in China these days, and I can only log
in to the Gmail web site via Safari on my MacBook (which cannot send
plain-text mail), so I have to use another mail address of mine to send
the patch.

Thanks.

On 1/19/15 00:22, Chen Gang S wrote:
> For tcg_target_ulong (including TCGArg), the lower 32-bit mask and the
> upper 32-bit mask are needed in many places, so define macros for them.
> This keeps the code simpler and avoids the mix of coding styles currently
> in use:
> 
>  - For the lower mask, some sites append 'u', some 'U', and some nothing.
> 
>  - For the upper mask, some sites append 'ull', others use a type cast.
> 
>  - For the lower mask in contexts where the upper bits may be used,
>    'ull' is appended.
> 
> 
> Signed-off-by: Chen Gang <address@hidden>
> ---
>  tcg/optimize.c        | 16 ++++++++--------
>  tcg/s390/tcg-target.c | 24 ++++++++++++------------
>  tcg/tcg.h             |  3 +++
>  3 files changed, 23 insertions(+), 20 deletions(-)
> 
> diff --git a/tcg/optimize.c b/tcg/optimize.c
> index 34ae3c2..dc29223 100644
> --- a/tcg/optimize.c
> +++ b/tcg/optimize.c
> @@ -174,7 +174,7 @@ static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
>      mask = temps[src].mask;
>      if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
>          /* High bits of the destination are now garbage.  */
> -        mask |= ~0xffffffffull;
> +        mask |= TCG_TARGET_ULONG_M32H;
>      }
>      temps[dst].mask = mask;
>  
> @@ -211,7 +211,7 @@ static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
>      mask = val;
>      if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
>          /* High bits of the destination are now garbage.  */
> -        mask |= ~0xffffffffull;
> +        mask |= TCG_TARGET_ULONG_M32H;
>      }
>      temps[dst].mask = mask;
>  
> @@ -354,7 +354,7 @@ static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
>  {
>      TCGArg res = do_constant_folding_2(op, x, y);
>      if (op_bits(op) == 32) {
> -        res &= 0xffffffff;
> +        res &= TCG_TARGET_ULONG_M32L;
>      }
>      return res;
>  }
> @@ -804,7 +804,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
>                  break;
>              }
>          case INDEX_op_ext32u_i64:
> -            mask = 0xffffffffU;
> +            mask = TCG_TARGET_ULONG_M32L;
>              goto and_const;
>  
>          CASE_OP_32_64(and):
> @@ -895,7 +895,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
>              mask = 0xffff;
>              break;
>          case INDEX_op_ld32u_i64:
> -            mask = 0xffffffffu;
> +            mask = TCG_TARGET_ULONG_M32L;
>              break;
>  
>          CASE_OP_32_64(qemu_ld):
> @@ -916,9 +916,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
>             need to record that the high bits contain garbage.  */
>          partmask = mask;
>          if (!(def->flags & TCG_OPF_64BIT)) {
> -            mask |= ~(tcg_target_ulong)0xffffffffu;
> -            partmask &= 0xffffffffu;
> -            affected &= 0xffffffffu;
> +            mask |= TCG_TARGET_ULONG_M32H;
> +            partmask &= TCG_TARGET_ULONG_M32L;
> +            affected &= TCG_TARGET_ULONG_M32L;
>          }
>  
>          if (partmask == 0) {
> diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
> index 63e9c82..394fefc 100644
> --- a/tcg/s390/tcg-target.c
> +++ b/tcg/s390/tcg-target.c
> @@ -677,11 +677,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
>              tcg_out_insn(s, RIL, LGFI, ret, sval);
>              return;
>          }
> -        if (uval <= 0xffffffff) {
> +        if (uval <= TCG_TARGET_ULONG_M32L) {
>              tcg_out_insn(s, RIL, LLILF, ret, uval);
>              return;
>          }
> -        if ((uval & 0xffffffff) == 0) {
> +        if ((uval & TCG_TARGET_ULONG_M32L) == 0) {
>              tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
>              return;
>          }
> @@ -702,7 +702,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
>          /* A 32-bit unsigned value can be loaded in 2 insns.  And given
>             that the lli_insns loop above did not succeed, we know that
>             both insns are required.  */
> -        if (uval <= 0xffffffff) {
> +        if (uval <= TCG_TARGET_ULONG_M32L) {
>              tcg_out_insn(s, RI, LLILL, ret, uval);
>              tcg_out_insn(s, RI, IILH, ret, uval >> 16);
>              return;
> @@ -727,7 +727,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
>      /* If we get here, both the high and low parts have non-zero bits.  */
>  
>      /* Recurse to load the lower 32-bits.  */
> -    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
> +    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & TCG_TARGET_ULONG_M32L);
>  
>      /* Insert data into the high 32-bits.  */
>      uval = uval >> 31 >> 1;
> @@ -1006,7 +1006,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
>      /* Try all 48-bit insns that can perform it in one go.  */
>      if (facilities & FACILITY_EXT_IMM) {
>          for (i = 0; i < 2; i++) {
> -            tcg_target_ulong mask = ~(0xffffffffull << i*32);
> +            tcg_target_ulong mask = ~(TCG_TARGET_ULONG_M32L << i*32);
>              if (((val | ~valid) & mask) == mask) {
>                  tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
>                  return;
> @@ -1055,7 +1055,7 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
>  
>          /* Try all 48-bit insns that can perform it in one go.  */
>          for (i = 0; i < 2; i++) {
> -            tcg_target_ulong mask = (0xffffffffull << i*32);
> +            tcg_target_ulong mask = (TCG_TARGET_ULONG_M32L << i*32);
>              if ((val & mask) != 0 && (val & ~mask) == 0) {
>                  tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
>                  return;
> @@ -1065,8 +1065,8 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
>          /* Perform the OR via sequential modifications to the high and
>             low parts.  Do this via recursion to handle 16-bit vs 32-bit
>             masks in each half.  */
> -        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
> -        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
> +        tgen64_ori(s, dest, val & TCG_TARGET_ULONG_M32L);
> +        tgen64_ori(s, dest, val & TCG_TARGET_ULONG_M32H);
>      } else {
>          /* With no extended-immediate facility, we don't need to be so
>             clever.  Just iterate over the insns and mask in the constant.  */
> @@ -1082,10 +1082,10 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
>  static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
>  {
>      /* Perform the xor by parts.  */
> -    if (val & 0xffffffff) {
> +    if (val & TCG_TARGET_ULONG_M32L) {
>          tcg_out_insn(s, RIL, XILF, dest, val);
>      }
> -    if (val > 0xffffffff) {
> +    if (val > TCG_TARGET_ULONG_M32L) {
>          tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
>      }
>  }
> @@ -1793,14 +1793,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          break;
>      case INDEX_op_or_i32:
>          if (const_args[2]) {
> -            tgen64_ori(s, args[0], args[2] & 0xffffffff);
> +            tgen64_ori(s, args[0], args[2] & TCG_TARGET_ULONG_M32L);
>          } else {
>              tcg_out_insn(s, RR, OR, args[0], args[2]);
>          }
>          break;
>      case INDEX_op_xor_i32:
>          if (const_args[2]) {
> -            tgen64_xori(s, args[0], args[2] & 0xffffffff);
> +            tgen64_xori(s, args[0], args[2] & TCG_TARGET_ULONG_M32L);
>          } else {
>              tcg_out_insn(s, RR, XR, args[0], args[2]);
>          }
> diff --git a/tcg/tcg.h b/tcg/tcg.h
> index 944b877..4f113ed 100644
> --- a/tcg/tcg.h
> +++ b/tcg/tcg.h
> @@ -54,6 +54,9 @@ typedef uint64_t tcg_target_ulong;
>  #error unsupported
>  #endif
>  
> +#define TCG_TARGET_ULONG_M32L ((tcg_target_ulong)0xffffffffu)
> +#define TCG_TARGET_ULONG_M32H (~(tcg_target_ulong)0xffffffffu)
> +
>  #if TCG_TARGET_NB_REGS <= 32
>  typedef uint32_t TCGRegSet;
>  #elif TCG_TARGET_NB_REGS <= 64
> 
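
For reference, below is a minimal standalone sketch (illustration only, not
part of the patch) of how the two new macros are intended to be used. It
assumes a 64-bit build, i.e. TCG_TARGET_REG_BITS == 64 so that
tcg_target_ulong is uint64_t, and simply mirrors the definitions added to
tcg/tcg.h:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t tcg_target_ulong;

#define TCG_TARGET_ULONG_M32L ((tcg_target_ulong)0xffffffffu)   /* lower 32 bits */
#define TCG_TARGET_ULONG_M32H (~(tcg_target_ulong)0xffffffffu)  /* upper 32 bits */

int main(void)
{
    tcg_target_ulong val = UINT64_C(0x1234567890abcdef);

    /* Split a value into 32-bit halves, as tgen64_ori()/tgen64_xori() do. */
    printf("low  half: 0x%016" PRIx64 "\n", val & TCG_TARGET_ULONG_M32L);  /* 0x0000000090abcdef */
    printf("high half: 0x%016" PRIx64 "\n", val & TCG_TARGET_ULONG_M32H);  /* 0x1234567800000000 */

    /* Mark the upper 32 bits as garbage, as tcg_opt_gen_mov() does. */
    tcg_target_ulong mask = 0xffff;
    mask |= TCG_TARGET_ULONG_M32H;
    printf("mask:      0x%016" PRIx64 "\n", mask);                         /* 0xffffffff0000ffff */

    return 0;
}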

-- 
Chen Gang

Open, share, and attitude like air, water, and life which God blessed


