From: Richard Henderson
Subject: [Qemu-devel] [PATCH v4 05/33] tcg-aarch64: Change enum aarch64_arith_opc to AArch64Insn
Date: Sat, 14 Sep 2013 14:54:22 -0700

And since we're no longer talking about opcodes, change the values so
they are already shifted into the opcode field, avoiding a shift at
runtime.

Signed-off-by: Richard Henderson <address@hidden>
---
 tcg/aarch64/tcg-target.c | 43 +++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)
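
For illustration only (not part of the diff below): a minimal standalone
C sketch of the equivalence this change relies on.  The old code shifted
the one-byte opcode into place at emit time, (0x80 | opc) << 24 for the
64-bit form; the new enum stores the opcode already shifted into bits
[31:24], so only the sf bit (0x80000000) needs to be OR'd in.

    #include <assert.h>
    #include <stdint.h>

    /* Old style: one-byte opcode, shifted into place at emit time.  */
    enum { ARITH_ADD = 0x0b };
    /* New style: the same opcode pre-shifted into the instruction word.  */
    enum { INSN_ADD = 0x0b000000 };

    int main(void)
    {
        int ext = 1;  /* 1 = 64-bit (sf bit set), 0 = 32-bit */

        /* Old computation: shift the opcode byte at runtime.  */
        uint32_t old_base = (uint32_t)(ext ? (0x80 | ARITH_ADD) : ARITH_ADD) << 24;

        /* New computation: just OR in the sf bit.  */
        uint32_t new_base = INSN_ADD | (ext ? 0x80000000u : 0);

        /* Both are 0x8b000000, the 64-bit ADD (shifted register) encoding.  */
        assert(old_base == new_base);
        return 0;
    }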

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 8f5814d..99d9884 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -204,16 +204,19 @@ enum aarch64_ldst_op_type { /* type of operation */
     LDST_LD_S_W = 0xc,  /* load and sign-extend into Wt */
 };
 
-enum aarch64_arith_opc {
-    ARITH_AND = 0x0a,
-    ARITH_ADD = 0x0b,
-    ARITH_OR = 0x2a,
-    ARITH_ADDS = 0x2b,
-    ARITH_XOR = 0x4a,
-    ARITH_SUB = 0x4b,
-    ARITH_ANDS = 0x6a,
-    ARITH_SUBS = 0x6b,
-};
+typedef enum {
+    /* Logical shifted register instructions */
+    INSN_AND    = 0x0a000000,
+    INSN_ORR    = 0x2a000000,
+    INSN_EOR    = 0x4a000000,
+    INSN_ANDS   = 0x6a000000,
+
+    /* Add/subtract shifted register instructions */
+    INSN_ADD    = 0x0b000000,
+    INSN_ADDS   = 0x2b000000,
+    INSN_SUB    = 0x4b000000,
+    INSN_SUBS   = 0x6b000000,
+} AArch64Insn;
 
 enum aarch64_srr_opc {
     SRR_SHL = 0x0,
@@ -443,13 +446,13 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                  arg, arg1, arg2);
 }
 
-static inline void tcg_out_arith(TCGContext *s, enum aarch64_arith_opc opc,
+static inline void tcg_out_arith(TCGContext *s, AArch64Insn insn,
                                  TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm,
                                  int shift_imm)
 {
     /* Using shifted register arithmetic operations */
     /* if extended register operation (64bit) just OR with 0x80 << 24 */
-    unsigned int shift, base = ext ? (0x80 | opc) << 24 : opc << 24;
+    unsigned int shift, base = insn | (ext ? 0x80000000 : 0);
     if (shift_imm == 0) {
         shift = 0;
     } else if (shift_imm > 0) {
@@ -544,7 +547,7 @@ static inline void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg rn,
                                TCGReg rm, int shift_imm)
 {
     /* Using CMP alias SUBS wzr, Wn, Wm */
-    tcg_out_arith(s, ARITH_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
+    tcg_out_arith(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
 }
 
 static inline void tcg_out_cset(TCGContext *s, TCGType ext,
@@ -904,7 +907,7 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
     tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
     /* Merge the tlb index contribution into X2.
        X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
-    tcg_out_arith(s, ARITH_ADD, 1, TCG_REG_X2, TCG_REG_X2,
+    tcg_out_arith(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
                   TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
     /* Merge "low bits" from tlb offset, load the tlb comparator into X0.
        X0 = load [X2 + (tlb_offset & 0x000fff)] */
@@ -1181,27 +1184,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_add_i64:
     case INDEX_op_add_i32:
-        tcg_out_arith(s, ARITH_ADD, ext, a0, a1, a2, 0);
+        tcg_out_arith(s, INSN_ADD, ext, a0, a1, a2, 0);
         break;
 
     case INDEX_op_sub_i64:
     case INDEX_op_sub_i32:
-        tcg_out_arith(s, ARITH_SUB, ext, a0, a1, a2, 0);
+        tcg_out_arith(s, INSN_SUB, ext, a0, a1, a2, 0);
         break;
 
     case INDEX_op_and_i64:
     case INDEX_op_and_i32:
-        tcg_out_arith(s, ARITH_AND, ext, a0, a1, a2, 0);
+        tcg_out_arith(s, INSN_AND, ext, a0, a1, a2, 0);
         break;
 
     case INDEX_op_or_i64:
     case INDEX_op_or_i32:
-        tcg_out_arith(s, ARITH_OR, ext, a0, a1, a2, 0);
+        tcg_out_arith(s, INSN_ORR, ext, a0, a1, a2, 0);
         break;
 
     case INDEX_op_xor_i64:
     case INDEX_op_xor_i32:
-        tcg_out_arith(s, ARITH_XOR, ext, a0, a1, a2, 0);
+        tcg_out_arith(s, INSN_EOR, ext, a0, a1, a2, 0);
         break;
 
     case INDEX_op_mul_i64:
@@ -1250,7 +1253,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {    /* ROR / EXTR Wd, Wm, Wm, 32 - m */
             tcg_out_rotl(s, ext, a0, a1, a2);
         } else {
-            tcg_out_arith(s, ARITH_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
+            tcg_out_arith(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
             tcg_out_shiftrot_reg(s, SRR_ROR, ext, a0, a1, TCG_REG_TMP);
         }
         break;
-- 
1.8.3.1



