qemu-devel

From: Richard Henderson
Subject: [Qemu-devel] [PATCH 03/16] tcg-aarch64: Use TCGMemOp within qemu_ldst routines
Date: Wed, 4 Sep 2013 14:04:52 -0700
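
The aarch64 qemu_ld/st routines currently pass the memory operation
around as a bare int and decode it with magic masks ("opc & 3",
"opc & 0x04") plus a backend-local TCG_LDST_BSWAP define.  Convert
them to take a TCGMemOp and use the named MO_SIZE/MO_SIGN/MO_BSWAP
masks instead; the target-endian MO_TE* constants carry the byte-swap
bit themselves, so TCG_LDST_BSWAP can be dropped.

For reference, a minimal sketch of the TCGMemOp layout this conversion
assumes (the authoritative definition lives in tcg/tcg.h; the values
below are quoted for illustration only, not redefined by this patch):

    /* Assumed TCGMemOp bit layout, for reading the hunks below.  */
    typedef enum TCGMemOp {
        MO_8     = 0,
        MO_16    = 1,
        MO_32    = 2,
        MO_64    = 3,
        MO_SIZE  = 3,                 /* mask: access size (log2 bytes)   */
        MO_SIGN  = 4,                 /* flag: sign-extend the value      */
        MO_BSWAP = 8,                 /* flag: byte-swap the memory value */
        MO_SSIZE = MO_SIZE | MO_SIGN, /* mask: size plus sign             */

        MO_UB    = MO_8,              /* unsigned byte   */
        MO_UW    = MO_16,             /* unsigned word   */
        MO_UL    = MO_32,             /* unsigned long   */
        MO_SB    = MO_SIGN | MO_8,    /* signed byte     */
        MO_SW    = MO_SIGN | MO_16,   /* signed word     */
        MO_SL    = MO_SIGN | MO_32,   /* signed long     */
        MO_Q     = MO_64              /* 64-bit quantity */
        /* MO_TEUW/MO_TESW/MO_TEUL/MO_TESL/MO_TEQ additionally fold in
           MO_BSWAP when target and host endianness differ, which is
           what replaces the old TCG_LDST_BSWAP test.  */
    } TCGMemOp;

With that layout, "lb->opc & MO_SIZE" selects the load/store helper by
access size and "opc & MO_BSWAP" replaces the compile-time
TCG_LDST_BSWAP check.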

Signed-off-by: Richard Henderson <address@hidden>
---
 tcg/aarch64/tcg-target.c | 126 +++++++++++++++++++++++------------------------
 1 file changed, 62 insertions(+), 64 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 651327e..608b735 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -21,12 +21,6 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 };
 #endif /* NDEBUG */
 
-#ifdef TARGET_WORDS_BIGENDIAN
- #define TCG_LDST_BSWAP 1
-#else
- #define TCG_LDST_BSWAP 0
-#endif
-
 static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
     TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
@@ -902,7 +896,7 @@ static inline void tcg_out_rev16(TCGContext *s, bool ext, TCGReg rd, TCGReg rm)
     tcg_out32(s, base | rm << 5 | rd);
 }
 
-static inline void tcg_out_sxt(TCGContext *s, bool ext, int s_bits,
+static inline void tcg_out_sxt(TCGContext *s, bool ext, TCGMemOp s_bits,
                                TCGReg rd, TCGReg rn)
 {
     /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
@@ -910,7 +904,7 @@ static inline void tcg_out_sxt(TCGContext *s, bool ext, int s_bits,
     tcg_out_sbfm(s, ext, rd, rn, 0, bits);
 }
 
-static inline void tcg_out_uxt(TCGContext *s, int s_bits,
+static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits,
                                TCGReg rd, TCGReg rn)
 {
     /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
@@ -1006,10 +1000,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
     tcg_out_adr(s, TCG_REG_X3, (uintptr_t)lb->raddr);
 
-    tcg_out_call(s, (tcg_target_long)qemu_ld_helpers[lb->opc & 3]);
+    tcg_out_call(s, (tcg_target_long)qemu_ld_helpers[lb->opc & MO_SIZE]);
 
-    if (lb->opc & 0x04) {
-        tcg_out_sxt(s, 1, lb->opc & 3, lb->datalo_reg, TCG_REG_X0);
+    if (lb->opc & MO_SIGN) {
+        tcg_out_sxt(s, 1, lb->opc & MO_SIZE, lb->datalo_reg, TCG_REG_X0);
     } else {
         tcg_out_movr(s, 1, lb->datalo_reg, TCG_REG_X0);
     }
@@ -1027,7 +1021,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
     tcg_out_adr(s, TCG_REG_X4, (uintptr_t)lb->raddr);
 
-    tcg_out_call(s, (tcg_target_long)qemu_st_helpers[lb->opc & 3]);
+    tcg_out_call(s, (tcg_target_long)qemu_st_helpers[lb->opc & MO_SIZE]);
 
     tcg_out_goto(s, (tcg_target_long)lb->raddr);
 }
@@ -1045,7 +1039,7 @@ void tcg_out_tb_finalize(TCGContext *s)
     }
 }
 
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
                                 TCGReg data_reg, TCGReg addr_reg,
                                 int mem_index,
                                 uint8_t *raddr, uint8_t *label_ptr)
@@ -1072,7 +1066,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
    slow path for the failure case, which will be patched later when finalizing
    the slow path. Generated code returns the host addend in X1,
    clobbers X0,X2,X3,TMP. */
-static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, int s_bits,
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
                              uint8_t **label_ptr, int mem_index, int is_read)
 {
     TCGReg base = TCG_AREG0;
@@ -1129,49 +1123,51 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, int s_bits,
 
 #endif /* CONFIG_SOFTMMU */
 
-static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r,
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data_r,
                                    TCGReg addr_r, TCGReg off_r)
 {
-    switch (opc) {
-    case 0:
+    const TCGMemOp bswap = opc & MO_BSWAP;
+
+    switch (opc & MO_SSIZE) {
+    case MO_UB:
         tcg_out_ldst_r(s, LDST_8, LDST_LD, data_r, addr_r, off_r);
         break;
-    case 0 | 4:
+    case MO_SB:
         tcg_out_ldst_r(s, LDST_8, LDST_LD_S_X, data_r, addr_r, off_r);
         break;
-    case 1:
+    case MO_UW:
         tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r);
-        if (TCG_LDST_BSWAP) {
+        if (bswap) {
             tcg_out_rev16(s, 0, data_r, data_r);
         }
         break;
-    case 1 | 4:
-        if (TCG_LDST_BSWAP) {
+    case MO_SW:
+        if (bswap) {
             tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r);
             tcg_out_rev16(s, 0, data_r, data_r);
-            tcg_out_sxt(s, 1, 1, data_r, data_r);
+            tcg_out_sxt(s, 1, MO_16, data_r, data_r);
         } else {
             tcg_out_ldst_r(s, LDST_16, LDST_LD_S_X, data_r, addr_r, off_r);
         }
         break;
-    case 2:
+    case MO_UL:
         tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r);
-        if (TCG_LDST_BSWAP) {
+        if (bswap) {
             tcg_out_rev(s, 0, data_r, data_r);
         }
         break;
-    case 2 | 4:
-        if (TCG_LDST_BSWAP) {
+    case MO_SL:
+        if (bswap) {
             tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r);
             tcg_out_rev(s, 0, data_r, data_r);
-            tcg_out_sxt(s, 1, 2, data_r, data_r);
+            tcg_out_sxt(s, 1, MO_32, data_r, data_r);
         } else {
             tcg_out_ldst_r(s, LDST_32, LDST_LD_S_X, data_r, addr_r, off_r);
         }
         break;
-    case 3:
+    case MO_Q:
         tcg_out_ldst_r(s, LDST_64, LDST_LD, data_r, addr_r, off_r);
-        if (TCG_LDST_BSWAP) {
+        if (bswap) {
             tcg_out_rev(s, 1, data_r, data_r);
         }
         break;
@@ -1180,31 +1176,33 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r,
     }
 }
 
-static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data_r,
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data_r,
                                    TCGReg addr_r, TCGReg off_r)
 {
-    switch (opc) {
-    case 0:
+    const TCGMemOp bswap = opc & MO_BSWAP;
+
+    switch (opc & MO_SIZE) {
+    case MO_8:
         tcg_out_ldst_r(s, LDST_8, LDST_ST, data_r, addr_r, off_r);
         break;
-    case 1:
-        if (TCG_LDST_BSWAP) {
+    case MO_16:
+        if (bswap) {
             tcg_out_rev16(s, 0, TCG_REG_TMP, data_r);
             tcg_out_ldst_r(s, LDST_16, LDST_ST, TCG_REG_TMP, addr_r, off_r);
         } else {
             tcg_out_ldst_r(s, LDST_16, LDST_ST, data_r, addr_r, off_r);
         }
         break;
-    case 2:
-        if (TCG_LDST_BSWAP) {
+    case MO_32:
+        if (bswap) {
             tcg_out_rev(s, 0, TCG_REG_TMP, data_r);
             tcg_out_ldst_r(s, LDST_32, LDST_ST, TCG_REG_TMP, addr_r, off_r);
         } else {
             tcg_out_ldst_r(s, LDST_32, LDST_ST, data_r, addr_r, off_r);
         }
         break;
-    case 3:
-        if (TCG_LDST_BSWAP) {
+    case MO_64:
+        if (bswap) {
             tcg_out_rev(s, 1, TCG_REG_TMP, data_r);
             tcg_out_ldst_r(s, LDST_64, LDST_ST, TCG_REG_TMP, addr_r, off_r);
         } else {
@@ -1216,11 +1214,12 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data_r,
     }
 }
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp opc)
 {
     TCGReg addr_reg, data_reg;
 #ifdef CONFIG_SOFTMMU
-    int mem_index, s_bits;
+    int mem_index;
+    TCGMemOp s_bits;
     uint8_t *label_ptr;
 #endif
     data_reg = args[0];
@@ -1228,7 +1227,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 
 #ifdef CONFIG_SOFTMMU
     mem_index = args[2];
-    s_bits = opc & 3;
+    s_bits = opc & MO_SIZE;
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_X1);
     add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg,
@@ -1239,11 +1238,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 #endif /* CONFIG_SOFTMMU */
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp opc)
 {
     TCGReg addr_reg, data_reg;
 #ifdef CONFIG_SOFTMMU
-    int mem_index, s_bits;
+    int mem_index;
+    TCGMemOp s_bits;
     uint8_t *label_ptr;
 #endif
     data_reg = args[0];
@@ -1251,7 +1251,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
 
 #ifdef CONFIG_SOFTMMU
     mem_index = args[2];
-    s_bits = opc & 3;
+    s_bits = opc & MO_SIZE;
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_X1);
@@ -1534,40 +1534,38 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld8u:
-        tcg_out_qemu_ld(s, args, 0 | 0);
+        tcg_out_qemu_ld(s, args, MO_UB);
         break;
     case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, args, 4 | 0);
+        tcg_out_qemu_ld(s, args, MO_SB);
         break;
     case INDEX_op_qemu_ld16u:
-        tcg_out_qemu_ld(s, args, 0 | 1);
+        tcg_out_qemu_ld(s, args, MO_TEUW);
         break;
     case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, args, 4 | 1);
+        tcg_out_qemu_ld(s, args, MO_TESW);
         break;
+    case INDEX_op_qemu_ld32:
     case INDEX_op_qemu_ld32u:
-        tcg_out_qemu_ld(s, args, 0 | 2);
+        tcg_out_qemu_ld(s, args, MO_TEUL);
         break;
     case INDEX_op_qemu_ld32s:
-        tcg_out_qemu_ld(s, args, 4 | 2);
-        break;
-    case INDEX_op_qemu_ld32:
-        tcg_out_qemu_ld(s, args, 0 | 2);
+        tcg_out_qemu_ld(s, args, MO_TESL);
         break;
     case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, args, 0 | 3);
+        tcg_out_qemu_ld(s, args, MO_TEQ);
         break;
     case INDEX_op_qemu_st8:
-        tcg_out_qemu_st(s, args, 0);
+        tcg_out_qemu_st(s, args, MO_UB);
         break;
     case INDEX_op_qemu_st16:
-        tcg_out_qemu_st(s, args, 1);
+        tcg_out_qemu_st(s, args, MO_TEUW);
         break;
     case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, args, 2);
+        tcg_out_qemu_st(s, args, MO_TEUL);
         break;
     case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, args, 3);
+        tcg_out_qemu_st(s, args, MO_TEQ);
         break;
 
     case INDEX_op_bswap32_i64:
@@ -1585,22 +1583,22 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8s_i32:
-        tcg_out_sxt(s, ext, 0, a0, a1);
+        tcg_out_sxt(s, ext, MO_8, a0, a1);
         break;
     case INDEX_op_ext16s_i64:
     case INDEX_op_ext16s_i32:
-        tcg_out_sxt(s, ext, 1, a0, a1);
+        tcg_out_sxt(s, ext, MO_16, a0, a1);
         break;
     case INDEX_op_ext32s_i64:
-        tcg_out_sxt(s, 1, 2, a0, a1);
+        tcg_out_sxt(s, 1, MO_32, a0, a1);
         break;
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext8u_i32:
-        tcg_out_uxt(s, 0, a0, a1);
+        tcg_out_uxt(s, MO_8, a0, a1);
         break;
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext16u_i32:
-        tcg_out_uxt(s, 1, a0, a1);
+        tcg_out_uxt(s, MO_16, a0, a1);
         break;
     case INDEX_op_ext32u_i64:
         tcg_out_movr(s, 0, a0, a1);
-- 
1.8.1.4