[PATCH v2 36/48] tcg/optimize: Split out fold_xi_to_x
From: Richard Henderson
Subject: [PATCH v2 36/48] tcg/optimize: Split out fold_xi_to_x
Date: Thu, 7 Oct 2021 12:54:44 -0700
Pull the "op r, a, i => mov r, a" optimization into a function,
and use it in the outer-most logical operations.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
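At the TCG opcode level, the new helper performs the same rewrite the
deleted switch statement did. A loose before/after sketch (syntax
approximates tcg dump output; the temporaries are hypothetical):

    and_i64 tmp3, tmp2, $0xffffffffffffffff   =>  mov_i64 tmp3, tmp2
    shl_i64 tmp3, tmp2, $0x0                  =>  mov_i64 tmp3, tmp2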
tcg/optimize.c | 60 +++++++++++++++++++++-----------------------------
1 file changed, 25 insertions(+), 35 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 9d7b174443..d1d0f3f60c 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -749,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
return false;
}
+/* If the binary operation has second argument @i, fold to identity. */
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+ }
+ return false;
+}
+
/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -787,7 +796,11 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
static bool fold_add(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
@@ -827,6 +840,7 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xi_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, -1) ||
fold_xx_to_x(ctx, op)) {
return true;
}
@@ -837,6 +851,7 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_ix_to_not(ctx, op, -1)) {
return true;
}
@@ -1041,6 +1056,7 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, -1) ||
fold_xi_to_not(ctx, op, 0)) {
return true;
}
@@ -1225,6 +1241,7 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
static bool fold_or(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_xx_to_x(ctx, op)) {
return true;
}
@@ -1348,7 +1365,11 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
@@ -1391,6 +1412,7 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_sub_to_neg(ctx, op)) {
return true;
}
@@ -1406,6 +1428,7 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_xi_to_not(ctx, op, -1)) {
return true;
}
@@ -1529,39 +1552,6 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Simplify expression for "op r, a, const => mov r, a" cases */
- switch (opc) {
- CASE_OP_32_64_VEC(add):
- CASE_OP_32_64_VEC(sub):
- CASE_OP_32_64_VEC(or):
- CASE_OP_32_64_VEC(xor):
- CASE_OP_32_64_VEC(andc):
- CASE_OP_32_64(shl):
- CASE_OP_32_64(shr):
- CASE_OP_32_64(sar):
- CASE_OP_32_64(rotl):
- CASE_OP_32_64(rotr):
- if (!arg_is_const(op->args[1])
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
- }
- break;
- CASE_OP_32_64_VEC(and):
- CASE_OP_32_64_VEC(orc):
- CASE_OP_32_64(eqv):
- if (!arg_is_const(op->args[1])
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == -1) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
- }
- break;
- default:
- break;
- }
-
/* Simplify using known-zero bits. Currently only ops with a single
output argument is supported. */
z_mask = -1;
--
2.25.1
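The identity constants passed to fold_xi_to_x above are exactly the
values i for which "x op i == x" holds. A minimal standalone check of
those identities, not QEMU code, assuming 64-bit two's-complement
arithmetic:

/* Standalone sanity check of the identity constants used in this
 * patch; not QEMU code.  Assumes 64-bit two's-complement arithmetic. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x = 0x123456789abcdef0ull;
    uint64_t m1 = (uint64_t)-1;

    assert((x + 0) == x);            /* add:  identity 0           */
    assert((x - 0) == x);            /* sub:  identity 0           */
    assert((x | 0) == x);            /* or:   identity 0           */
    assert((x ^ 0) == x);            /* xor:  identity 0           */
    assert((x << 0) == x);           /* shl/shr/sar/rotl/rotr by 0 */
    assert((x & m1) == x);           /* and:  identity -1          */
    assert((x & ~(uint64_t)0) == x); /* andc 0:  x & ~0  == x      */
    assert(~(x ^ m1) == x);          /* eqv -1:  ~(x ^ -1) == x    */
    return 0;
}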