[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
|
From: |
Richard Henderson |
|
Subject: |
[PATCH 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies |
|
Date: |
Sun, 29 Aug 2021 23:24:42 -0700 |
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
and muls2_i64.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 735eec6462..ae464339b4 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1392,19 +1392,44 @@ static bool fold_multiply(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
- uint32_t a = arg_info(op->args[2])->val;
- uint32_t b = arg_info(op->args[3])->val;
- uint64_t r = (uint64_t)a * b;
+ uint64_t a = arg_info(op->args[2])->val;
+ uint64_t b = arg_info(op->args[3])->val;
+ uint64_t h, l;
TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+ TCGOp *op2;
+
+ switch (op->opc) {
+ case INDEX_op_mulu2_i32:
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
+ h = (int32_t)(l >> 32);
+ l = (int32_t)l;
+ break;
+ case INDEX_op_muls2_i32:
+ l = (int64_t)(int32_t)a * (int32_t)b;
+ h = l >> 32;
+ l = (int32_t)l;
+ break;
+ case INDEX_op_mulu2_i64:
+ mulu64(&l, &h, a, b);
+ break;
+ case INDEX_op_muls2_i64:
+ muls64(&l, &h, a, b);
+ break;
+ default:
+ g_assert_not_reached();
+ }
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+ tcg_opt_gen_movi(ctx, op, rl, l);
+ tcg_opt_gen_movi(ctx, op2, rh, h);
return true;
}
return false;
@@ -1904,8 +1929,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(muluh):
done = fold_multiply(&ctx, op);
break;
- case INDEX_op_mulu2_i32:
- done = fold_mulu2_i32(&ctx, op);
+ CASE_OP_32_64(muls2):
+ CASE_OP_32_64(mulu2):
+ done = fold_multiply2(&ctx, op);
break;
CASE_OP_32_64(nand):
done = fold_nand(&ctx, op);
--
2.25.1
- Re: [PATCH 11/48] tcg/optimize: Return true from tcg_opt_gen_{mov,movi}, (continued)
- [PATCH 32/48] tcg/optimize: Split out fold_xi_to_i, Richard Henderson, 2021/08/30
- [PATCH 34/48] tcg/optimize: Split out fold_to_not, Richard Henderson, 2021/08/30
- [PATCH 31/48] tcg/optimize: Split out fold_xx_to_x, Richard Henderson, 2021/08/30
- [PATCH 33/48] tcg/optimize: Add type to OptContext, Richard Henderson, 2021/08/30
- [PATCH 35/48] tcg/optimize: Split out fold_sub_to_neg, Richard Henderson, 2021/08/30
- [PATCH 36/48] tcg/optimize: Split out fold_xi_to_x, Richard Henderson, 2021/08/30
- [PATCH 37/48] tcg/optimize: Split out fold_ix_to_i, Richard Henderson, 2021/08/30
- [PATCH 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies,
Richard Henderson <=
- [PATCH 40/48] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops, Richard Henderson, 2021/08/30
- [PATCH 41/48] tcg/optimize: Sink commutative operand swapping into fold functions, Richard Henderson, 2021/08/30
- [PATCH 47/48] tcg/optimize: Propagate sign info for bit counting, Richard Henderson, 2021/08/30
- [PATCH 44/48] tcg/optimize: Optimize sign extensions, Richard Henderson, 2021/08/30
- [PATCH 45/48] tcg/optimize: Propagate sign info for logical operations, Richard Henderson, 2021/08/30
- [PATCH 43/48] tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values, Richard Henderson, 2021/08/30
- [PATCH 46/48] tcg/optimize: Propagate sign info for setcond, Richard Henderson, 2021/08/30
- [PATCH 42/48] tcg/optimize: Add more simplifications for orc, Richard Henderson, 2021/08/30