[PATCH v2 41/51] tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift

From: Richard Henderson
Subject: [PATCH v2 41/51] tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
Date: Thu, 19 Dec 2024 20:10:53 -0800
Avoid the use of the OptContext slots. Find TempOptInfo once.
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 8735dc0c9c..da48aadd12 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2554,6 +2554,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
     uint64_t s_mask, z_mask, sign;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
         fold_ix_to_i(ctx, op, 0) ||
@@ -2561,18 +2562,19 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    s_mask = arg_info(op->args[1])->s_mask;
-    z_mask = arg_info(op->args[1])->z_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    s_mask = t1->s_mask;
+    z_mask = t1->z_mask;
 
-    if (arg_is_const(op->args[2])) {
-        int sh = arg_info(op->args[2])->val;
-
-        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+    if (t2->is_const) {
+        int sh = t2->val;
+        z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
 
         s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
-        ctx->s_mask = smask_from_smask(s_mask);
+        s_mask = smask_from_smask(s_mask);
 
-        return fold_masks(ctx, op);
+        return fold_masks_zs(ctx, op, z_mask, s_mask);
     }
 
     switch (op->opc) {
@@ -2581,23 +2583,22 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
          * Arithmetic right shift will not reduce the number of
          * input sign repetitions.
          */
-        ctx->s_mask = s_mask;
-        break;
+        return fold_masks_s(ctx, op, s_mask);
     CASE_OP_32_64(shr):
         /*
          * If the sign bit is known zero, then logical right shift
-         * will not reduced the number of input sign repetitions.
+         * will not reduce the number of input sign repetitions.
          */
         sign = (s_mask & -s_mask) >> 1;
         if (sign && !(z_mask & sign)) {
-            ctx->s_mask = s_mask;
+            return fold_masks_s(ctx, op, s_mask);
         }
         break;
     default:
         break;
     }
 
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
--
2.43.0
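
As background for the constant-shift case above: do_constant_folding applies the shift opcode to the mask values themselves, which works because shifting a value by a known constant shifts its known-zero bits by the same amount. The standalone program below is a minimal sketch of that property for a logical right shift, not QEMU code; the values are purely illustrative, and it assumes the usual z_mask convention that a clear bit means the corresponding result bit is known to be zero.

/*
 * Standalone illustration (not QEMU code): for a constant logical right
 * shift, the known-zero mask of the result is simply the input's
 * known-zero mask shifted by the same amount.  Values are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Suppose the upper 32 bits of the input are known to be zero. */
    uint64_t z_mask = 0x00000000ffffffffull;   /* set bits may be nonzero */
    int sh = 4;

    /* Any value satisfying the mask, shifted right by sh... */
    uint64_t val = 0x0000000012345678ull & z_mask;
    uint64_t shifted_val = val >> sh;

    /* ...still satisfies the mask shifted right by sh. */
    uint64_t shifted_mask = z_mask >> sh;

    printf("value  >> %d = %016" PRIx64 "\n", sh, shifted_val);
    printf("z_mask >> %d = %016" PRIx64 "\n", sh, shifted_mask);
    printf("still contained: %s\n",
           (shifted_val & ~shifted_mask) == 0 ? "yes" : "no");
    return 0;
}

The same reasoning is what allows the patch to pass the locally computed masks straight to fold_masks_zs instead of writing them back into the OptContext slots first.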