[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 54/72] tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
From: |
Richard Henderson |
Subject: |
[PULL 54/72] tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort |
Date: |
Tue, 24 Dec 2024 12:05:03 -0800 |
The big comment just above says functions should be sorted.
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 1df61378ea..c23f0d1392 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1619,6 +1619,36 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
+{
+ /* Canonicalize the comparison to put immediate second. */
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+ op->args[3] = tcg_swap_cond(op->args[3]);
+ }
+ return finish_folding(ctx, op);
+}
+
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
+{
+ /* If true and false values are the same, eliminate the cmp. */
+ if (args_are_copies(op->args[3], op->args[4])) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
+ }
+
+ /* Canonicalize the comparison to put immediate second. */
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+ op->args[5] = tcg_swap_cond(op->args[5]);
+ }
+ /*
+ * Canonicalize the "false" input reg to match the destination,
+ * so that the tcg backend can implement "move if true".
+ */
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
+ op->args[5] = tcg_invert_cond(op->args[5]);
+ }
+ return finish_folding(ctx, op);
+}
+
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask, s_mask;
@@ -2519,36 +2549,6 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
-{
- /* Canonicalize the comparison to put immediate second. */
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
- op->args[3] = tcg_swap_cond(op->args[3]);
- }
- return finish_folding(ctx, op);
-}
-
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
-{
- /* If true and false values are the same, eliminate the cmp. */
- if (args_are_copies(op->args[3], op->args[4])) {
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
- }
-
- /* Canonicalize the comparison to put immediate second. */
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
- op->args[5] = tcg_swap_cond(op->args[5]);
- }
- /*
- * Canonicalize the "false" input reg to match the destination,
- * so that the tcg backend can implement "move if true".
- */
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
- op->args[5] = tcg_invert_cond(op->args[5]);
- }
- return finish_folding(ctx, op);
-}
-
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask, s_mask, s_mask_old;
--
2.43.0
- [PULL 20/72] tcg/optimize: Use fold_masks_s in fold_eqv, (continued)
- [PULL 20/72] tcg/optimize: Use fold_masks_s in fold_eqv, Richard Henderson, 2024/12/24
- [PULL 22/72] tcg/optimize: Use finish_folding in fold_extract2, Richard Henderson, 2024/12/24
- [PULL 28/72] tcg/optimize: Use fold_masks_z in fold_neg_no_const, Richard Henderson, 2024/12/24
- [PULL 27/72] tcg/optimize: Use fold_masks_s in fold_nand, Richard Henderson, 2024/12/24
- [PULL 36/72] tcg/optimize: Distinguish simplification in fold_setcond_zmask, Richard Henderson, 2024/12/24
- [PULL 37/72] tcg/optimize: Use fold_masks_z in fold_setcond, Richard Henderson, 2024/12/24
- [PULL 40/72] tcg/optimize: Use finish_folding in fold_cmp_vec, Richard Henderson, 2024/12/24
- [PULL 41/72] tcg/optimize: Use finish_folding in fold_cmpsel_vec, Richard Henderson, 2024/12/24
- [PULL 45/72] tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec, Richard Henderson, 2024/12/24
- [PULL 43/72] tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift, Richard Henderson, 2024/12/24
- [PULL 54/72] tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort,
Richard Henderson <=
- [PULL 55/72] softfloat: Add float{16,32,64}_muladd_scalbn, Richard Henderson, 2024/12/24
- [PULL 57/72] target/sparc: Use float*_muladd_scalbn, Richard Henderson, 2024/12/24
- [PULL 59/72] softfloat: Add float_round_nearest_even_max, Richard Henderson, 2024/12/24
- [PULL 48/72] tcg/optimize: Use fold_masks_zs in fold_xor, Richard Henderson, 2024/12/24
- [PULL 50/72] tcg/optimize: Use finish_folding as default in tcg_optimize, Richard Henderson, 2024/12/24
- [PULL 42/72] tcg/optimize: Use fold_masks_zs in fold_sextract, Richard Henderson, 2024/12/24
- [PULL 46/72] tcg/optimize: Use fold_masks_zs in fold_tcg_ld, Richard Henderson, 2024/12/24
- [PULL 47/72] tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy, Richard Henderson, 2024/12/24
- [PULL 49/72] tcg/optimize: Use finish_folding in fold_bitsel_vec, Richard Henderson, 2024/12/24
- [PULL 53/72] tcg/optimize: Move fold_bitsel_vec into alphabetic sort, Richard Henderson, 2024/12/24