[PULL 57/72] target/sparc: Use float*_muladd_scalbn
From: Richard Henderson
Subject: [PULL 57/72] target/sparc: Use float*_muladd_scalbn
Date: Tue, 24 Dec 2024 12:05:06 -0800
Use the scalbn interface instead of float_muladd_halve_result.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/helper.h | 4 +-
target/sparc/fop_helper.c | 8 ++--
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
3 files changed, 54 insertions(+), 38 deletions(-)
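
As context for the conversion: float32_muladd_scalbn(), added earlier in
this series ([PULL 55/72]), computes (a * b + c) * 2**n with a single
rounding, so the float_muladd_halve_result flag maps onto a scale argument
of -1 and the plain fused-multiply-add cases pass 0.  A minimal sketch of
the equivalence, illustrative only and not part of the patch:

    #include "fpu/softfloat.h"

    /* Sketch, not QEMU code: the old flag-based halving and the new
     * scalbn form produce the same single-rounded result. */
    static float32 fhadd32(float32 a, float32 b, float_status *st)
    {
        /* Was: float32_muladd(float32_one, a, b,
         *                     float_muladd_halve_result, st); */
        return float32_muladd_scalbn(float32_one, a, b, -1, 0, st);
    }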
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
index 1ae3f0c467..3a7f7dc129 100644
--- a/target/sparc/helper.h
+++ b/target/sparc/helper.h
@@ -59,7 +59,7 @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
@@ -72,7 +72,7 @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
index 236d27b19c..c25097d07f 100644
--- a/target/sparc/fop_helper.c
+++ b/target/sparc/fop_helper.c
@@ -344,17 +344,17 @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
}
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
- float32 s2, float32 s3, uint32_t op)
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
{
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
check_ieee_exceptions(env, GETPC());
return ret;
}
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
- float64 s2, float64 s3, uint32_t op)
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
{
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
check_ieee_exceptions(env, GETPC());
return ret;
}
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 9be26c804e..465e20f4f3 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -1359,93 +1359,109 @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
+ TCGv_i32 z = tcg_constant_i32(0);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
}
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
+ TCGv_i32 z = tcg_constant_i32(0);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
}
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- int op = float_muladd_negate_c;
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- int op = float_muladd_negate_c;
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- int op = float_muladd_negate_c | float_muladd_negate_result;
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
+ float_muladd_negate_result);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- int op = float_muladd_negate_c | float_muladd_negate_result;
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
+ float_muladd_negate_result);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
- int op = float_muladd_negate_result;
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
- int op = float_muladd_negate_result;
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
+ TCGv_i32 z = tcg_constant_i32(0);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
- TCGv_i32 one = tcg_constant_i32(float32_one);
- int op = float_muladd_halve_result;
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(0);
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
- TCGv_i64 one = tcg_constant_i64(float64_one);
- int op = float_muladd_halve_result;
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(0);
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
- TCGv_i32 one = tcg_constant_i32(float32_one);
- int op = float_muladd_negate_c | float_muladd_halve_result;
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
- TCGv_i64 one = tcg_constant_i64(float64_one);
- int op = float_muladd_negate_c | float_muladd_halve_result;
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
- TCGv_i32 one = tcg_constant_i32(float32_one);
- int op = float_muladd_negate_result | float_muladd_halve_result;
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
- TCGv_i64 one = tcg_constant_i64(float64_one);
- int op = float_muladd_negate_result | float_muladd_halve_result;
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
+ TCGv_i32 mone = tcg_constant_i32(-1);
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
--
2.43.0
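
A note on the "(1 * src1) + src2 / 2 with one rounding" comments above:
folding the halving into the muladd's single rounding step is not just a
micro-optimisation.  A two-step add-then-halve sequence can overflow on
the intermediate add even though the halved result is representable.  A
hypothetical host-side demonstration (host 'float' stands in for the
guest's softfloat arithmetic, which it only approximates here):

    #include <float.h>
    #include <stdio.h>

    int main(void)
    {
        float a = FLT_MAX, b = FLT_MAX;

        /* Two roundings: the add overflows to infinity first. */
        printf("naive: %g\n", (a + b) / 2.0f);

        /* Scale applied before the single rounding back to float,
         * mimicking float32_muladd_scalbn(..., -1, ...). */
        printf("fused: %g\n", (float)(((double)a + b) * 0.5));
        return 0;
    }

The first line prints inf; the second prints FLT_MAX (about 3.40282e+38),
which is what the fused, single-rounding form preserves.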