author | Peter Maydell <peter.maydell@linaro.org> | 2025-02-01 16:39:48 +0000 |
---|---|---|
committer | Peter Maydell <peter.maydell@linaro.org> | 2025-02-11 16:22:07 +0000 |
commit | 07e6b8d7526380a8a30081354f12f48f39d3464e (patch) | |
tree | 704c82da27a986a94d204cb88fb2dd185219e3ba | |
parent | 6dcd51ccf6815578bd34ea64a33a1eda9cc324e5 (diff) | |
target/arm: Handle FPCR.AH in SVE FTMAD
The negation step in the SVE FTMAD insn mustn't negate a NaN when
FPCR.AH is set. Pass FPCR.AH to the helper via the SIMD data field,
so we can select the correct behaviour.
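A minimal standalone sketch of the field packing described above, not part of the patch: extract32() is re-implemented locally so the snippet compiles on its own (QEMU's version lives in include/qemu/bitops.h), and the data field is assumed to start at bit 0, i.e. SIMD_DATA_SHIFT is treated as 0 for illustration.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for QEMU's extract32(): pull 'length' bits at 'start'. */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    /* Translator side: fold the FPCR.AH bit in above the 3-bit immediate. */
    uint32_t imm = 5, fpcr_ah = 1;
    uint32_t data = imm | (fpcr_ah << 3);

    /* Helper side: pull the two fields back apart. */
    uint32_t x = extract32(data, 0, 3);
    bool ah = extract32(data, 3, 1);

    printf("x = %u, fpcr_ah = %d\n", (unsigned)x, (int)ah);  /* x = 5, fpcr_ah = 1 */
    return 0;
}
```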
Because the operand is known to be negative, negating the operand
is the same as taking the absolute value. Defer this to the muladd
operation via flags, so that it happens after NaN detection, which
is correct for FPCR.AH.
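As a rough standalone illustration of why the ordering matters (plain C, not QEMU's softfloat; a negative quiet NaN built by hand stands in for the problem operand, and host floats are assumed to be IEEE binary32): taking the absolute value up front rewrites the NaN's sign bit, whereas leaving the operand alone and folding the negation into the muladd keeps the NaN intact.

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return the raw bit pattern of a float for inspection. */
static uint32_t bits_of(float f)
{
    uint32_t u;
    memcpy(&u, &f, sizeof(u));
    return u;
}

int main(void)
{
    uint32_t neg_qnan = 0xffc00000u;    /* a negative quiet NaN */
    float mm;
    memcpy(&mm, &neg_qnan, sizeof(mm));

    float legacy  = fabsf(mm);  /* pre-negation: sign bit cleared (0x7fc00000) */
    float ah_path = mm;         /* FPCR.AH path: operand untouched; the negation
                                   is requested from the muladd via a flag and
                                   applied only after NaN handling */

    printf("legacy operand bits:  0x%08x\n", (unsigned)bits_of(legacy));
    printf("FPCR.AH operand bits: 0x%08x\n", (unsigned)bits_of(ah_path));
    return 0;
}
```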
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r-- | target/arm/tcg/sve_helper.c | 42 |
-rw-r--r-- | target/arm/tcg/translate-sve.c | 3 |
2 files changed, 35 insertions, 10 deletions
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index bf88bde..c12b260 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -5134,16 +5134,24 @@ void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm,
         0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
     };
     intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16);
-    intptr_t x = simd_data(desc);
+    intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3);
+    bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1);
     float16 *d = vd, *n = vn, *m = vm;
+
     for (i = 0; i < opr_sz; i++) {
         float16 mm = m[i];
         intptr_t xx = x;
+        int flags = 0;
+
         if (float16_is_neg(mm)) {
-            mm = float16_abs(mm);
+            if (fpcr_ah) {
+                flags = float_muladd_negate_product;
+            } else {
+                mm = float16_abs(mm);
+            }
             xx += 8;
         }
-        d[i] = float16_muladd(n[i], mm, coeff[xx], 0, s);
+        d[i] = float16_muladd(n[i], mm, coeff[xx], flags, s);
     }
 }
 
@@ -5157,16 +5165,24 @@ void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm,
         0x37cd37cc, 0x00000000, 0x00000000, 0x00000000,
     };
     intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32);
-    intptr_t x = simd_data(desc);
+    intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3);
+    bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1);
     float32 *d = vd, *n = vn, *m = vm;
+
     for (i = 0; i < opr_sz; i++) {
         float32 mm = m[i];
         intptr_t xx = x;
+        int flags = 0;
+
         if (float32_is_neg(mm)) {
-            mm = float32_abs(mm);
+            if (fpcr_ah) {
+                flags = float_muladd_negate_product;
+            } else {
+                mm = float32_abs(mm);
+            }
             xx += 8;
         }
-        d[i] = float32_muladd(n[i], mm, coeff[xx], 0, s);
+        d[i] = float32_muladd(n[i], mm, coeff[xx], flags, s);
     }
 }
 
@@ -5184,16 +5200,24 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm,
         0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull,
     };
     intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64);
-    intptr_t x = simd_data(desc);
+    intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3);
+    bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1);
     float64 *d = vd, *n = vn, *m = vm;
+
     for (i = 0; i < opr_sz; i++) {
         float64 mm = m[i];
         intptr_t xx = x;
+        int flags = 0;
+
         if (float64_is_neg(mm)) {
-            mm = float64_abs(mm);
+            if (fpcr_ah) {
+                flags = float_muladd_negate_product;
+            } else {
+                mm = float64_abs(mm);
+            }
             xx += 8;
         }
-        d[i] = float64_muladd(n[i], mm, coeff[xx], 0, s);
+        d[i] = float64_muladd(n[i], mm, coeff[xx], flags, s);
     }
 }
 
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 2dd4605..410087c 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -3685,7 +3685,8 @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
     gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
 };
 TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
-                        ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
+                        ftmad_fns[a->esz], a->rd, a->rn, a->rm,
+                        a->imm | (s->fpcr_ah << 3),
                         a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
 
 /*