author     Richard Henderson <richard.henderson@linaro.org>    2021-05-24 18:03:16 -0700
committer  Peter Maydell <peter.maydell@linaro.org>            2021-05-25 16:01:44 +0100
commit     636ddeb15c086e67c29751764eb7fa77450a1d66
tree       9f30172c58ec024350f7b2d8f205e8d8bb82cae9  /target/arm/vec_helper.c
parent     bc2bd6974ef152f0539681afd9875eddaa8e62d9
target/arm: Pass separate addend to FCMLA helpers
For SVE, we potentially have a 4th argument coming from the
movprfx instruction. Currently we do not optimize movprfx,
so the problem is not visible.
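To illustrate the shape this prepares for, here is a minimal standalone sketch (not QEMU code; fcmla_rot0 is a hypothetical name) of one FCMLA element pair with rotation #0, written with the addend as an explicit operand rather than reusing the destination:

    #include <math.h>

    /*
     * One FCMLA element pair, rotation #0, with an explicit addend.
     * The pre-patch helpers hard-wired a == d (accumulate in place);
     * an SVE FCMLA prefixed by movprfx takes its addend from a
     * register other than the destination, hence the separate operand.
     */
    static void fcmla_rot0(float d[2], const float n[2],
                           const float m[2], const float a[2])
    {
        d[0] = fmaf(n[0], m[0], a[0]);   /* real: n.r * m.r + a.r */
        d[1] = fmaf(n[0], m[1], a[1]);   /* imag: n.r * m.i + a.i */
    }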
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-51-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/arm/vec_helper.c')
 target/arm/vec_helper.c | 50 ++++++++++++++++++++------------------------------
 1 file changed, 20 insertions(+), 30 deletions(-)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index f88e572..b19877e 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -657,13 +657,11 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float16 *d = vd;
-    float16 *n = vn;
-    float16 *m = vm;
+    float16 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -680,19 +678,17 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
         float16 e4 = e2;
         float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
 
-        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
-        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
+        d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
+        d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
                              void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float16 *d = vd;
-    float16 *n = vn;
-    float16 *m = vm;
+    float16 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -716,20 +712,18 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
             float16 e2 = n[H2(j + flip)];
             float16 e4 = e2;
 
-            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
-            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
+            d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
+            d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
         }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float32 *d = vd;
-    float32 *n = vn;
-    float32 *m = vm;
+    float32 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -746,19 +740,17 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
         float32 e4 = e2;
         float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
 
-        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
-        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
+        d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
+        d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
                              void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float32 *d = vd;
-    float32 *n = vn;
-    float32 *m = vm;
+    float32 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -782,20 +774,18 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
             float32 e2 = n[H4(j + flip)];
             float32 e4 = e2;
 
-            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
-            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
+            d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
+            d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
         }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float64 *d = vd;
-    float64 *n = vn;
-    float64 *m = vm;
+    float64 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -812,8 +802,8 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
         float64 e4 = e2;
         float64 e3 = m[i + 1 - flip] ^ neg_imag;
 
-        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
-        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
+        d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
+        d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
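A caller that wants the old accumulate-in-place behavior simply passes the destination again as the addend, while a future movprfx optimization could pass a distinct vector. A standalone sketch of that calling pattern (illustrative only; it uses plain float and a hypothetical fcmla_model rather than QEMU's helper machinery):

    #include <stdio.h>
    #include <math.h>

    /* Standalone model of the new helper shape: d = rot0(n * m) + a. */
    static void fcmla_model(float *d, const float *n, const float *m,
                            const float *a, int pairs)
    {
        for (int i = 0; i < 2 * pairs; i += 2) {
            d[i]     = fmaf(n[i], m[i],     a[i]);
            d[i + 1] = fmaf(n[i], m[i + 1], a[i + 1]);
        }
    }

    int main(void)
    {
        float n[2] = { 2.0f, 0.0f }, m[2] = { 3.0f, 4.0f };
        float d[2] = { 1.0f, 1.0f }, z[2] = { 10.0f, 20.0f };

        fcmla_model(d, n, m, d, 1);  /* a == d: pre-patch behavior  */
        printf("in place: %g %g\n", d[0], d[1]);   /* prints 7 9    */

        fcmla_model(d, n, m, z, 1);  /* a != d: movprfx-style addend */
        printf("separate: %g %g\n", d[0], d[1]);   /* prints 16 28  */
        return 0;
    }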