Diffstat (limited to 'target/arm/tcg/vec_helper.c')
-rw-r--r--  target/arm/tcg/vec_helper.c  | 387
1 file changed, 267 insertions(+), 120 deletions(-)
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 7330b37..986eaf8 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -879,19 +879,21 @@ void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
float16 *d = vd;
float16 *n = vn;
float16 *m = vm;
- uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = neg_real ^ 1;
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 15;
- neg_imag <<= 15;
-
for (i = 0; i < opr_sz / 2; i += 2) {
float16 e0 = n[H2(i)];
- float16 e1 = m[H2(i + 1)] ^ neg_imag;
+ float16 e1 = m[H2(i + 1)];
float16 e2 = n[H2(i + 1)];
- float16 e3 = m[H2(i)] ^ neg_real;
+ float16 e3 = m[H2(i)];
+
+ if (rot) {
+ e3 = float16_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float16_maybe_ah_chs(e1, fpcr_ah);
+ }
d[H2(i)] = float16_add(e0, e1, fpst);
d[H2(i + 1)] = float16_add(e2, e3, fpst);
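
Note: the float16_maybe_ah_chs() helper (and its float32/float64 counterparts used below) is introduced elsewhere in this series, not in this hunk. A minimal sketch of its assumed semantics, matching the "never flip the sign of a NaN when FPCR.AH is set" rule that the ah_abd helpers later in this diff also follow:

    /* Hypothetical reconstruction, for illustration only. */
    static inline float16 float16_maybe_ah_chs(float16 a, bool fpcr_ah)
    {
        /* With AH=1 a NaN passes through unchanged; otherwise negate. */
        return fpcr_ah && float16_is_any_nan(a) ? a : float16_chs(a);
    }
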
@@ -906,19 +908,21 @@ void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
float32 *d = vd;
float32 *n = vn;
float32 *m = vm;
- uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = neg_real ^ 1;
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 31;
- neg_imag <<= 31;
-
for (i = 0; i < opr_sz / 4; i += 2) {
float32 e0 = n[H4(i)];
- float32 e1 = m[H4(i + 1)] ^ neg_imag;
+ float32 e1 = m[H4(i + 1)];
float32 e2 = n[H4(i + 1)];
- float32 e3 = m[H4(i)] ^ neg_real;
+ float32 e3 = m[H4(i)];
+
+ if (rot) {
+ e3 = float32_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float32_maybe_ah_chs(e1, fpcr_ah);
+ }
d[H4(i)] = float32_add(e0, e1, fpst);
d[H4(i + 1)] = float32_add(e2, e3, fpst);
@@ -933,19 +937,21 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
float64 *d = vd;
float64 *n = vn;
float64 *m = vm;
- uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
- uint64_t neg_imag = neg_real ^ 1;
+ bool rot = extract32(desc, SIMD_DATA_SHIFT, 1);
+ bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 63;
- neg_imag <<= 63;
-
for (i = 0; i < opr_sz / 8; i += 2) {
float64 e0 = n[i];
- float64 e1 = m[i + 1] ^ neg_imag;
+ float64 e1 = m[i + 1];
float64 e2 = n[i + 1];
- float64 e3 = m[i] ^ neg_real;
+ float64 e3 = m[i];
+
+ if (rot) {
+ e3 = float64_maybe_ah_chs(e3, fpcr_ah);
+ } else {
+ e1 = float64_maybe_ah_chs(e1, fpcr_ah);
+ }
d[i] = float64_add(e0, e1, fpst);
d[i + 1] = float64_add(e2, e3, fpst);
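
Across all three element sizes the arithmetic is unchanged; only where the negation happens moves. Reading the mapping off the loop (rot inherits the old neg_real bit, so rot=0 corresponds to FCADD #90 and rot=1 to FCADD #270):

    rot=0 (#90) :  d_re = n_re - m_im    d_im = n_im + m_re
    rot=1 (#270):  d_re = n_re + m_im    d_im = n_im - m_re
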
@@ -959,22 +965,26 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
uintptr_t opr_sz = simd_oprsz(desc);
float16 *d = vd, *n = vn, *m = vm, *a = va;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float16 negx_imag, negx_real;
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 15;
- neg_imag <<= 15;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 15;
+ negx_imag = (negf_imag & ~fpcr_ah) << 15;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < opr_sz / 2; i += 2) {
float16 e2 = n[H2(i + flip)];
- float16 e1 = m[H2(i + flip)] ^ neg_real;
+ float16 e1 = m[H2(i + flip)] ^ negx_real;
float16 e4 = e2;
- float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
+ float16 e3 = m[H2(i + 1 - flip)] ^ negx_imag;
- d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
- d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
+ d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], negf_real, fpst);
+ d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], negf_imag, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
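
The negx/negf split matters only for NaNs: XORing the sign bit rewrites the operand itself, NaN or not, while float_muladd_negate_product negates the product inside the fused operation and leaves the operand bits untouched, which is what FPCR.AH=1 requires. A condensed sketch of the dispatch, using a hypothetical scalar helper:

    /* Illustration only; not part of the patch. */
    static float32 muladd_maybe_negated(float32 n, float32 m, float32 a,
                                        bool neg, bool fpcr_ah,
                                        float_status *st)
    {
        if (neg && fpcr_ah) {
            /* AH=1: negate within the fused op; NaN signs preserved. */
            return float32_muladd(n, m, a, float_muladd_negate_product, st);
        }
        /* AH=0: the pre-existing sign-bit flip on one operand. */
        return float32_muladd(n ^ (neg ? 1u << 31 : 0), m, a, 0, st);
    }
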
@@ -985,29 +995,33 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
uintptr_t opr_sz = simd_oprsz(desc);
float16 *d = vd, *n = vn, *m = vm, *a = va;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1);
+ uint32_t negf_real = flip ^ negf_imag;
intptr_t elements = opr_sz / sizeof(float16);
intptr_t eltspersegment = MIN(16 / sizeof(float16), elements);
+ float16 negx_imag, negx_real;
intptr_t i, j;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 15;
- neg_imag <<= 15;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 15;
+ negx_imag = (negf_imag & ~fpcr_ah) << 15;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < elements; i += eltspersegment) {
float16 mr = m[H2(i + 2 * index + 0)];
float16 mi = m[H2(i + 2 * index + 1)];
- float16 e1 = neg_real ^ (flip ? mi : mr);
- float16 e3 = neg_imag ^ (flip ? mr : mi);
+ float16 e1 = negx_real ^ (flip ? mi : mr);
+ float16 e3 = negx_imag ^ (flip ? mr : mi);
for (j = i; j < i + eltspersegment; j += 2) {
float16 e2 = n[H2(j + flip)];
float16 e4 = e2;
- d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
- d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
+ d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], negf_real, fpst);
+ d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], negf_imag, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
@@ -1019,22 +1033,26 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
uintptr_t opr_sz = simd_oprsz(desc);
float32 *d = vd, *n = vn, *m = vm, *a = va;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float32 negx_imag, negx_real;
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 31;
- neg_imag <<= 31;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 31;
+ negx_imag = (negf_imag & ~fpcr_ah) << 31;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < opr_sz / 4; i += 2) {
float32 e2 = n[H4(i + flip)];
- float32 e1 = m[H4(i + flip)] ^ neg_real;
+ float32 e1 = m[H4(i + flip)] ^ negx_real;
float32 e4 = e2;
- float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
+ float32 e3 = m[H4(i + 1 - flip)] ^ negx_imag;
- d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
- d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
+ d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], negf_real, fpst);
+ d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], negf_imag, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
@@ -1045,29 +1063,33 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
uintptr_t opr_sz = simd_oprsz(desc);
float32 *d = vd, *n = vn, *m = vm, *a = va;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
- uint32_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1);
+ uint32_t negf_real = flip ^ negf_imag;
intptr_t elements = opr_sz / sizeof(float32);
intptr_t eltspersegment = MIN(16 / sizeof(float32), elements);
+ float32 negx_imag, negx_real;
intptr_t i, j;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 31;
- neg_imag <<= 31;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (negf_real & ~fpcr_ah) << 31;
+ negx_imag = (negf_imag & ~fpcr_ah) << 31;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < elements; i += eltspersegment) {
float32 mr = m[H4(i + 2 * index + 0)];
float32 mi = m[H4(i + 2 * index + 1)];
- float32 e1 = neg_real ^ (flip ? mi : mr);
- float32 e3 = neg_imag ^ (flip ? mr : mi);
+ float32 e1 = negx_real ^ (flip ? mi : mr);
+ float32 e3 = negx_imag ^ (flip ? mr : mi);
for (j = i; j < i + eltspersegment; j += 2) {
float32 e2 = n[H4(j + flip)];
float32 e4 = e2;
- d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
- d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
+ d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], negf_real, fpst);
+ d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], negf_imag, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
@@ -1079,22 +1101,26 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
uintptr_t opr_sz = simd_oprsz(desc);
float64 *d = vd, *n = vn, *m = vm, *a = va;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
- uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
- uint64_t neg_real = flip ^ neg_imag;
+ uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t negf_real = flip ^ negf_imag;
+ float64 negx_real, negx_imag;
uintptr_t i;
- /* Shift boolean to the sign bit so we can xor to negate. */
- neg_real <<= 63;
- neg_imag <<= 63;
+ /* With AH=0, use negx; with AH=1 use negf. */
+ negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63;
+ negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63;
+ negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0);
+ negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0);
for (i = 0; i < opr_sz / 8; i += 2) {
float64 e2 = n[i + flip];
- float64 e1 = m[i + flip] ^ neg_real;
+ float64 e1 = m[i + flip] ^ negx_real;
float64 e4 = e2;
- float64 e3 = m[i + 1 - flip] ^ neg_imag;
+ float64 e3 = m[i + 1 - flip] ^ negx_imag;
- d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
- d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
+ d[i] = float64_muladd(e2, e1, a[i], negf_real, fpst);
+ d[i + 1] = float64_muladd(e4, e3, a[i + 1], negf_imag, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
@@ -1210,10 +1236,12 @@ void HELPER(NAME)(void *vd, void *vn, float_status *stat, uint32_t desc) \
DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
+DO_2OP(gvec_frecpe_rpres_s, helper_recpe_rpres_f32, float32)
DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)
DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
+DO_2OP(gvec_frsqrte_rpres_s, helper_rsqrte_rpres_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)
DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
@@ -1302,6 +1330,25 @@ static float64 float64_abd(float64 op1, float64 op2, float_status *stat)
return float64_abs(float64_sub(op1, op2, stat));
}
+/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */
+static float16 float16_ah_abd(float16 op1, float16 op2, float_status *stat)
+{
+ float16 r = float16_sub(op1, op2, stat);
+ return float16_is_any_nan(r) ? r : float16_abs(r);
+}
+
+static float32 float32_ah_abd(float32 op1, float32 op2, float_status *stat)
+{
+ float32 r = float32_sub(op1, op2, stat);
+ return float32_is_any_nan(r) ? r : float32_abs(r);
+}
+
+static float64 float64_ah_abd(float64 op1, float64 op2, float_status *stat)
+{
+ float64 r = float64_sub(op1, op2, stat);
+ return float64_is_any_nan(r) ? r : float64_abs(r);
+}
+
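
The difference from the plain float*_abd helpers above shows up only when the subtraction yields a NaN. A worked example, assuming the subtraction propagates a negative quiet NaN operand (ARM behaviour with default-NaN mode off); st is a suitably initialised float_status:

    float32 nan_neg = 0xffc00000;               /* -qNaN */
    float32_abd(nan_neg, float32_zero, &st);    /* 0x7fc00000: abs() clears the sign */
    float32_ah_abd(nan_neg, float32_zero, &st); /* 0xffc00000: sign bit kept */
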
/*
* Reciprocal step. These are the AArch32 version which uses a
* non-fused multiply-and-subtract.
@@ -1389,6 +1436,10 @@ DO_3OP(gvec_fabd_h, float16_abd, float16)
DO_3OP(gvec_fabd_s, float32_abd, float32)
DO_3OP(gvec_fabd_d, float64_abd, float64)
+DO_3OP(gvec_ah_fabd_h, float16_ah_abd, float16)
+DO_3OP(gvec_ah_fabd_s, float32_ah_abd, float32)
+DO_3OP(gvec_ah_fabd_d, float64_ah_abd, float64)
+
DO_3OP(gvec_fceq_h, float16_ceq, float16)
DO_3OP(gvec_fceq_s, float32_ceq, float32)
DO_3OP(gvec_fceq_d, float64_ceq, float64)
@@ -1448,6 +1499,22 @@ DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
+DO_3OP(gvec_ah_recps_h, helper_recpsf_ah_f16, float16)
+DO_3OP(gvec_ah_recps_s, helper_recpsf_ah_f32, float32)
+DO_3OP(gvec_ah_recps_d, helper_recpsf_ah_f64, float64)
+
+DO_3OP(gvec_ah_rsqrts_h, helper_rsqrtsf_ah_f16, float16)
+DO_3OP(gvec_ah_rsqrts_s, helper_rsqrtsf_ah_f32, float32)
+DO_3OP(gvec_ah_rsqrts_d, helper_rsqrtsf_ah_f64, float64)
+
+DO_3OP(gvec_ah_fmax_h, helper_vfp_ah_maxh, float16)
+DO_3OP(gvec_ah_fmax_s, helper_vfp_ah_maxs, float32)
+DO_3OP(gvec_ah_fmax_d, helper_vfp_ah_maxd, float64)
+
+DO_3OP(gvec_ah_fmin_h, helper_vfp_ah_minh, float16)
+DO_3OP(gvec_ah_fmin_s, helper_vfp_ah_mins, float32)
+DO_3OP(gvec_ah_fmin_d, helper_vfp_ah_mind, float64)
+
#endif
#undef DO_3OP
@@ -1513,6 +1580,24 @@ static float64 float64_mulsub_f(float64 dest, float64 op1, float64 op2,
return float64_muladd(float64_chs(op1), op2, dest, 0, stat);
}
+static float16 float16_ah_mulsub_f(float16 dest, float16 op1, float16 op2,
+ float_status *stat)
+{
+ return float16_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
+static float32 float32_ah_mulsub_f(float32 dest, float32 op1, float32 op2,
+ float_status *stat)
+{
+ return float32_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
+static float64 float64_ah_mulsub_f(float64 dest, float64 op1, float64 op2,
+ float_status *stat)
+{
+ return float64_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
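
For contrast with float16/32/64_mulsub_f just above: the existing AH=0 form negates op1 up front with chs(), which flips the sign bit even when op1 is a NaN, while the new AH=1 form defers the negation into the fused operation so a NaN input flows through with its sign intact. Side by side for float32:

    /* AH=0 (existing): negate op1 before the fused op. */
    return float32_muladd(float32_chs(op1), op2, dest, 0, stat);
    /* AH=1 (this patch): fold the negation into the fused op instead. */
    return float32_muladd(op1, op2, dest, float_muladd_negate_product, stat);
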
#define DO_MULADD(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *vm, \
float_status *stat, uint32_t desc) \
@@ -1539,6 +1624,10 @@ DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
DO_MULADD(gvec_vfms_d, float64_mulsub_f, float64)
+DO_MULADD(gvec_ah_vfms_h, float16_ah_mulsub_f, float16)
+DO_MULADD(gvec_ah_vfms_s, float32_ah_mulsub_f, float32)
+DO_MULADD(gvec_ah_vfms_d, float64_ah_mulsub_f, float64)
+
/* For the indexed ops, SVE applies the index per 128-bit vector segment.
* For AdvSIMD, there is of course only one such vector segment.
*/
@@ -1635,29 +1724,35 @@ DO_FMUL_IDX(gvec_fmls_nf_idx_s, float32_sub, float32_mul, float32, H4)
#undef DO_FMUL_IDX
-#define DO_FMLA_IDX(NAME, TYPE, H) \
+#define DO_FMLA_IDX(NAME, TYPE, H, NEGX, NEGF) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
float_status *stat, uint32_t desc) \
{ \
intptr_t i, j, oprsz = simd_oprsz(desc); \
intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
- TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \
- intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \
+ intptr_t idx = simd_data(desc); \
TYPE *d = vd, *n = vn, *m = vm, *a = va; \
- op1_neg <<= (8 * sizeof(TYPE) - 1); \
for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
TYPE mm = m[H(i + idx)]; \
for (j = 0; j < segment; j++) { \
- d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \
- mm, a[i + j], 0, stat); \
+ d[i + j] = TYPE##_muladd(n[i + j] ^ NEGX, mm, \
+ a[i + j], NEGF, stat); \
} \
} \
clear_tail(d, oprsz, simd_maxsz(desc)); \
}
-DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
-DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
-DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8)
+DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2, 0, 0)
+DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4, 0, 0)
+DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8, 0, 0)
+
+DO_FMLA_IDX(gvec_fmls_idx_h, float16, H2, INT16_MIN, 0)
+DO_FMLA_IDX(gvec_fmls_idx_s, float32, H4, INT32_MIN, 0)
+DO_FMLA_IDX(gvec_fmls_idx_d, float64, H8, INT64_MIN, 0)
+
+DO_FMLA_IDX(gvec_ah_fmls_idx_h, float16, H2, 0, float_muladd_negate_product)
+DO_FMLA_IDX(gvec_ah_fmls_idx_s, float32, H4, 0, float_muladd_negate_product)
+DO_FMLA_IDX(gvec_ah_fmls_idx_d, float64, H8, 0, float_muladd_negate_product)
#undef DO_FMLA_IDX
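
The NEGX/NEGF macro parameters encode the same AH=0/AH=1 split. Substituting them into the macro body, the inner-loop statement of the two FMLS variants expands to, in effect:

    /* gvec_fmls_idx_s: NEGX = INT32_MIN, XOR n with the sign bit (AH=0). */
    d[i + j] = float32_muladd(n[i + j] ^ INT32_MIN, mm, a[i + j], 0, stat);

    /* gvec_ah_fmls_idx_s: NEGX = 0, NEGF folds the negation in (AH=1). */
    d[i + j] = float32_muladd(n[i + j], mm, a[i + j],
                              float_muladd_negate_product, stat);
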
@@ -2030,28 +2125,29 @@ static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
* as there is not yet SVE versions that might use blocking.
*/
-static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
- uint32_t desc, bool fz16)
+static void do_fmlal(float32 *d, void *vn, void *vm,
+ CPUARMState *env, uint32_t desc,
+ ARMFPStatusFlavour fpst_idx,
+ uint64_t negx, int negf)
{
+ float_status *fpst = &env->vfp.fp_status[fpst_idx];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
intptr_t i, oprsz = simd_oprsz(desc);
- int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
int is_q = oprsz == 16;
uint64_t n_4, m_4;
- /* Pre-load all of the f16 data, avoiding overlap issues. */
- n_4 = load4_f16(vn, is_q, is_2);
+ /*
+ * Pre-load all of the f16 data, avoiding overlap issues.
+ * Negate all inputs for AH=0 FMLSL at once.
+ */
+ n_4 = load4_f16(vn, is_q, is_2) ^ negx;
m_4 = load4_f16(vm, is_q, is_2);
- /* Negate all inputs for FMLSL at once. */
- if (is_s) {
- n_4 ^= 0x8000800080008000ull;
- }
-
for (i = 0; i < oprsz / 4; i++) {
float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
- d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
+ d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst);
}
clear_tail(d, oprsz, simd_maxsz(desc));
}
@@ -2059,61 +2155,82 @@ static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
CPUARMState *env, uint32_t desc)
{
- do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = is_s ? 0x8000800080008000ull : 0;
+
+ do_fmlal(vd, vn, vm, env, desc, FPST_STD, negx, 0);
}
void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
CPUARMState *env, uint32_t desc)
{
- do_fmlal(vd, vn, vm, &env->vfp.fp_status_a64, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = 0;
+ int negf = 0;
+
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000800080008000ull;
+ }
+ }
+ do_fmlal(vd, vn, vm, env, desc, FPST_A64, negx, negf);
}
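
The 0x8000800080008000 constant flips bit 15 of each of the four float16 lanes packed into n_4, negating all four inputs with one XOR. The a32 helper keeps the unconditional XOR form, as FPCR.AH does not apply on the AArch32 FPST_STD path. A worked lane example (illustrative values, lanes listed low to high):

    uint64_t n_4 = 0x3c00bc0040003c00ull;  /* f16 lanes: 1.0, 2.0, -1.0, 1.0 */
    n_4 ^= 0x8000800080008000ull;          /* now:      -1.0, -2.0, 1.0, -1.0 */
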
void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
CPUARMState *env, uint32_t desc)
{
intptr_t i, oprsz = simd_oprsz(desc);
- uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
- float_status *status = &env->vfp.fp_status_a64;
- bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64);
+ float_status *status = &env->vfp.fp_status[FPST_A64];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
+ int negx = 0, negf = 0;
+
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000;
+ }
+ }
for (i = 0; i < oprsz; i += sizeof(float32)) {
- float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn;
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negx;
float16 mm_16 = *(float16 *)(vm + H1_2(i + sel));
float32 nn = float16_to_float32_by_bits(nn_16, fz16);
float32 mm = float16_to_float32_by_bits(mm_16, fz16);
float32 aa = *(float32 *)(va + H1_4(i));
- *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+ *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, negf, status);
}
}
-static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
- uint32_t desc, bool fz16)
+static void do_fmlal_idx(float32 *d, void *vn, void *vm,
+ CPUARMState *env, uint32_t desc,
+ ARMFPStatusFlavour fpst_idx,
+ uint64_t negx, int negf)
{
+ float_status *fpst = &env->vfp.fp_status[fpst_idx];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
intptr_t i, oprsz = simd_oprsz(desc);
- int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
int is_q = oprsz == 16;
uint64_t n_4;
float32 m_1;
- /* Pre-load all of the f16 data, avoiding overlap issues. */
- n_4 = load4_f16(vn, is_q, is_2);
-
- /* Negate all inputs for FMLSL at once. */
- if (is_s) {
- n_4 ^= 0x8000800080008000ull;
- }
-
+ /*
+ * Pre-load all of the f16 data, avoiding overlap issues.
+ * Negate all inputs for AH=0 FMLSL at once.
+ */
+ n_4 = load4_f16(vn, is_q, is_2) ^ negx;
m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);
for (i = 0; i < oprsz / 4; i++) {
float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
- d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
+ d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst);
}
clear_tail(d, oprsz, simd_maxsz(desc));
}
@@ -2121,38 +2238,58 @@ static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
CPUARMState *env, uint32_t desc)
{
- do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = is_s ? 0x8000800080008000ull : 0;
+
+ do_fmlal_idx(vd, vn, vm, env, desc, FPST_STD, negx, 0);
}
void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
CPUARMState *env, uint32_t desc)
{
- do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status_a64, desc,
- get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64));
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t negx = 0;
+ int negf = 0;
+
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000800080008000ull;
+ }
+ }
+ do_fmlal_idx(vd, vn, vm, env, desc, FPST_A64, negx, negf);
}
void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
CPUARMState *env, uint32_t desc)
{
intptr_t i, j, oprsz = simd_oprsz(desc);
- uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
- float_status *status = &env->vfp.fp_status_a64;
- bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64);
+ float_status *status = &env->vfp.fp_status[FPST_A64];
+ bool fz16 = env->vfp.fpcr & FPCR_FZ16;
+ int negx = 0, negf = 0;
+ if (is_s) {
+ if (env->vfp.fpcr & FPCR_AH) {
+ negf = float_muladd_negate_product;
+ } else {
+ negx = 0x8000;
+ }
+ }
for (i = 0; i < oprsz; i += 16) {
float16 mm_16 = *(float16 *)(vm + i + idx);
float32 mm = float16_to_float32_by_bits(mm_16, fz16);
for (j = 0; j < 16; j += sizeof(float32)) {
- float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn;
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negx;
float32 nn = float16_to_float32_by_bits(nn_16, fz16);
float32 aa = *(float32 *)(va + H1_4(i + j));
*(float32 *)(vd + H1_4(i + j)) =
- float32_muladd(nn, mm, aa, 0, status);
+ float32_muladd(nn, mm, aa, negf, status);
}
}
}
@@ -2436,6 +2573,16 @@ DO_3OP_PAIR(gvec_fminnump_h, float16_minnum, float16, H2)
DO_3OP_PAIR(gvec_fminnump_s, float32_minnum, float32, H4)
DO_3OP_PAIR(gvec_fminnump_d, float64_minnum, float64, )
+#ifdef TARGET_AARCH64
+DO_3OP_PAIR(gvec_ah_fmaxp_h, helper_vfp_ah_maxh, float16, H2)
+DO_3OP_PAIR(gvec_ah_fmaxp_s, helper_vfp_ah_maxs, float32, H4)
+DO_3OP_PAIR(gvec_ah_fmaxp_d, helper_vfp_ah_maxd, float64, )
+
+DO_3OP_PAIR(gvec_ah_fminp_h, helper_vfp_ah_minh, float16, H2)
+DO_3OP_PAIR(gvec_ah_fminp_s, helper_vfp_ah_mins, float32, H4)
+DO_3OP_PAIR(gvec_ah_fminp_d, helper_vfp_ah_mind, float64, )
+#endif
+
#undef DO_3OP_PAIR
#define DO_3OP_PAIR(NAME, FUNC, TYPE, H) \
@@ -2808,7 +2955,7 @@ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp)
*/
bool ebf = is_a64(env) && env->vfp.fpcr & FPCR_EBF;
- *statusp = is_a64(env) ? env->vfp.fp_status_a64 : env->vfp.fp_status_a32;
+ *statusp = env->vfp.fp_status[is_a64(env) ? FPST_A64 : FPST_A32];
set_default_nan_mode(true, statusp);
if (ebf) {