author    | Peter Maydell <peter.maydell@linaro.org> | 2021-08-13 17:11:54 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2021-08-25 10:48:49 +0100
commit    | f0ffff5163cb503de236fc766121601592f08744
tree      | 80b59e16a5e42dd873029fd82853ed1b5469f50b /target
parent    | 640cdf20a25d0021f4e93b6207b648a973df320b
target/arm: Implement MVE VMLADAV and VMLSDAV
Implement the MVE VMLADAV and VMLSDAV insns. Like the VMLALDAV and
VMLSLDAV insns already implemented, these accumulate multiplied
vector elements, but they accumulate a 32-bit result rather than a
64-bit one.
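[Editor's note: stripped of predication and beat-wise execution (both handled by the real implementation in the diff below), the architectural effect of, e.g., VMLADAV.S8 with A=1 is just a widening multiply-accumulate reduction into a 32-bit GPR. A minimal reference sketch, not code from the patch; the function name and pointer-based interface are invented for illustration:

```c
#include <stdint.h>

/*
 * Illustrative reference for VMLADAV.S8 (accumulating form): multiply
 * corresponding byte elements of Qn and Qm, widen the products, and sum
 * them into the 32-bit Rda accumulator, wrapping modulo 2^32.
 */
static uint32_t vmladav_s8_ref(const int8_t *n, const int8_t *m, uint32_t rda)
{
    for (int e = 0; e < 16; e++) {   /* 16 byte elements in a 128-bit Q reg */
        rda += (int32_t)n[e] * (int32_t)m[e];
    }
    return rda;   /* 32-bit result; VMLALDAV would return 64 bits */
}
```
]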
Note that these encodings overlap with what would be RdaHi=0b111 for
VMLALDAV, VMLSLDAV, VRMLALDAVH and VRMLSLDAVH.
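[Editor's note: the overlap is harmless because RdaHi in the long forms names an odd GPR as 2 * field + 1, so a field value of 0b111 would select r15, which those insns cannot use; the decoder hands that slice of the encoding space to the new 32-bit forms via decodetree `{ }` overlap groups, visible in the mve.decode hunk below. A hypothetical standalone check of the discriminating bits, with the field position assumed from mve.decode's `%rdahi 20:3` definition:

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical illustration only: an insn in this encoding region is one
 * of the 32-bit VMLADAV/VMLSDAV forms exactly when the would-be RdaHi
 * field (insn bits [22:20]) is 0b111, i.e. would name r15 (2 * 7 + 1).
 */
static bool would_be_rdahi_r15(uint32_t insn)
{
    return ((insn >> 20) & 0x7) == 0x7;
}
```
]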
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'target')
-rw-r--r-- | target/arm/helper-mve.h    | 17
-rw-r--r-- | target/arm/mve.decode      | 33
-rw-r--r-- | target/arm/mve_helper.c    | 41
-rw-r--r-- | target/arm/translate-mve.c | 64
4 files changed, 150 insertions, 5 deletions
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 17484f7..34d644a 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -392,6 +392,23 @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 
+DEF_HELPER_FLAGS_4(mve_vmladavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmladavsxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index d295a69..cec5a51 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -320,32 +320,55 @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
 %size_16 16:1 !function=plus_1
 
 &vmlaldav rdahi rdalo size qn qm x a
+&vmladav rda size qn qm x a
 
 @vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
           qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
 @vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
                qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
-VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
-VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+@vmladav .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+         qn=%qn rda=%rdalo size=%size_16 &vmladav
+@vmladav_nosz .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+              qn=%qn rda=%rdalo size=0 &vmladav
 
-VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+{
+  VMLADAV_S    1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+  VMLALDAV_S   1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+{
+  VMLADAV_U    1111 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+  VMLALDAV_U   1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+
+{
+  VMLSDAV      1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 1 @vmladav
+  VMLSLDAV     1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+}
+
+{
+  VMLSDAV      1111 1110 1111 ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz
+  VRMLSLDAVH   1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
+}
+
+VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
+VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
 
 {
   VMAXV_S  1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
   VMINV_S  1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
   VMAXAV   1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv
   VMINAV   1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv
+  VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
   VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
 }
 
 {
   VMAXV_U  1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
   VMINV_U  1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
+  VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
   VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
 }
 
-VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
-
 # Scalar operations
 
 VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 72c30f3..ea206c9 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1190,6 +1190,47 @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
 
 /*
+ * Multiply add dual accumulate ops
+ */
+#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
+    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+                                    void *vm, uint32_t a) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *n = vn, *m = vm; \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            if (mask & 1) { \
+                if (e & 1) { \
+                    a ODDACC \
+                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
+                } else { \
+                    a EVENACC \
+                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
+                } \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return a; \
+    }
+
+#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)
+
+#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)
+
+DO_DAV_S(vmladavs, false, +=, +=)
+DO_DAV_U(vmladavu, false, +=, +=)
+DO_DAV_S(vmlsdav, false, +=, -=)
+DO_DAV_S(vmladavsx, true, +=, +=)
+DO_DAV_S(vmlsdavx, true, +=, -=)
+
+/*
  * Rounding multiply add long dual accumulate high. In the pseudocode
  * this is implemented with a 72-bit internal accumulator value of which
  * the top 64 bits are returned.  We optimize this to avoid having to
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 676411e..92ed1be 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -46,6 +46,7 @@ typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
 typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenDualAccOpFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -765,6 +766,69 @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
     return do_long_dual_acc(s, a, fns[a->x]);
 }
 
+static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
+{
+    TCGv_ptr qn, qm;
+    TCGv_i32 rda;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qn) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    qn = mve_qreg_ptr(a->qn);
+    qm = mve_qreg_ptr(a->qm);
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an A=0 (no-accumulate) insn which does not execute the first
+     * beat must start with the current rda value, not 0.
+     */
+    if (a->a || mve_skip_first_beat(s)) {
+        rda = load_reg(s, a->rda);
+    } else {
+        rda = tcg_const_i32(0);
+    }
+
+    fn(rda, cpu_env, qn, qm, rda);
+    store_reg(s, a->rda, rda);
+    tcg_temp_free_ptr(qn);
+    tcg_temp_free_ptr(qm);
+
+    mve_update_eci(s);
+    return true;
+}
+
+#define DO_DUAL_ACC(INSN, FN)                                           \
+    static bool trans_##INSN(DisasContext *s, arg_vmladav *a)           \
+    {                                                                   \
+        static MVEGenDualAccOpFn * const fns[4][2] = {                  \
+            { gen_helper_mve_##FN##b, gen_helper_mve_##FN##xb },        \
+            { gen_helper_mve_##FN##h, gen_helper_mve_##FN##xh },        \
+            { gen_helper_mve_##FN##w, gen_helper_mve_##FN##xw },        \
+            { NULL, NULL },                                             \
+        };                                                              \
+        return do_dual_acc(s, a, fns[a->size][a->x]);                   \
+    }
+
+DO_DUAL_ACC(VMLADAV_S, vmladavs)
+DO_DUAL_ACC(VMLSDAV, vmlsdav)
+
+static bool trans_VMLADAV_U(DisasContext *s, arg_vmladav *a)
+{
+    static MVEGenDualAccOpFn * const fns[4][2] = {
+        { gen_helper_mve_vmladavub, NULL },
+        { gen_helper_mve_vmladavuh, NULL },
+        { gen_helper_mve_vmladavuw, NULL },
+        { NULL, NULL },
+    };
+    return do_dual_acc(s, a, fns[a->size][a->x]);
+}
+
 static void gen_vpst(DisasContext *s, uint32_t mask)
 {
     /*
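[Editor's note: for readers untangling the DO_DAV macro above, the `e ± 1 * XCHG` indexing is what implements the exchanging (X) variants, and EVENACC/ODDACC select add or subtract per lane. An unrolled sketch of what one instantiation, DO_DAV(vmlsdavxh, 2, int16_t, true, +=, -=), boils down to; this is illustrative only, with predication (mve_element_mask) and beat-wise state deliberately omitted and the function name invented:

```c
#include <stdint.h>

/*
 * Unrolled reference for VMLSDAVX.S16: even lanes add, odd lanes subtract,
 * and XCHG=true makes each product take its Qn element from the partner
 * lane (e + 1 for even e, e - 1 for odd e).
 */
static uint32_t vmlsdavx_s16_ref(const int16_t *n, const int16_t *m, uint32_t a)
{
    for (int e = 0; e < 8; e++) {                    /* 8 halfword elements */
        if (e & 1) {
            a -= (int32_t)n[e - 1] * (int32_t)m[e];  /* ODDACC '-=' */
        } else {
            a += (int32_t)n[e + 1] * (int32_t)m[e];  /* EVENACC '+=' */
        }
    }
    return a;
}
```

Folding the exchange into the index arithmetic is what lets a single macro body cover both the plain and X variants: XCHG is 0 or 1, so for the non-exchanging forms the offsets collapse to plain e.]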