author    Peter Maydell <peter.maydell@linaro.org>  2021-06-28 14:58:23 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2021-07-02 11:48:36 +0100
commit    eab84139855dac258c8d89ad736f6649e3edc76a (patch)
tree      35cfceaa366911d7ac810f44ac564dc131b158f3
parent    e4667a5b5e71d83e3e2af70e7dba4bfab8892829 (diff)
target/arm: Implement MVE logical immediate insns
Implement the MVE logical-immediate insns (VMOV, VMVN, VORR and VBIC).
These have essentially the same encoding as their Neon equivalents,
and we implement the decode in the same way.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-7-peter.maydell@linaro.org
-rw-r--r--  target/arm/helper-mve.h      4
-rw-r--r--  target/arm/mve.decode       17
-rw-r--r--  target/arm/mve_helper.c     24
-rw-r--r--  target/arm/translate-mve.c  50
4 files changed, 95 insertions, 0 deletions
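
The 8-bit immediate in these encodings is expanded to the 64-bit value the helpers operate on by asimd_imm_const(), following the usual Arm modified-immediate rules. As a side note (not part of the patch), a minimal C sketch of that expansion for the 32-bit and 16-bit cmode groups used by the logical insns might look like the following; the function name mod_imm_expand is purely illustrative, and QEMU's real asimd_imm_const() handles all sixteen cmode values:

    #include <stdint.h>

    /* Illustrative sketch only: expand imm8 for the 32-bit (cmode 0b000x..0b011x)
     * and 16-bit (cmode 0b100x, 0b101x) groups, replicated across 64 bits.
     * op=1 inverts the result, which is what turns VBIC into a plain AND.
     */
    static uint64_t mod_imm_expand(int cmode, int op, uint8_t imm8)
    {
        uint64_t imm = imm8;

        switch (cmode >> 1) {
        case 0: case 1: case 2: case 3:   /* 32-bit pattern, imm8 << (0/8/16/24) */
            imm <<= 8 * (cmode >> 1);
            imm |= imm << 32;
            break;
        case 4: case 5:                   /* 16-bit pattern, imm8 << (0/8) */
            imm <<= 8 * ((cmode >> 1) & 1);
            imm |= imm << 16;
            imm |= imm << 32;
            break;
        default:                          /* other cmodes not used by VORR/VBIC */
            return 0;
        }
        return op ? ~imm : imm;
    }
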
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 4bbb9b3..5248dbe 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -355,3 +355,7 @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index d9ece7b..caeb016 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -26,10 +26,14 @@
# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
%size_28 28:1 !function=plus_1
+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
&2op qd qm qn size
&2scalar qd qn rm size
+&1imm qd imm cmode op
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -41,6 +45,7 @@
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0
# The _rev suffix indicates that Vn and Vm are reversed. This is
# the case for shifts. In the Arm ARM these insns are documented
@@ -258,3 +263,15 @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
# Predicate operations
%mask_22_13 22:1 13:3
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
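
For reference (not part of the patch): the %imm_28_16_0 field spec above concatenates one bit from position 28, three bits from 18:16 and four bits from 3:0 into the 8-bit modified immediate. In plain C, the extraction decodetree generates is roughly the following (the function name is ours for illustration):

    #include <stdint.h>

    /* Illustrative: assemble imm<7:0> from insn[28], insn[18:16] and insn[3:0],
     * mirroring the decodetree field spec "%imm_28_16_0 28:1 16:3 0:4".
     */
    static uint8_t extract_imm_28_16_0(uint32_t insn)
    {
        return ((insn >> 28) & 0x1) << 7 |
               ((insn >> 16) & 0x7) << 4 |
               (insn & 0xf);
    }
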
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 85a552f..e6ced14 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -323,6 +323,30 @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
+ { \
+ uint64_t *da = vda; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+ mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
#define DO_2OP(OP, ESIZE, TYPE, FN) \
void HELPER(glue(mve_, OP))(CPUARMState *env, \
void *vd, void *vn, void *vm) \
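
A note on the predication in DO_1OP_IMM above (not part of the patch): mve_element_mask() returns one predicate bit per byte of the 128-bit Q register, and mergemask() writes back only the bytes whose bits are set. For the 64-bit lanes these helpers use, the merge is equivalent to this standalone sketch (merge64 is an illustrative name, not a QEMU function):

    #include <stdint.h>

    /* Illustrative: merge a 64-bit result into a lane under a byte-granular
     * predicate, as mergemask() does for uint64_t elements.
     */
    static uint64_t merge64(uint64_t old, uint64_t result, uint16_t mask)
    {
        uint64_t bytemask = 0;
        for (int i = 0; i < 8; i++) {
            if (mask & (1u << i)) {
                bytemask |= 0xffULL << (8 * i);
            }
        }
        return (old & ~bytemask) | (result & bytemask);
    }
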
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index e9a5442..f435a1c 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -34,6 +34,7 @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -787,3 +788,52 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
mve_update_eci(s);
return true;
}
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+{
+ TCGv_ptr qd;
+ uint64_t imm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+ qd = mve_qreg_ptr(a->qd);
+ fn(cpu_env, qd, tcg_constant_i64(imm));
+ tcg_temp_free_ptr(qd);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+ /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+ MVEGenOneOpImmFn *fn;
+
+ if ((a->cmode & 1) && a->cmode < 12) {
+ if (a->op) {
+ /*
+ * For op=1, the immediate will be inverted by asimd_imm_const(),
+ * so the VBIC becomes a logical AND operation.
+ */
+ fn = gen_helper_mve_vandi;
+ } else {
+ fn = gen_helper_mve_vorri;
+ }
+ } else {
+ /* There is one unallocated cmode/op combination in this space */
+ if (a->cmode == 15 && a->op == 1) {
+ return false;
+ }
+ /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+ fn = gen_helper_mve_vmovi;
+ }
+ return do_1imm(s, a, fn);
+}
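
A worked example of the VBIC-as-AND point in trans_Vimm_1r() (not part of the patch): with cmode=0b0001, op=1 and imm8=0xab, the expanded immediate is the inverse of 0x000000ab000000ab, so ANDing each 64-bit lane with it clears exactly the bits the architectural VBIC would clear:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative check that AND with the inverted immediate matches VBIC. */
    static void check_vbic_as_and(uint64_t lane)
    {
        uint64_t pattern = 0x000000ab000000abULL;  /* imm8=0xab, cmode=0b0001 */
        uint64_t expanded = ~pattern;              /* what mve_vandi receives */
        assert((lane & ~pattern) == (lane & expanded));
    }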