commit 4269fef1f901927dd2c56deea6c45da8e8c5170e
tree 51f27052cf0a70f56d4df6f0a0d4440596021436
parent e3a561318327417523693f94e99745516f690eb7
author Richard Henderson <richard.henderson@linaro.org> 2021-05-24 18:02:41 -0700
committer Peter Maydell <peter.maydell@linaro.org> 2021-05-25 16:01:43 +0100
target/arm: Implement SVE2 bitwise shift left long
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
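
For reference, the instruction semantics: SSHLLB/USHLLB widen the even-numbered (bottom) narrow elements of the source vector, SSHLLT/USHLLT the odd-numbered (top) ones; each selected element is sign- or zero-extended to twice its width and then shifted left by the immediate. A minimal scalar sketch of the signed byte-to-halfword case, with illustrative names (not QEMU API) and separate buffers for simplicity:

    #include <stdint.h>
    #include <stddef.h>

    /* Reference semantics of SSHLLB (top == 0) / SSHLLT (top == 1),
     * widening byte elements to halfwords. */
    static void sshll_h_ref(int16_t *zd, const int8_t *zn,
                            size_t wide_elems, int top, int shift)
    {
        for (size_t i = 0; i < wide_elems; i++) {
            int16_t nn = zn[2 * i + top];   /* pick bottom or top narrow element */
            zd[i] = (int16_t)(nn << shift); /* sign-extension happened on load */
        }
    }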
 target/arm/helper-sve.h    |   8 ++++++++
 target/arm/sve.decode      |   8 ++++++++
 target/arm/sve_helper.c    |  22 ++++++++++++++++++++++
 target/arm/translate-sve.c | 159 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 197 insertions(+), 0 deletions(-)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index bf3e533..740939e 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2364,3 +2364,11 @@ DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sshll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
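
Each DEF_HELPER_FLAGS_3(..., void, ptr, ptr, i32) line declares an out-of-line helper taking a destination pointer, a source pointer, and the 32-bit simd descriptor. Roughly what one declaration expands to (a sketch; the real macro also registers the TCG_CALL_NO_RWG flag so the optimizer knows the helper neither reads nor writes TCG globals):

    void helper_sve2_sshll_h(void *vd, void *vn, uint32_t desc);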
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 016c15e..a3191eb 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1207,3 +1207,11 @@ SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm
SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm
UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm
UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm
+
+## SVE2 bitwise shift left long
+
+# Note bit23 == 0 is handled by esz > 0 in do_sve2_shll_tb.
+SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl
+SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl
+USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl
+USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
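
The @rd_rn_tszimm_shl format packs both the source element size and the shift count into the tsz:imm bits: the position of the highest set bit marks the element size, and the bits below it are the left-shift amount. A sketch of the extraction, modelled on the tszimm_esz/tszimm_shl decode functions in translate-sve.c (exact signatures vary across QEMU versions):

    /* The immediate is encoded as x == (8 << esz) + shl, with
     * shl < (8 << esz), so the top set bit of x is the size marker. */
    static int tszimm_esz(int x)
    {
        x >>= 3;                      /* top set bit of tsz -> element size */
        return 31 - __builtin_clz(x); /* undefined for x == 0: an all-zero
                                       * tsz is rejected as unallocated */
    }

    static int tszimm_shl(int x)
    {
        return x - (8 << tszimm_esz(x)); /* remove the marker, keep the shift */
    }

    /* e.g. x == 0x14 == (8 << 1) + 4: esz == 1 (.h source), shl == 4 */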
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index cfd1a7c..79b268c 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1226,6 +1226,28 @@ DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB)
#undef DO_ZZZ_WTB
+#define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel = (simd_data(desc) & 1) * sizeof(TYPEN); \
+ int shift = simd_data(desc) >> 1; \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel)); \
+ *(TYPEW *)(vd + HW(i)) = nn << shift; \
+ } \
+}
+
+DO_ZZI_SHLL(sve2_sshll_h, int16_t, int8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_sshll_s, int32_t, int16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_sshll_d, int64_t, int32_t, , H1_4)
+
+DO_ZZI_SHLL(sve2_ushll_h, uint16_t, uint8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_ushll_s, uint32_t, uint16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_ushll_d, uint64_t, uint32_t, , H1_4)
+
+#undef DO_ZZI_SHLL
+
/* Two-operand reduction expander, controlled by a predicate.
* The difference between TYPERED and TYPERET has to do with
* sign-extension. E.g. for SMAX, TYPERED must be signed,
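
To make the macro concrete, here is the sve2_sshll_h instance written out by hand (an illustration, not an extra hunk; H1/H1_2 are QEMU's host-endianness index adjustments):

    void HELPER(sve2_sshll_h)(void *vd, void *vn, uint32_t desc)
    {
        intptr_t i, opr_sz = simd_oprsz(desc);
        /* Bit 0 of simd_data selects bottom (0) or top (1) inputs;
         * the remaining bits carry the shift count. */
        intptr_t sel = (simd_data(desc) & 1) * sizeof(int8_t);
        int shift = simd_data(desc) >> 1;

        for (i = 0; i < opr_sz; i += sizeof(int16_t)) {
            /* Loading through int8_t * sign-extends into the wide type. */
            int16_t nn = *(int8_t *)(vn + H1(i + sel));
            *(int16_t *)(vd + H1_2(i)) = nn << shift;
        }
    }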
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index fbdccc1..da7308d 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -6071,3 +6071,162 @@ DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false)
DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true)
DO_SVE2_ZZZ_WTB(USUBWB, usubw, false)
DO_SVE2_ZZZ_WTB(USUBWT, usubw, true)
+
+static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+ int top = imm & 1;
+ int shl = imm >> 1;
+ int halfbits = 4 << vece;
+
+ if (top) {
+ if (shl == halfbits) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_sari_vec(vece, d, n, halfbits);
+ tcg_gen_shli_vec(vece, d, d, shl);
+ }
+ } else {
+ tcg_gen_shli_vec(vece, d, n, halfbits);
+ tcg_gen_sari_vec(vece, d, d, halfbits - shl);
+ }
+}
+
+static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
+{
+ int halfbits = 4 << vece;
+ int top = imm & 1;
+ int shl = (imm >> 1);
+ int shift;
+ uint64_t mask;
+
+ mask = MAKE_64BIT_MASK(0, halfbits);
+ mask <<= shl;
+ mask = dup_const(vece, mask);
+
+ shift = shl - top * halfbits;
+ if (shift < 0) {
+ tcg_gen_shri_i64(d, n, -shift);
+ } else {
+ tcg_gen_shli_i64(d, n, shift);
+ }
+ tcg_gen_andi_i64(d, d, mask);
+}
+
+static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_16, d, n, imm);
+}
+
+static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_32, d, n, imm);
+}
+
+static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_64, d, n, imm);
+}
+
+static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+ int halfbits = 4 << vece;
+ int top = imm & 1;
+ int shl = imm >> 1;
+
+ if (top) {
+ if (shl == halfbits) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_shri_vec(vece, d, n, halfbits);
+ tcg_gen_shli_vec(vece, d, d, shl);
+ }
+ } else {
+ if (shl == 0) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_shli_vec(vece, d, n, halfbits);
+ tcg_gen_shri_vec(vece, d, d, halfbits - shl);
+ }
+ }
+}
+
+static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
+ bool sel, bool uns)
+{
+ static const TCGOpcode sshll_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, 0
+ };
+ static const TCGOpcode ushll_list[] = {
+ INDEX_op_shli_vec, INDEX_op_shri_vec, 0
+ };
+ static const GVecGen2i ops[2][3] = {
+ { { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_h,
+ .vece = MO_16 },
+ { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_s,
+ .vece = MO_32 },
+ { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_d,
+ .vece = MO_64 } },
+ { { .fni8 = gen_ushll16_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_h,
+ .vece = MO_16 },
+ { .fni8 = gen_ushll32_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_s,
+ .vece = MO_32 },
+ { .fni8 = gen_ushll64_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_d,
+ .vece = MO_64 } },
+ };
+
+ if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, (a->imm << 1) | sel,
+ &ops[uns][a->esz]);
+ }
+ return true;
+}
+
+static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, false, false);
+}
+
+static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, true, false);
+}
+
+static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, false, true);
+}
+
+static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, true, true);
+}