author		Stefan Hajnoczi <stefanha@redhat.com>	2025-06-24 10:38:38 -0400
committer	Stefan Hajnoczi <stefanha@redhat.com>	2025-06-24 10:38:38 -0400
commit		f9a3def17b2a57679902c33064cf7853263db0ef (patch)
tree		1274469591f5cdfbf41e27839c6d17cff60544b2
parent		24c00b754121f3569ea9e68f5f188747cf5b8439 (diff)
parent		5171b794d4ac47efd81ef4da54137131e4ea550d (diff)
Merge tag 'pull-tcg-20250623' of https://gitlab.com/rth7680/qemu into staging
linux-user: fix resource leaks in gen-vdso
tcg: Add ptr+ofs alternatives to some gvec functions

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmhZ/LMdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8aCggAtZOamQ0+EMe09u9d
# slaeZDlmxHYfb4RXJQasIBi/uHoWY1bFCEWqLnjU41cpNqI7B3yihbS/YQzyI1i/
# fqjATmuhDzer7rZfdtmRdiLi6kY9SuN9tcSVMVU/kxixByPxdYspQBO8hAAQMM1X
# ZY5MIR/5nEMN/U0QUMuqd3krsxzglGQl9Dn610ddVGfzluSCKLLMS/m92gaJmz0u
# xoLTM29lfdtIA29JPpVY+1X8NJ/vTUeBvy2eXUGHjT11rHsYUzMVGCGbzCLluEzN
# V3L/aSkiwrV+wW5M7R6+hySQl65ZVRV+E9BHuln9aDnG4jdzT3conohg2cY9a5jw
# m3HqnQ==
# =U6ub
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 23 Jun 2025 21:17:39 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20250623' of https://gitlab.com/rth7680/qemu:
  linux-user: fix resource leaks in gen-vdso
  linux-user/aarch64: Update hwcap bits from 6.14
  tcg: Split out tcg_gen_gvec_dup_imm_var
  tcg: Split out tcg_gen_gvec_{add,sub}_var
  tcg: Split out tcg_gen_gvec_mov_var
  tcg: Split out tcg_gen_gvec_3_var
  tcg: Split out tcg_gen_gvec_2_var
  tcg: Add base arguments to check_overlap_[234]
  tcg: Add dbase argument to expand_clr
  tcg: Add dbase argument to do_dup
  tcg: Add dbase argument to do_dup_store

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--	include/tcg/tcg-op-gvec-common.h	63
-rw-r--r--	linux-user/elfload.c	75
-rw-r--r--	linux-user/gen-vdso.c	27
-rw-r--r--	tcg/tcg-op-gvec.c	380
4 files changed, 383 insertions, 162 deletions
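
The tcg patches in this pull request add "_var" expanders that take an explicit base pointer plus offset instead of always addressing vector operands relative to tcg_env. As a rough sketch of how a target translator might use the new entry points (the CPUArchState field, register layout and function below are invented for illustration, not taken from this series):

/* Hypothetical target translator fragment; assumes tcg/tcg-op-gvec.h. */
static void gen_vreg_add64(int dreg, int areg, int breg)
{
    TCGv_ptr base = tcg_temp_new_ptr();

    /* Assumed layout: env->vec_state points at an array of 16-byte regs. */
    tcg_gen_ld_ptr(base, tcg_env, offsetof(CPUArchState, vec_state));

    tcg_gen_gvec_add_var(MO_64,
                         base, dreg * 16,   /* dbase + dofs */
                         base, areg * 16,   /* abase + aofs */
                         base, breg * 16,   /* bbase + bofs */
                         16, 16);           /* oprsz, maxsz */

    tcg_temp_free_ptr(base);
}
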
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
index 65553f5..ea0c87f 100644
--- a/include/tcg/tcg-op-gvec-common.h
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -227,25 +227,66 @@ typedef struct {
bool prefer_i64;
} GVecGen4i;
+/* Expand (dbase+dofs) = op(abase+aofs), length @oprsz, clearing to @maxsz. */
+void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *op);
+/* Similarly, expand (env+dofs) = op(env+aofs). */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *op);
+/* Similarly, expand (env+dofs) = op(env+aofs, c). */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, int64_t c, const GVecGen2i *);
+ uint32_t maxsz, int64_t c, const GVecGen2i *op);
+/* Similarly, expand (env+dofs) = op(env+aofs, s). */
void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
+ uint32_t maxsz, TCGv_i64 c, const GVecGen2s *op);
+
+/*
+ * Expand (dbase+dofs) = op(abase+aofs, bbase+bofs),
+ * length @oprsz, clearing to @maxsz.
+ */
+void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *op);
+/* Similarly, expand (env+dofs) = op(env+aofs, env+bofs). */
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *op);
+
+/*
+ * Depending on op->load_dest and op->write_aofs, expand
+ * (env+dofs) = op(env+aofs, env+bofs, c)
+ * or
+ * (env+dofs) = op(env+dofs, env+aofs, env+bofs, c)
+ * or
+ * (env+dofs), (env+aofs) = op(env+aofs, env+bofs, c)
+ * or
+ * (env+dofs), (env+aofs) = op(env+dofs, env+aofs, env+bofs, c)
+ */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
- const GVecGen3i *);
+ const GVecGen3i *op);
+
+/*
+ * Depending on op->write_aofs, expand
+ * (env+dofs) = op(env+aofs, env+bofs, env+cofs)
+ * or
+ * (env+dofs), (env+aofs) = op(env+aofs, env+bofs, env+cofs)
+ */
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
+
+/* Expand (env+dofs) = op(env+aofs, env+bofs, env+cofs, c). */
void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
const GVecGen4i *);
/* Expand a specific vector operation. */
+void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+
void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -255,6 +296,15 @@ void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz);
+
void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -336,6 +386,9 @@ void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, TCGv_i64);
+void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz, uint64_t imm);
+
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz);
void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
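
The other new prototypes above (mov and dup_imm) keep the usual oprsz/maxsz semantics, with any tail beyond oprsz cleared through the same explicit destination base. A hedged sketch, again with an invented vec_base pointer and 32-byte registers:

/* Hypothetical fragments; vec_base and the 32-byte register size are assumptions. */
static void gen_zero_vreg(TCGv_ptr vec_base, int reg)
{
    /* Replicate the byte 0 across the whole 32-byte register. */
    tcg_gen_gvec_dup_imm_var(MO_8, vec_base, reg * 32, 32, 32, 0);
}

static void gen_mov_low_half(TCGv_ptr vec_base, int dreg, int sreg)
{
    /*
     * Copy the low 16 bytes of sreg into dreg; because oprsz (16) is
     * smaller than maxsz (32), the upper 16 bytes of dreg are cleared.
     */
    tcg_gen_gvec_mov_var(MO_8, vec_base, dreg * 32,
                         vec_base, sreg * 32, 16, 32);
}
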
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 82ebf6a..2add166 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -751,7 +751,23 @@ enum {
ARM_HWCAP_A64_SSBS = 1 << 28,
ARM_HWCAP_A64_SB = 1 << 29,
ARM_HWCAP_A64_PACA = 1 << 30,
- ARM_HWCAP_A64_PACG = 1UL << 31,
+ ARM_HWCAP_A64_PACG = 1ULL << 31,
+ ARM_HWCAP_A64_GCS = 1ULL << 32,
+ ARM_HWCAP_A64_CMPBR = 1ULL << 33,
+ ARM_HWCAP_A64_FPRCVT = 1ULL << 34,
+ ARM_HWCAP_A64_F8MM8 = 1ULL << 35,
+ ARM_HWCAP_A64_F8MM4 = 1ULL << 36,
+ ARM_HWCAP_A64_SVE_F16MM = 1ULL << 37,
+ ARM_HWCAP_A64_SVE_ELTPERM = 1ULL << 38,
+ ARM_HWCAP_A64_SVE_AES2 = 1ULL << 39,
+ ARM_HWCAP_A64_SVE_BFSCALE = 1ULL << 40,
+ ARM_HWCAP_A64_SVE2P2 = 1ULL << 41,
+ ARM_HWCAP_A64_SME2P2 = 1ULL << 42,
+ ARM_HWCAP_A64_SME_SBITPERM = 1ULL << 43,
+ ARM_HWCAP_A64_SME_AES = 1ULL << 44,
+ ARM_HWCAP_A64_SME_SFEXPA = 1ULL << 45,
+ ARM_HWCAP_A64_SME_STMOP = 1ULL << 46,
+ ARM_HWCAP_A64_SME_SMOP4 = 1ULL << 47,
ARM_HWCAP2_A64_DCPODP = 1 << 0,
ARM_HWCAP2_A64_SVE2 = 1 << 1,
@@ -798,6 +814,25 @@ enum {
ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42,
ARM_HWCAP2_A64_MOPS = 1ULL << 43,
ARM_HWCAP2_A64_HBC = 1ULL << 44,
+ ARM_HWCAP2_A64_SVE_B16B16 = 1ULL << 45,
+ ARM_HWCAP2_A64_LRCPC3 = 1ULL << 46,
+ ARM_HWCAP2_A64_LSE128 = 1ULL << 47,
+ ARM_HWCAP2_A64_FPMR = 1ULL << 48,
+ ARM_HWCAP2_A64_LUT = 1ULL << 49,
+ ARM_HWCAP2_A64_FAMINMAX = 1ULL << 50,
+ ARM_HWCAP2_A64_F8CVT = 1ULL << 51,
+ ARM_HWCAP2_A64_F8FMA = 1ULL << 52,
+ ARM_HWCAP2_A64_F8DP4 = 1ULL << 53,
+ ARM_HWCAP2_A64_F8DP2 = 1ULL << 54,
+ ARM_HWCAP2_A64_F8E4M3 = 1ULL << 55,
+ ARM_HWCAP2_A64_F8E5M2 = 1ULL << 56,
+ ARM_HWCAP2_A64_SME_LUTV2 = 1ULL << 57,
+ ARM_HWCAP2_A64_SME_F8F16 = 1ULL << 58,
+ ARM_HWCAP2_A64_SME_F8F32 = 1ULL << 59,
+ ARM_HWCAP2_A64_SME_SF8FMA = 1ULL << 60,
+ ARM_HWCAP2_A64_SME_SF8DP4 = 1ULL << 61,
+ ARM_HWCAP2_A64_SME_SF8DP2 = 1ULL << 62,
+ ARM_HWCAP2_A64_POE = 1ULL << 63,
};
#define ELF_HWCAP get_elf_hwcap()
@@ -886,7 +921,7 @@ uint64_t get_elf_hwcap2(void)
const char *elf_hwcap_str(uint32_t bit)
{
- static const char *hwcap_str[] = {
+ static const char * const hwcap_str[] = {
[__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp",
[__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd",
[__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
@@ -919,6 +954,22 @@ const char *elf_hwcap_str(uint32_t bit)
[__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb",
[__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca",
[__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg",
+ [__builtin_ctzll(ARM_HWCAP_A64_GCS )] = "gcs",
+ [__builtin_ctzll(ARM_HWCAP_A64_CMPBR )] = "cmpbr",
+ [__builtin_ctzll(ARM_HWCAP_A64_FPRCVT)] = "fprcvt",
+ [__builtin_ctzll(ARM_HWCAP_A64_F8MM8 )] = "f8mm8",
+ [__builtin_ctzll(ARM_HWCAP_A64_F8MM4 )] = "f8mm4",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_F16MM)] = "svef16mm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_ELTPERM)] = "sveeltperm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_AES2)] = "sveaes2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_BFSCALE)] = "svebfscale",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE2P2)] = "sve2p2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME2P2)] = "sme2p2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SBITPERM)] = "smesbitperm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_AES)] = "smeaes",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SFEXPA)] = "smesfexpa",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_STMOP)] = "smestmop",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SMOP4)] = "smesmop4",
};
return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
@@ -926,7 +977,7 @@ const char *elf_hwcap_str(uint32_t bit)
const char *elf_hwcap2_str(uint32_t bit)
{
- static const char *hwcap_str[] = {
+ static const char * const hwcap_str[] = {
[__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp",
[__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2",
[__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes",
@@ -972,6 +1023,24 @@ const char *elf_hwcap2_str(uint32_t bit)
[__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
[__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops",
[__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE_B16B16 )] = "sveb16b16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LRCPC3 )] = "lrcpc3",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LSE128 )] = "lse128",
+ [__builtin_ctzll(ARM_HWCAP2_A64_FPMR )] = "fpmr",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LUT )] = "lut",
+ [__builtin_ctzll(ARM_HWCAP2_A64_FAMINMAX )] = "faminmax",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8CVT )] = "f8cvt",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8FMA )] = "f8fma",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8DP4 )] = "f8dp4",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8DP2 )] = "f8dp2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8E4M3 )] = "f8e4m3",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8E5M2 )] = "f8e5m2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_LUTV2 )] = "smelutv2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F16 )] = "smef8f16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F32 )] = "smef8f32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP4 )] = "smesf8dp4",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP2 )] = "smesf8dp2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_POE )] = "poe",
};
return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
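
Both hwcap string tables above rely on each hwcap constant having exactly one bit set, so __builtin_ctzll() of the constant is its bit index and can serve as a designated array index. A standalone illustration of that lookup pattern (feature names invented, not ARM hwcaps):

#include <stdint.h>
#include <stdio.h>

#define FEAT_FOO (1ULL << 3)
#define FEAT_BAR (1ULL << 40)

static const char *const feat_str[] = {
    [__builtin_ctzll(FEAT_FOO)] = "foo",
    [__builtin_ctzll(FEAT_BAR)] = "bar",
};

#define ARRAY_LEN(x) (sizeof(x) / sizeof((x)[0]))

/* Look up the name for a bit number, as elf_hwcap_str() does; NULL for gaps. */
static const char *feat_name(uint32_t bit)
{
    return bit < ARRAY_LEN(feat_str) ? feat_str[bit] : NULL;
}

int main(void)
{
    printf("%s %s\n", feat_name(3), feat_name(40));   /* prints: foo bar */
    return 0;
}
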
diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c
index 721f38d..fce9d5c 100644
--- a/linux-user/gen-vdso.c
+++ b/linux-user/gen-vdso.c
@@ -56,13 +56,14 @@ static unsigned rt_sigreturn_addr;
int main(int argc, char **argv)
{
- FILE *inf, *outf;
+ FILE *inf = NULL, *outf = NULL;
long total_len;
const char *prefix = "vdso";
const char *inf_name;
const char *outf_name = NULL;
- unsigned char *buf;
+ unsigned char *buf = NULL;
bool need_bswap;
+ int ret = EXIT_FAILURE;
while (1) {
int opt = getopt(argc, argv, "o:p:r:s:");
@@ -129,7 +130,6 @@ int main(int argc, char **argv)
fprintf(stderr, "%s: incomplete read\n", inf_name);
return EXIT_FAILURE;
}
- fclose(inf);
/*
* Identify which elf flavor we're processing.
@@ -205,19 +205,24 @@ int main(int argc, char **argv)
fprintf(outf, " .rt_sigreturn_ofs = 0x%x,\n", rt_sigreturn_addr);
fprintf(outf, "};\n");
- /*
- * Everything should have gone well.
- */
- if (fclose(outf)) {
- goto perror_outf;
+ ret = EXIT_SUCCESS;
+
+ cleanup:
+ free(buf);
+
+ if (outf && fclose(outf) != 0) {
+ ret = EXIT_FAILURE;
+ }
+ if (inf && fclose(inf) != 0) {
+ ret = EXIT_FAILURE;
}
- return EXIT_SUCCESS;
+ return ret;
perror_inf:
perror(inf_name);
- return EXIT_FAILURE;
+ goto cleanup;
perror_outf:
perror(outf_name);
- return EXIT_FAILURE;
+ goto cleanup;
}
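
The gen-vdso change replaces the early returns with a single cleanup label so that buf, inf and outf are released on every path, and a failing fclose() still results in EXIT_FAILURE. A stripped-down sketch of the same idiom (not the actual tool; file handling is simplified):

#include <stdio.h>
#include <stdlib.h>

int process(const char *inf_name, const char *outf_name)
{
    FILE *inf = NULL, *outf = NULL;
    unsigned char *buf = NULL;
    int ret = EXIT_FAILURE;

    inf = fopen(inf_name, "rb");
    if (!inf) {
        goto perror_inf;
    }
    outf = fopen(outf_name, "w");
    if (!outf) {
        goto perror_outf;
    }
    buf = malloc(4096);
    if (!buf) {
        perror("malloc");
        goto cleanup;
    }

    /* ... transform inf into outf using buf ... */

    ret = EXIT_SUCCESS;

 cleanup:
    free(buf);
    /* fclose() can fail on a buffered write error; report that too. */
    if (outf && fclose(outf) != 0) {
        ret = EXIT_FAILURE;
    }
    if (inf && fclose(inf) != 0) {
        ret = EXIT_FAILURE;
    }
    return ret;

 perror_inf:
    perror(inf_name);
    goto cleanup;
 perror_outf:
    perror(outf_name);
    goto cleanup;
}
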
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index d32a4f1..2d18454 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -57,30 +57,39 @@ static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
tcg_debug_assert((ofs & max_align) == 0);
}
-/* Verify vector overlap rules for two operands. */
-static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
+/*
+ * Verify vector overlap rules for two operands.
+ * When dbase and abase are not the same pointer, we cannot check for
+ * overlap at compile-time, but the runtime restrictions remain.
+ */
+static void check_overlap_2(TCGv_ptr dbase, uint32_t d,
+ TCGv_ptr abase, uint32_t a, uint32_t s)
{
- tcg_debug_assert(d == a || d + s <= a || a + s <= d);
+ tcg_debug_assert(dbase != abase || d == a || d + s <= a || a + s <= d);
}
/* Verify vector overlap rules for three operands. */
-static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
+static void check_overlap_3(TCGv_ptr dbase, uint32_t d,
+ TCGv_ptr abase, uint32_t a,
+ TCGv_ptr bbase, uint32_t b, uint32_t s)
{
- check_overlap_2(d, a, s);
- check_overlap_2(d, b, s);
- check_overlap_2(a, b, s);
+ check_overlap_2(dbase, d, abase, a, s);
+ check_overlap_2(dbase, d, bbase, b, s);
+ check_overlap_2(abase, a, bbase, b, s);
}
/* Verify vector overlap rules for four operands. */
-static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
- uint32_t c, uint32_t s)
+static void check_overlap_4(TCGv_ptr dbase, uint32_t d,
+ TCGv_ptr abase, uint32_t a,
+ TCGv_ptr bbase, uint32_t b,
+ TCGv_ptr cbase, uint32_t c, uint32_t s)
{
- check_overlap_2(d, a, s);
- check_overlap_2(d, b, s);
- check_overlap_2(d, c, s);
- check_overlap_2(a, b, s);
- check_overlap_2(a, c, s);
- check_overlap_2(b, c, s);
+ check_overlap_2(dbase, d, abase, a, s);
+ check_overlap_2(dbase, d, bbase, b, s);
+ check_overlap_2(dbase, d, cbase, c, s);
+ check_overlap_2(abase, a, bbase, b, s);
+ check_overlap_2(abase, a, cbase, c, s);
+ check_overlap_2(bbase, b, cbase, c, s);
}
/* Create a descriptor from components. */
@@ -124,9 +133,10 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
}
/* Generate a call to a gvec-style helper with two vector operands. */
-void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_2 *fn)
+static void expand_2_ool(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_2 *fn)
{
TCGv_ptr a0, a1;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
@@ -134,8 +144,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a0, dbase, dofs);
+ tcg_gen_addi_ptr(a1, abase, aofs);
fn(a0, a1, desc);
@@ -143,6 +153,13 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
tcg_temp_free_ptr(a1);
}
+void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_2 *fn)
+{
+ expand_2_ool(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, data, fn);
+}
+
/* Generate a call to a gvec-style helper with two vector operands
and one scalar operand. */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
@@ -165,9 +182,11 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
}
/* Generate a call to a gvec-style helper with three vector operands. */
-void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_3 *fn)
+static void expand_3_ool(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_3 *fn)
{
TCGv_ptr a0, a1, a2;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
@@ -176,9 +195,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
+ tcg_gen_addi_ptr(a0, dbase, dofs);
+ tcg_gen_addi_ptr(a1, abase, aofs);
+ tcg_gen_addi_ptr(a2, bbase, bofs);
fn(a0, a1, a2, desc);
@@ -187,6 +206,14 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_temp_free_ptr(a2);
}
+void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_3 *fn)
+{
+ expand_3_ool(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz, data, fn);
+}
+
/* Generate a call to a gvec-style helper with four vector operands. */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
@@ -380,7 +407,7 @@ static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
return q <= MAX_UNROLL;
}
-static void expand_clr(uint32_t dofs, uint32_t maxsz);
+static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz);
/* Duplicate C as per VECE. */
uint64_t (dup_const)(unsigned vece, uint64_t c)
@@ -483,8 +510,8 @@ static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
return 0;
}
-static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_vec t_vec)
+static void do_dup_store(TCGType type, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz, TCGv_vec t_vec)
{
uint32_t i = 0;
@@ -496,7 +523,7 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
* are misaligned wrt the maximum vector size, so do that first.
*/
if (dofs & 8) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64);
i += 8;
}
@@ -508,17 +535,17 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
for (; i + 32 <= oprsz; i += 32) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V256);
}
/* fallthru */
case TCG_TYPE_V128:
for (; i + 16 <= oprsz; i += 16) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V128);
}
break;
case TCG_TYPE_V64:
for (; i < oprsz; i += 8) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
+ tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64);
}
break;
default:
@@ -526,17 +553,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
-/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
+/*
+ * Set OPRSZ bytes at DBASE + DOFS to replications of IN_32, IN_64 or IN_C.
* Only one of IN_32 or IN_64 may be set;
* IN_C is used if IN_32 and IN_64 are unset.
*/
-static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
- uint64_t in_c)
+static void do_dup(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz,
+ TCGv_i32 in_32, TCGv_i64 in_64, uint64_t in_c)
{
TCGType type;
TCGv_i64 t_64;
@@ -574,7 +602,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
} else {
tcg_gen_dupi_vec(vece, t_vec, in_c);
}
- do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+ do_dup_store(type, dbase, dofs, oprsz, maxsz, t_vec);
return;
}
@@ -618,14 +646,14 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
/* Implement inline if we picked an implementation size above. */
if (t_32) {
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_st_i32(t_32, tcg_env, dofs + i);
+ tcg_gen_st_i32(t_32, dbase, dofs + i);
}
tcg_temp_free_i32(t_32);
goto done;
}
if (t_64) {
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_st_i64(t_64, tcg_env, dofs + i);
+ tcg_gen_st_i64(t_64, dbase, dofs + i);
}
tcg_temp_free_i64(t_64);
goto done;
@@ -634,7 +662,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
/* Otherwise implement out of line. */
t_ptr = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(t_ptr, tcg_env, dofs);
+ tcg_gen_addi_ptr(t_ptr, dbase, dofs);
/*
* This may be expand_clr for the tail of an operation, e.g.
@@ -703,31 +731,32 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
done:
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
/* Likewise, but with zero. */
-static void expand_clr(uint32_t dofs, uint32_t maxsz)
+static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz)
{
- do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
+ do_dup(MO_8, dbase, dofs, maxsz, maxsz, NULL, NULL, 0);
}
/* Expand OPSZ bytes worth of two-operand operations using i32 elements. */
-static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
+static void expand_2_i32(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase,
+ uint32_t aofs, uint32_t oprsz, bool load_dest,
+ void (*fni)(TCGv_i32, TCGv_i32))
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t0, abase, aofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t1, tcg_env, dofs + i);
+ tcg_gen_ld_i32(t1, dbase, dofs + i);
}
fni(t1, t0);
- tcg_gen_st_i32(t1, tcg_env, dofs + i);
+ tcg_gen_st_i32(t1, dbase, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
@@ -775,8 +804,10 @@ static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
}
/* Expand OPSZ bytes worth of three-operand operations using i32 elements. */
-static void expand_3_i32(uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, bool load_dest,
+static void expand_3_i32(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, bool load_dest,
void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
TCGv_i32 t0 = tcg_temp_new_i32();
@@ -785,13 +816,13 @@ static void expand_3_i32(uint32_t dofs, uint32_t aofs,
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- tcg_gen_ld_i32(t1, tcg_env, bofs + i);
+ tcg_gen_ld_i32(t0, abase, aofs + i);
+ tcg_gen_ld_i32(t1, bbase, bofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t2, tcg_env, dofs + i);
+ tcg_gen_ld_i32(t2, dbase, dofs + i);
}
fni(t2, t0, t1);
- tcg_gen_st_i32(t2, tcg_env, dofs + i);
+ tcg_gen_st_i32(t2, dbase, dofs + i);
}
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t1);
@@ -877,20 +908,21 @@ static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
}
/* Expand OPSZ bytes worth of two-operand operations using i64 elements. */
-static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
+static void expand_2_i64(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase,
+ uint32_t aofs, uint32_t oprsz, bool load_dest,
+ void (*fni)(TCGv_i64, TCGv_i64))
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t0, abase, aofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t1, tcg_env, dofs + i);
+ tcg_gen_ld_i64(t1, dbase, dofs + i);
}
fni(t1, t0);
- tcg_gen_st_i64(t1, tcg_env, dofs + i);
+ tcg_gen_st_i64(t1, dbase, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -938,8 +970,10 @@ static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
}
/* Expand OPSZ bytes worth of three-operand operations using i64 elements. */
-static void expand_3_i64(uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, bool load_dest,
+static void expand_3_i64(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, bool load_dest,
void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 t0 = tcg_temp_new_i64();
@@ -948,13 +982,13 @@ static void expand_3_i64(uint32_t dofs, uint32_t aofs,
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- tcg_gen_ld_i64(t1, tcg_env, bofs + i);
+ tcg_gen_ld_i64(t0, abase, aofs + i);
+ tcg_gen_ld_i64(t1, bbase, bofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t2, tcg_env, dofs + i);
+ tcg_gen_ld_i64(t2, dbase, dofs + i);
}
fni(t2, t0, t1);
- tcg_gen_st_i64(t2, tcg_env, dofs + i);
+ tcg_gen_st_i64(t2, dbase, dofs + i);
}
tcg_temp_free_i64(t2);
tcg_temp_free_i64(t1);
@@ -1040,7 +1074,8 @@ static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
}
/* Expand OPSZ bytes worth of two-operand operations using host vectors. */
-static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+static void expand_2_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
uint32_t oprsz, uint32_t tysz, TCGType type,
bool load_dest,
void (*fni)(unsigned, TCGv_vec, TCGv_vec))
@@ -1049,12 +1084,12 @@ static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_vec t0 = tcg_temp_new_vec(type);
TCGv_vec t1 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t0, abase, aofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t1, tcg_env, dofs + i);
+ tcg_gen_ld_vec(t1, dbase, dofs + i);
}
fni(vece, t1, t0);
- tcg_gen_st_vec(t1, tcg_env, dofs + i);
+ tcg_gen_st_vec(t1, dbase, dofs + i);
}
}
@@ -1098,8 +1133,9 @@ static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
}
/* Expand OPSZ bytes worth of three-operand operations using host vectors. */
-static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz,
+static void expand_3_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs, uint32_t oprsz,
uint32_t tysz, TCGType type, bool load_dest,
void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
@@ -1108,13 +1144,13 @@ static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_vec t1 = tcg_temp_new_vec(type);
TCGv_vec t2 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- tcg_gen_ld_vec(t1, tcg_env, bofs + i);
+ tcg_gen_ld_vec(t0, abase, aofs + i);
+ tcg_gen_ld_vec(t1, bbase, bofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t2, tcg_env, dofs + i);
+ tcg_gen_ld_vec(t2, dbase, dofs + i);
}
fni(vece, t2, t0, t1);
- tcg_gen_st_vec(t2, tcg_env, dofs + i);
+ tcg_gen_st_vec(t2, dbase, dofs + i);
}
}
@@ -1196,8 +1232,9 @@ static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
}
/* Expand a vector two-operand operation. */
-void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
+void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
@@ -1205,7 +1242,7 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(dbase, dofs, abase, aofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1218,8 +1255,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
- g->load_dest, g->fniv);
+ expand_2_vec(g->vece, dbase, dofs, abase, aofs, some, 32,
+ TCG_TYPE_V256, g->load_dest, g->fniv);
if (some == oprsz) {
break;
}
@@ -1229,22 +1266,25 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
- g->load_dest, g->fniv);
+ expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 16,
+ TCG_TYPE_V128, g->load_dest, g->fniv);
break;
case TCG_TYPE_V64:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
- g->load_dest, g->fniv);
+ expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 8,
+ TCG_TYPE_V64, g->load_dest, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
+ expand_2_i64(dbase, dofs, abase, aofs,
+ oprsz, g->load_dest, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
+ expand_2_i32(dbase, dofs, abase, aofs,
+ oprsz, g->load_dest, g->fni4);
} else {
assert(g->fno != NULL);
- tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
+ expand_2_ool(dbase, dofs, abase, aofs,
+ oprsz, maxsz, g->data, g->fno);
oprsz = maxsz;
}
break;
@@ -1255,10 +1295,16 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
+void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
+{
+ tcg_gen_gvec_2_var(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, g);
+}
+
/* Expand a vector operation with two vectors and an immediate. */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
uint32_t maxsz, int64_t c, const GVecGen2i *g)
@@ -1269,7 +1315,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1324,7 +1370,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1335,7 +1381,7 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
TCGType type;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1401,13 +1447,15 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
/* Expand a vector three-operand operation. */
-void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
+void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
@@ -1415,7 +1463,7 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
+ check_overlap_3(dbase, dofs, abase, aofs, bbase, bofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1428,8 +1476,8 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
- g->load_dest, g->fniv);
+ expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+ some, 32, TCG_TYPE_V256, g->load_dest, g->fniv);
if (some == oprsz) {
break;
}
@@ -1440,23 +1488,25 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
- expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
- g->load_dest, g->fniv);
+ expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, 16, TCG_TYPE_V128, g->load_dest, g->fniv);
break;
case TCG_TYPE_V64:
- expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
- g->load_dest, g->fniv);
+ expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, 8, TCG_TYPE_V64, g->load_dest, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
+ expand_3_i64(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, g->load_dest, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
+ expand_3_i32(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, g->load_dest, g->fni4);
} else {
assert(g->fno != NULL);
- tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
- maxsz, g->data, g->fno);
+ expand_3_ool(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, maxsz, g->data, g->fno);
oprsz = maxsz;
}
break;
@@ -1467,10 +1517,17 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
}
+void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
+{
+ tcg_gen_gvec_3_var(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz, g);
+}
+
/* Expand a vector operation with three vectors and an immediate. */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
@@ -1482,7 +1539,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
+ check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1536,7 +1593,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1550,7 +1607,8 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
- check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
+ check_overlap_4(tcg_env, dofs, tcg_env, aofs,
+ tcg_env, bofs, tcg_env, cofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1605,7 +1663,7 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1620,7 +1678,8 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
- check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
+ check_overlap_4(tcg_env, dofs, tcg_env, aofs,
+ tcg_env, bofs, tcg_env, cofs, maxsz);
type = 0;
if (g->fniv) {
@@ -1674,7 +1733,7 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -1687,8 +1746,9 @@ static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
tcg_gen_mov_vec(a, b);
}
-void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen2 g = {
.fni8 = tcg_gen_mov_i64,
@@ -1696,14 +1756,22 @@ void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
.fno = gen_helper_gvec_mov,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
- if (dofs != aofs) {
- tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
- } else {
+
+ if (dofs == aofs && dbase == abase) {
check_size_align(oprsz, maxsz, dofs);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
}
+ return;
}
+
+ tcg_gen_gvec_2_var(dbase, dofs, abase, aofs, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_mov_var(vece, tcg_env, dofs, tcg_env, aofs, oprsz, maxsz);
}
void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
@@ -1711,7 +1779,7 @@ void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
{
check_size_align(oprsz, maxsz, dofs);
tcg_debug_assert(vece <= MO_32);
- do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0);
}
void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
@@ -1719,7 +1787,7 @@ void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
{
check_size_align(oprsz, maxsz, dofs);
tcg_debug_assert(vece <= MO_64);
- do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0);
}
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -1731,7 +1799,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
if (type != 0) {
TCGv_vec t_vec = tcg_temp_new_vec(type);
tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs);
- do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+ do_dup_store(type, tcg_env, dofs, oprsz, maxsz, t_vec);
} else if (vece <= MO_32) {
TCGv_i32 in = tcg_temp_ebb_new_i32();
switch (vece) {
@@ -1745,12 +1813,12 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_gen_ld_i32(in, tcg_env, aofs);
break;
}
- do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0);
tcg_temp_free_i32(in);
} else {
TCGv_i64 in = tcg_temp_ebb_new_i64();
tcg_gen_ld_i64(in, tcg_env, aofs);
- do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
+ do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0);
tcg_temp_free_i64(in);
}
} else if (vece == 4) {
@@ -1779,7 +1847,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_temp_free_i64(in1);
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
} else if (vece == 5) {
/* 256-bit duplicate. */
@@ -1822,18 +1890,24 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
}
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
} else {
g_assert_not_reached();
}
}
+void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ uint32_t oprsz, uint32_t maxsz, uint64_t x)
+{
+ check_size_align(oprsz, maxsz, dofs);
+ do_dup(vece, dbase, dofs, oprsz, maxsz, NULL, NULL, x);
+}
+
void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint64_t x)
{
- check_size_align(oprsz, maxsz, dofs);
- do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
+ tcg_gen_gvec_dup_imm_var(vece, tcg_env, dofs, oprsz, maxsz, x);
}
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -1931,8 +2005,10 @@ void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
-void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen3 g[4] = {
{ .fni8 = tcg_gen_vec_add8_i64,
@@ -1959,7 +2035,15 @@ void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+ tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_add_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz);
}
void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2112,8 +2196,10 @@ void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
tcg_temp_free_i64(t2);
}
-void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+ TCGv_ptr abase, uint32_t aofs,
+ TCGv_ptr bbase, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz)
{
static const GVecGen3 g[4] = {
{ .fni8 = tcg_gen_vec_sub8_i64,
@@ -2140,7 +2226,15 @@ void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+ tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs,
+ oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_sub_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+ oprsz, maxsz);
}
static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
@@ -3149,7 +3243,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
/* If the backend has a scalar expansion, great. */
type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
@@ -3255,7 +3349,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
clear_tail:
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -3769,10 +3863,10 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
+ check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz);
if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
- do_dup(MO_8, dofs, oprsz, maxsz,
+ do_dup(MO_8, tcg_env, dofs, oprsz, maxsz,
NULL, NULL, -(cond == TCG_COND_ALWAYS));
return;
}
@@ -3834,7 +3928,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
@@ -3889,10 +3983,10 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
TCGType type;
check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
+ check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
- do_dup(MO_8, dofs, oprsz, maxsz,
+ do_dup(MO_8, tcg_env, dofs, oprsz, maxsz,
NULL, NULL, -(cond == TCG_COND_ALWAYS));
return;
}
@@ -3975,7 +4069,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
}
if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
+ expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
}
}
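
With explicit bases, check_overlap_2() above can only enforce the "identical or fully disjoint" rule when both operands share a base pointer; offsets against different TCGv_ptr bases cannot be compared at translation time. Restated on its own (plain C, a paraphrase of the assertion rather than QEMU code), the per-base rule is:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Two operand ranges [d, d+s) and [a, a+s) relative to the same base must
 * either coincide exactly or not overlap at all.
 */
static bool overlap_ok(uint32_t d, uint32_t a, uint32_t s)
{
    return d == a || d + s <= a || a + s <= d;
}

int main(void)
{
    assert(overlap_ok(0, 0, 16));    /* identical: allowed */
    assert(overlap_ok(0, 16, 16));   /* disjoint: allowed */
    assert(!overlap_ok(0, 8, 16));   /* partial overlap: rejected */
    return 0;
}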