Diffstat (limited to 'tcg/tcg-op-gvec.c')
-rw-r--r--  tcg/tcg-op-gvec.c | 398
1 file changed, 253 insertions(+), 145 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 0308732..2d18454 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -23,6 +23,7 @@
 #include "tcg/tcg-op-common.h"
 #include "tcg/tcg-op-gvec-common.h"
 #include "tcg/tcg-gvec-desc.h"
+#include "tcg-has.h"
 
 #define MAX_UNROLL 4
 
@@ -56,30 +57,39 @@ static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
     tcg_debug_assert((ofs & max_align) == 0);
 }
 
-/* Verify vector overlap rules for two operands. */
-static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
+/*
+ * Verify vector overlap rules for two operands.
+ * When dbase and abase are not the same pointer, we cannot check for
+ * overlap at compile-time, but the runtime restrictions remain.
+ */
+static void check_overlap_2(TCGv_ptr dbase, uint32_t d,
+                            TCGv_ptr abase, uint32_t a, uint32_t s)
 {
-    tcg_debug_assert(d == a || d + s <= a || a + s <= d);
+    tcg_debug_assert(dbase != abase || d == a || d + s <= a || a + s <= d);
 }
 
 /* Verify vector overlap rules for three operands. */
-static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
+static void check_overlap_3(TCGv_ptr dbase, uint32_t d,
+                            TCGv_ptr abase, uint32_t a,
+                            TCGv_ptr bbase, uint32_t b, uint32_t s)
 {
-    check_overlap_2(d, a, s);
-    check_overlap_2(d, b, s);
-    check_overlap_2(a, b, s);
+    check_overlap_2(dbase, d, abase, a, s);
+    check_overlap_2(dbase, d, bbase, b, s);
+    check_overlap_2(abase, a, bbase, b, s);
 }
 
 /* Verify vector overlap rules for four operands. */
-static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
-                            uint32_t c, uint32_t s)
+static void check_overlap_4(TCGv_ptr dbase, uint32_t d,
+                            TCGv_ptr abase, uint32_t a,
+                            TCGv_ptr bbase, uint32_t b,
+                            TCGv_ptr cbase, uint32_t c, uint32_t s)
 {
-    check_overlap_2(d, a, s);
-    check_overlap_2(d, b, s);
-    check_overlap_2(d, c, s);
-    check_overlap_2(a, b, s);
-    check_overlap_2(a, c, s);
-    check_overlap_2(b, c, s);
+    check_overlap_2(dbase, d, abase, a, s);
+    check_overlap_2(dbase, d, bbase, b, s);
+    check_overlap_2(dbase, d, cbase, c, s);
+    check_overlap_2(abase, a, bbase, b, s);
+    check_overlap_2(abase, a, cbase, c, s);
+    check_overlap_2(bbase, b, cbase, c, s);
 }
 
 /* Create a descriptor from components. */
@@ -88,7 +98,20 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
 {
     uint32_t desc = 0;
     check_size_align(oprsz, maxsz, 0);
-    tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS));
+
+    /*
+     * We want to check that 'data' will fit into SIMD_DATA_BITS.
+     * However, some callers want to treat the data as a signed
+     * value (which they can later get back with simd_data())
+     * and some want to treat it as an unsigned value.
+     * So here we assert only that the data will fit into the
+     * field in at least one way.  This means that some invalid
+     * values from the caller will not be detected, e.g. if the
+     * caller wants to handle the value as a signed integer but
+     * incorrectly passes us 1 << (SIMD_DATA_BITS - 1).
+     */
+    tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) ||
+                     data == extract32(data, 0, SIMD_DATA_BITS));
 
     oprsz = (oprsz / 8) - 1;
     maxsz = (maxsz / 8) - 1;
@@ -110,9 +133,10 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
 }
 
 /* Generate a call to a gvec-style helper with two vector operands.  */
-void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
-                        uint32_t oprsz, uint32_t maxsz, int32_t data,
-                        gen_helper_gvec_2 *fn)
+static void expand_2_ool(TCGv_ptr dbase, uint32_t dofs,
+                         TCGv_ptr abase, uint32_t aofs,
+                         uint32_t oprsz, uint32_t maxsz,
+                         int32_t data, gen_helper_gvec_2 *fn)
 {
     TCGv_ptr a0, a1;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
@@ -120,8 +144,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
     a0 = tcg_temp_ebb_new_ptr();
     a1 = tcg_temp_ebb_new_ptr();
 
-    tcg_gen_addi_ptr(a0, tcg_env, dofs);
-    tcg_gen_addi_ptr(a1, tcg_env, aofs);
+    tcg_gen_addi_ptr(a0, dbase, dofs);
+    tcg_gen_addi_ptr(a1, abase, aofs);
 
     fn(a0, a1, desc);
 
@@ -129,6 +153,13 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
     tcg_temp_free_ptr(a1);
 }
 
+void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
+                        uint32_t oprsz, uint32_t maxsz, int32_t data,
+                        gen_helper_gvec_2 *fn)
+{
+    expand_2_ool(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, data, fn);
+}
+
 /* Generate a call to a gvec-style helper with two vector operands
    and one scalar operand.  */
 void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
@@ -151,9 +182,11 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
 }
 
 /* Generate a call to a gvec-style helper with three vector operands.  */
-void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
-                        uint32_t oprsz, uint32_t maxsz, int32_t data,
-                        gen_helper_gvec_3 *fn)
+static void expand_3_ool(TCGv_ptr dbase, uint32_t dofs,
+                         TCGv_ptr abase, uint32_t aofs,
+                         TCGv_ptr bbase, uint32_t bofs,
+                         uint32_t oprsz, uint32_t maxsz,
+                         int32_t data, gen_helper_gvec_3 *fn)
 {
     TCGv_ptr a0, a1, a2;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
@@ -162,9 +195,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     a1 = tcg_temp_ebb_new_ptr();
     a2 = tcg_temp_ebb_new_ptr();
 
-    tcg_gen_addi_ptr(a0, tcg_env, dofs);
-    tcg_gen_addi_ptr(a1, tcg_env, aofs);
-    tcg_gen_addi_ptr(a2, tcg_env, bofs);
+    tcg_gen_addi_ptr(a0, dbase, dofs);
+    tcg_gen_addi_ptr(a1, abase, aofs);
+    tcg_gen_addi_ptr(a2, bbase, bofs);
 
     fn(a0, a1, a2, desc);
 
@@ -173,6 +206,14 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a2);
 }
 
+void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                        uint32_t oprsz, uint32_t maxsz, int32_t data,
+                        gen_helper_gvec_3 *fn)
+{
+    expand_3_ool(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+                 oprsz, maxsz, data, fn);
+}
+
 /* Generate a call to a gvec-style helper with four vector operands.  */
 void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
@@ -366,7 +407,7 @@ static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
     return q <= MAX_UNROLL;
 }
 
-static void expand_clr(uint32_t dofs, uint32_t maxsz);
+static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz);
 
 /* Duplicate C as per VECE. */
 uint64_t (dup_const)(unsigned vece, uint64_t c)
@@ -469,8 +510,8 @@ static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
     return 0;
 }
 
-static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
-                         uint32_t maxsz, TCGv_vec t_vec)
+static void do_dup_store(TCGType type, TCGv_ptr dbase, uint32_t dofs,
+                         uint32_t oprsz, uint32_t maxsz, TCGv_vec t_vec)
 {
     uint32_t i = 0;
 
@@ -482,7 +523,7 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
      * are misaligned wrt the maximum vector size, so do that first.
      */
     if (dofs & 8) {
-        tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
+        tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64);
         i += 8;
     }
 
@@ -494,17 +535,17 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
          * that e.g. size == 80 would be expanded with 2x32 + 1x16.
          */
        for (; i + 32 <= oprsz; i += 32) {
-            tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256);
+            tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V256);
        }
        /* fallthru */
     case TCG_TYPE_V128:
         for (; i + 16 <= oprsz; i += 16) {
-            tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128);
+            tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V128);
         }
         break;
     case TCG_TYPE_V64:
         for (; i < oprsz; i += 8) {
-            tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
+            tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64);
         }
         break;
     default:
@@ -512,17 +553,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
     }
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
     }
 }
 
-/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
+/*
+ * Set OPRSZ bytes at DBASE + DOFS to replications of IN_32, IN_64 or IN_C.
  * Only one of IN_32 or IN_64 may be set;
  * IN_C is used if IN_32 and IN_64 are unset.
  */
-static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
-                   uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
-                   uint64_t in_c)
+static void do_dup(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                   uint32_t oprsz, uint32_t maxsz,
+                   TCGv_i32 in_32, TCGv_i64 in_64, uint64_t in_c)
 {
     TCGType type;
     TCGv_i64 t_64;
@@ -560,7 +602,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
         } else {
             tcg_gen_dupi_vec(vece, t_vec, in_c);
         }
-        do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+        do_dup_store(type, dbase, dofs, oprsz, maxsz, t_vec);
         return;
     }
 
@@ -604,14 +646,14 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
     /* Implement inline if we picked an implementation size above. */
     if (t_32) {
         for (i = 0; i < oprsz; i += 4) {
-            tcg_gen_st_i32(t_32, tcg_env, dofs + i);
+            tcg_gen_st_i32(t_32, dbase, dofs + i);
         }
         tcg_temp_free_i32(t_32);
         goto done;
     }
     if (t_64) {
         for (i = 0; i < oprsz; i += 8) {
-            tcg_gen_st_i64(t_64, tcg_env, dofs + i);
+            tcg_gen_st_i64(t_64, dbase, dofs + i);
         }
         tcg_temp_free_i64(t_64);
         goto done;
@@ -620,7 +662,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
 
     /* Otherwise implement out of line. */
     t_ptr = tcg_temp_ebb_new_ptr();
-    tcg_gen_addi_ptr(t_ptr, tcg_env, dofs);
+    tcg_gen_addi_ptr(t_ptr, dbase, dofs);
 
     /*
      * This may be expand_clr for the tail of an operation, e.g.
@@ -689,31 +731,32 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
 
  done:
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
     }
 }
 
 /* Likewise, but with zero. */
-static void expand_clr(uint32_t dofs, uint32_t maxsz)
+static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz)
 {
-    do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
+    do_dup(MO_8, dbase, dofs, maxsz, maxsz, NULL, NULL, 0);
 }
 
 /* Expand OPSZ bytes worth of two-operand operations using i32 elements.  */
-static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
-                         bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
+static void expand_2_i32(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase,
+                         uint32_t aofs, uint32_t oprsz, bool load_dest,
+                         void (*fni)(TCGv_i32, TCGv_i32))
 {
     TCGv_i32 t0 = tcg_temp_new_i32();
     TCGv_i32 t1 = tcg_temp_new_i32();
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 4) {
-        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
+        tcg_gen_ld_i32(t0, abase, aofs + i);
         if (load_dest) {
-            tcg_gen_ld_i32(t1, tcg_env, dofs + i);
+            tcg_gen_ld_i32(t1, dbase, dofs + i);
         }
         fni(t1, t0);
-        tcg_gen_st_i32(t1, tcg_env, dofs + i);
+        tcg_gen_st_i32(t1, dbase, dofs + i);
     }
     tcg_temp_free_i32(t0);
     tcg_temp_free_i32(t1);
@@ -761,8 +804,10 @@ static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
 }
 
 /* Expand OPSZ bytes worth of three-operand operations using i32 elements. */
-static void expand_3_i32(uint32_t dofs, uint32_t aofs,
-                         uint32_t bofs, uint32_t oprsz, bool load_dest,
+static void expand_3_i32(TCGv_ptr dbase, uint32_t dofs,
+                         TCGv_ptr abase, uint32_t aofs,
+                         TCGv_ptr bbase, uint32_t bofs,
+                         uint32_t oprsz, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
 {
     TCGv_i32 t0 = tcg_temp_new_i32();
@@ -771,13 +816,13 @@ static void expand_3_i32(uint32_t dofs, uint32_t aofs,
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 4) {
-        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
-        tcg_gen_ld_i32(t1, tcg_env, bofs + i);
+        tcg_gen_ld_i32(t0, abase, aofs + i);
+        tcg_gen_ld_i32(t1, bbase, bofs + i);
         if (load_dest) {
-            tcg_gen_ld_i32(t2, tcg_env, dofs + i);
+            tcg_gen_ld_i32(t2, dbase, dofs + i);
         }
         fni(t2, t0, t1);
-        tcg_gen_st_i32(t2, tcg_env, dofs + i);
+        tcg_gen_st_i32(t2, dbase, dofs + i);
     }
     tcg_temp_free_i32(t2);
     tcg_temp_free_i32(t1);
@@ -863,20 +908,21 @@ static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
 }
 
 /* Expand OPSZ bytes worth of two-operand operations using i64 elements. */
-static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
-                         bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
+static void expand_2_i64(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase,
+                         uint32_t aofs, uint32_t oprsz, bool load_dest,
+                         void (*fni)(TCGv_i64, TCGv_i64))
 {
     TCGv_i64 t0 = tcg_temp_new_i64();
     TCGv_i64 t1 = tcg_temp_new_i64();
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 8) {
-        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
+        tcg_gen_ld_i64(t0, abase, aofs + i);
         if (load_dest) {
-            tcg_gen_ld_i64(t1, tcg_env, dofs + i);
+            tcg_gen_ld_i64(t1, dbase, dofs + i);
         }
         fni(t1, t0);
-        tcg_gen_st_i64(t1, tcg_env, dofs + i);
+        tcg_gen_st_i64(t1, dbase, dofs + i);
     }
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(t1);
@@ -924,8 +970,10 @@ static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
 }
 
 /* Expand OPSZ bytes worth of three-operand operations using i64 elements.  */
-static void expand_3_i64(uint32_t dofs, uint32_t aofs,
-                         uint32_t bofs, uint32_t oprsz, bool load_dest,
+static void expand_3_i64(TCGv_ptr dbase, uint32_t dofs,
+                         TCGv_ptr abase, uint32_t aofs,
+                         TCGv_ptr bbase, uint32_t bofs,
+                         uint32_t oprsz, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
 {
     TCGv_i64 t0 = tcg_temp_new_i64();
@@ -934,13 +982,13 @@ static void expand_3_i64(uint32_t dofs, uint32_t aofs,
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 8) {
-        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
-        tcg_gen_ld_i64(t1, tcg_env, bofs + i);
+        tcg_gen_ld_i64(t0, abase, aofs + i);
+        tcg_gen_ld_i64(t1, bbase, bofs + i);
         if (load_dest) {
-            tcg_gen_ld_i64(t2, tcg_env, dofs + i);
+            tcg_gen_ld_i64(t2, dbase, dofs + i);
         }
         fni(t2, t0, t1);
-        tcg_gen_st_i64(t2, tcg_env, dofs + i);
+        tcg_gen_st_i64(t2, dbase, dofs + i);
     }
     tcg_temp_free_i64(t2);
     tcg_temp_free_i64(t1);
@@ -1026,7 +1074,8 @@ static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
 }
 
 /* Expand OPSZ bytes worth of two-operand operations using host vectors. */
-static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+static void expand_2_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                         TCGv_ptr abase, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec))
@@ -1035,12 +1084,12 @@ static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
         TCGv_vec t0 = tcg_temp_new_vec(type);
         TCGv_vec t1 = tcg_temp_new_vec(type);
 
-        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
+        tcg_gen_ld_vec(t0, abase, aofs + i);
         if (load_dest) {
-            tcg_gen_ld_vec(t1, tcg_env, dofs + i);
+            tcg_gen_ld_vec(t1, dbase, dofs + i);
         }
         fni(vece, t1, t0);
-        tcg_gen_st_vec(t1, tcg_env, dofs + i);
+        tcg_gen_st_vec(t1, dbase, dofs + i);
     }
 }
 
@@ -1084,8 +1133,9 @@ static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
 }
 
 /* Expand OPSZ bytes worth of three-operand operations using host vectors. */
-static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
-                         uint32_t bofs, uint32_t oprsz,
+static void expand_3_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                         TCGv_ptr abase, uint32_t aofs,
+                         TCGv_ptr bbase, uint32_t bofs, uint32_t oprsz,
                          uint32_t tysz, TCGType type, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
 {
@@ -1094,13 +1144,13 @@ static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
         TCGv_vec t1 = tcg_temp_new_vec(type);
         TCGv_vec t2 = tcg_temp_new_vec(type);
 
-        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
-        tcg_gen_ld_vec(t1, tcg_env, bofs + i);
+        tcg_gen_ld_vec(t0, abase, aofs + i);
+        tcg_gen_ld_vec(t1, bbase, bofs + i);
         if (load_dest) {
-            tcg_gen_ld_vec(t2, tcg_env, dofs + i);
+            tcg_gen_ld_vec(t2, dbase, dofs + i);
         }
         fni(vece, t2, t0, t1);
-        tcg_gen_st_vec(t2, tcg_env, dofs + i);
+        tcg_gen_st_vec(t2, dbase, dofs + i);
     }
 }
 
@@ -1182,8 +1232,9 @@ static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
 }
 
 /* Expand a vector two-operand operation. */
-void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
-                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
+void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs,
+                        TCGv_ptr abase, uint32_t aofs,
+                        uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
 {
     const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
     const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
@@ -1191,7 +1242,7 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs);
-    check_overlap_2(dofs, aofs, maxsz);
+    check_overlap_2(dbase, dofs, abase, aofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1204,8 +1255,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
          * that e.g. size == 80 would be expanded with 2x32 + 1x16.
          */
         some = QEMU_ALIGN_DOWN(oprsz, 32);
-        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
-                     g->load_dest, g->fniv);
+        expand_2_vec(g->vece, dbase, dofs, abase, aofs, some, 32,
+                     TCG_TYPE_V256, g->load_dest, g->fniv);
         if (some == oprsz) {
             break;
         }
@@ -1215,22 +1266,25 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
         maxsz -= some;
         /* fallthru */
     case TCG_TYPE_V128:
-        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
-                     g->load_dest, g->fniv);
+        expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 16,
+                     TCG_TYPE_V128, g->load_dest, g->fniv);
         break;
     case TCG_TYPE_V64:
-        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
-                     g->load_dest, g->fniv);
+        expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 8,
+                     TCG_TYPE_V64, g->load_dest, g->fniv);
         break;
 
     case 0:
         if (g->fni8 && check_size_impl(oprsz, 8)) {
-            expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
+            expand_2_i64(dbase, dofs, abase, aofs,
+                         oprsz, g->load_dest, g->fni8);
         } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-            expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
+            expand_2_i32(dbase, dofs, abase, aofs,
+                         oprsz, g->load_dest, g->fni4);
         } else {
             assert(g->fno != NULL);
-            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
+            expand_2_ool(dbase, dofs, abase, aofs,
+                         oprsz, maxsz, g->data, g->fno);
             oprsz = maxsz;
         }
         break;
@@ -1241,10 +1295,16 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
     }
 }
 
+void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
+                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
+{
+    tcg_gen_gvec_2_var(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, g);
+}
+
 /* Expand a vector operation with two vectors and an immediate. */
 void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                      uint32_t maxsz, int64_t c, const GVecGen2i *g)
@@ -1255,7 +1315,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs);
-    check_overlap_2(dofs, aofs, maxsz);
+    check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1310,7 +1370,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
@@ -1321,7 +1381,7 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
     TCGType type;
 
     check_size_align(oprsz, maxsz, dofs | aofs);
-    check_overlap_2(dofs, aofs, maxsz);
+    check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1387,13 +1447,15 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
     }
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
 /* Expand a vector three-operand operation.  */
-void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
-                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
+void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs,
+                        TCGv_ptr abase, uint32_t aofs,
+                        TCGv_ptr bbase, uint32_t bofs,
+                        uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
 {
     const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
     const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
@@ -1401,7 +1463,7 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs | bofs);
-    check_overlap_3(dofs, aofs, bofs, maxsz);
+    check_overlap_3(dbase, dofs, abase, aofs, bbase, bofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1414,8 +1476,8 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
          * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
         some = QEMU_ALIGN_DOWN(oprsz, 32);
-        expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
-                     g->load_dest, g->fniv);
+        expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+                     some, 32, TCG_TYPE_V256, g->load_dest, g->fniv);
         if (some == oprsz) {
             break;
         }
@@ -1426,23 +1488,25 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
         maxsz -= some;
         /* fallthru */
     case TCG_TYPE_V128:
-        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
-                     g->load_dest, g->fniv);
+        expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+                     oprsz, 16, TCG_TYPE_V128, g->load_dest, g->fniv);
         break;
     case TCG_TYPE_V64:
-        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
-                     g->load_dest, g->fniv);
+        expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs,
+                     oprsz, 8, TCG_TYPE_V64, g->load_dest, g->fniv);
         break;
 
     case 0:
         if (g->fni8 && check_size_impl(oprsz, 8)) {
-            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
+            expand_3_i64(dbase, dofs, abase, aofs, bbase, bofs,
+                         oprsz, g->load_dest, g->fni8);
         } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
+            expand_3_i32(dbase, dofs, abase, aofs, bbase, bofs,
+                         oprsz, g->load_dest, g->fni4);
         } else {
             assert(g->fno != NULL);
-            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
-                               maxsz, g->data, g->fno);
+            expand_3_ool(dbase, dofs, abase, aofs, bbase, bofs,
+                         oprsz, maxsz, g->data, g->fno);
             oprsz = maxsz;
         }
         break;
@@ -1453,10 +1517,17 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
     }
 }
 
+void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
+{
+    tcg_gen_gvec_3_var(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+                       oprsz, maxsz, g);
+}
+
 /* Expand a vector operation with three vectors and an immediate.  */
 void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz, int64_t c,
@@ -1468,7 +1539,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs | bofs);
-    check_overlap_3(dofs, aofs, bofs, maxsz);
+    check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1522,7 +1593,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
@@ -1536,7 +1607,8 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
-    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
+    check_overlap_4(tcg_env, dofs, tcg_env, aofs,
+                    tcg_env, bofs, tcg_env, cofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1591,7 +1663,7 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
@@ -1606,7 +1678,8 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
-    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
+    check_overlap_4(tcg_env, dofs, tcg_env, aofs,
+                    tcg_env, bofs, tcg_env, cofs, maxsz);
 
     type = 0;
     if (g->fniv) {
@@ -1660,7 +1733,7 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
@@ -1673,8 +1746,9 @@ static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
     tcg_gen_mov_vec(a, b);
 }
 
-void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
-                      uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                          TCGv_ptr abase, uint32_t aofs,
+                          uint32_t oprsz, uint32_t maxsz)
 {
     static const GVecGen2 g = {
         .fni8 = tcg_gen_mov_i64,
@@ -1682,14 +1756,22 @@ void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
         .fno = gen_helper_gvec_mov,
         .prefer_i64 = TCG_TARGET_REG_BITS == 64,
     };
-    if (dofs != aofs) {
-        tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
-    } else {
+
+    if (dofs == aofs && dbase == abase) {
         check_size_align(oprsz, maxsz, dofs);
         if (oprsz < maxsz) {
-            expand_clr(dofs + oprsz, maxsz - oprsz);
+            expand_clr(dbase, dofs + oprsz, maxsz - oprsz);
         }
+        return;
     }
+
+    tcg_gen_gvec_2_var(dbase, dofs, abase, aofs, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_mov_var(vece, tcg_env, dofs, tcg_env, aofs, oprsz, maxsz);
 }
 
 void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
@@ -1697,7 +1779,7 @@
 {
     check_size_align(oprsz, maxsz, dofs);
     tcg_debug_assert(vece <= MO_32);
-    do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
+    do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0);
 }
 
 void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
@@ -1705,7 +1787,7 @@
 {
     check_size_align(oprsz, maxsz, dofs);
     tcg_debug_assert(vece <= MO_64);
-    do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
+    do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0);
 }
 
 void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -1717,7 +1799,7 @@
     if (type != 0) {
         TCGv_vec t_vec = tcg_temp_new_vec(type);
         tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs);
-        do_dup_store(type, dofs, oprsz, maxsz, t_vec);
+        do_dup_store(type, tcg_env, dofs, oprsz, maxsz, t_vec);
     } else if (vece <= MO_32) {
         TCGv_i32 in = tcg_temp_ebb_new_i32();
         switch (vece) {
@@ -1731,12 +1813,12 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             tcg_gen_ld_i32(in, tcg_env, aofs);
             break;
         }
-        do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
+        do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0);
         tcg_temp_free_i32(in);
     } else {
         TCGv_i64 in = tcg_temp_ebb_new_i64();
         tcg_gen_ld_i64(in, tcg_env, aofs);
-        do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
+        do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0);
         tcg_temp_free_i64(in);
     }
 } else if (vece == 4) {
@@ -1765,7 +1847,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             tcg_temp_free_i64(in1);
         }
         if (oprsz < maxsz) {
-            expand_clr(dofs + oprsz, maxsz - oprsz);
+            expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
         }
     } else if (vece == 5) {
         /* 256-bit duplicate. */
@@ -1808,18 +1890,24 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             }
         }
         if (oprsz < maxsz) {
-            expand_clr(dofs + oprsz, maxsz - oprsz);
+            expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
        }
     } else {
         g_assert_not_reached();
     }
 }
 
+void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                              uint32_t oprsz, uint32_t maxsz, uint64_t x)
+{
+    check_size_align(oprsz, maxsz, dofs);
+    do_dup(vece, dbase, dofs, oprsz, maxsz, NULL, NULL, x);
+}
+
 void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
                           uint32_t maxsz, uint64_t x)
 {
-    check_size_align(oprsz, maxsz, dofs);
-    do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
+    tcg_gen_gvec_dup_imm_var(vece, tcg_env, dofs, oprsz, maxsz, x);
 }
 
 void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -1917,8 +2005,10 @@ void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 
 static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
 
-void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
-                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                          TCGv_ptr abase, uint32_t aofs,
+                          TCGv_ptr bbase, uint32_t bofs,
+                          uint32_t oprsz, uint32_t maxsz)
 {
     static const GVecGen3 g[4] = {
         { .fni8 = tcg_gen_vec_add8_i64,
@@ -1945,7 +2035,15 @@ void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+    tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs,
+                       oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_add_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+                         oprsz, maxsz);
 }
 
 void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2098,8 +2196,10 @@ void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
     tcg_temp_free_i64(t2);
 }
 
-void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
-                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs,
+                          TCGv_ptr abase, uint32_t aofs,
+                          TCGv_ptr bbase, uint32_t bofs,
+                          uint32_t oprsz, uint32_t maxsz)
 {
     static const GVecGen3 g[4] = {
         { .fni8 = tcg_gen_vec_sub8_i64,
@@ -2126,7 +2226,15 @@ void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+    tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs,
+                       oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_sub_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs,
+                         oprsz, maxsz);
 }
 
 static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
@@ -3135,7 +3243,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs);
-    check_overlap_2(dofs, aofs, maxsz);
+    check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
 
     /* If the backend has a scalar expansion, great. */
     type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
@@ -3241,7 +3349,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
 
  clear_tail:
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
@@ -3755,10 +3863,10 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
     uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs | bofs);
-    check_overlap_3(dofs, aofs, bofs, maxsz);
+    check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz);
 
     if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
-        do_dup(MO_8, dofs, oprsz, maxsz,
+        do_dup(MO_8, tcg_env, dofs, oprsz, maxsz,
                NULL, NULL, -(cond == TCG_COND_ALWAYS));
         return;
     }
@@ -3820,7 +3928,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
     tcg_swap_vecop_list(hold_list);
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
 
@@ -3875,10 +3983,10 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
     TCGType type;
 
     check_size_align(oprsz, maxsz, dofs | aofs);
-    check_overlap_2(dofs, aofs, maxsz);
+    check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz);
 
     if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
-        do_dup(MO_8, dofs, oprsz, maxsz,
+        do_dup(MO_8, tcg_env, dofs, oprsz, maxsz,
                NULL, NULL, -(cond == TCG_COND_ALWAYS));
         return;
     }
@@ -3939,7 +4047,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
         uint32_t i;
 
         tcg_gen_extrl_i64_i32(t1, c);
-        for (i = 0; i < oprsz; i += 8) {
+        for (i = 0; i < oprsz; i += 4) {
            tcg_gen_ld_i32(t0, tcg_env, aofs + i);
            tcg_gen_negsetcond_i32(cond, t0, t0, t1);
            tcg_gen_st_i32(t0, tcg_env, dofs + i);
@@ -3961,7 +4069,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
     }
 
     if (oprsz < maxsz) {
-        expand_clr(dofs + oprsz, maxsz - oprsz);
+        expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz);
     }
 }
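
The net effect of the patch is that every gvec expander gains a `*_var` variant (tcg_gen_gvec_2_var, tcg_gen_gvec_3_var, tcg_gen_gvec_mov_var, tcg_gen_gvec_dup_imm_var, tcg_gen_gvec_add_var, tcg_gen_gvec_sub_var, ...) that takes an explicit TCGv_ptr base for each vector operand, while the existing offset-only entry points become thin wrappers that pass tcg_env for every base. Below is a minimal sketch of how a front end might use one of the new entry points against vector storage that is not embedded in CPUArchState. Everything outside the tcg_gen_gvec_add_var() call is assumption: the env field name `vec_backing_ptr` and the helper `gen_vec_add_external` are hypothetical and only illustrate the calling pattern, in a QEMU translator context.

```c
/*
 * Illustrative sketch only.  Generate an element-wise vector add where all
 * three operands live in storage reached through a pointer held in env,
 * rather than at fixed offsets inside CPUArchState itself.
 * 'vec_backing_ptr' is a hypothetical field; only the signature of
 * tcg_gen_gvec_add_var() comes from this patch.
 */
static void gen_vec_add_external(unsigned vece, uint32_t dofs, uint32_t aofs,
                                 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_ptr base = tcg_temp_new_ptr();

    /* Load the (hypothetical) pointer to the external vector register file. */
    tcg_gen_ld_ptr(base, tcg_env, offsetof(CPUArchState, vec_backing_ptr));

    /* All three operands share the same non-env base pointer here. */
    tcg_gen_gvec_add_var(vece, base, dofs, base, aofs, base, bofs,
                         oprsz, maxsz);

    tcg_temp_free_ptr(base);
}
```

Note that, as the new check_overlap_2() comment says, when the bases are distinct TCGv_ptr values the translator can no longer assert the overlap rules at translation time; the caller is still responsible for respecting them at runtime.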
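The relaxed simd_desc() assertion accepts a data value if it round-trips through either the signed (sextract32) or the unsigned (extract32) view of the SIMD_DATA_BITS-wide field. A standalone sketch of the two views follows; the SIMD_DATA_BITS value and the simplified bit-field helpers are stand-ins chosen for illustration, not the definitions from tcg-gvec-desc.h and qemu/bitops.h.

```c
#include <assert.h>
#include <stdint.h>

/* Stand-in width for the descriptor data field; the real value may differ. */
#define SIMD_DATA_BITS 22

/* Simplified stand-ins for the qemu/bitops.h helpers used by simd_desc(). */
static inline uint32_t extract32(uint32_t v, int start, int len)
{
    return (v >> start) & (~0u >> (32 - len));
}

static inline int32_t sextract32(uint32_t v, int start, int len)
{
    return (int32_t)(v << (32 - len - start)) >> (32 - len);
}

int main(void)
{
    /* A negative value round-trips through the signed view... */
    int32_t d_signed = -3;
    assert(d_signed == sextract32(d_signed, 0, SIMD_DATA_BITS));

    /* ...and a large positive value round-trips through the unsigned view. */
    int32_t d_unsigned = (1 << SIMD_DATA_BITS) - 1;
    assert(d_unsigned == (int32_t)extract32(d_unsigned, 0, SIMD_DATA_BITS));
    assert(d_unsigned != sextract32(d_unsigned, 0, SIMD_DATA_BITS));

    /* A value that fits neither view would still trip the tcg_debug_assert(). */
    int32_t too_big = 1 << SIMD_DATA_BITS;
    assert(too_big != sextract32(too_big, 0, SIMD_DATA_BITS));
    assert(too_big != (int32_t)extract32(too_big, 0, SIMD_DATA_BITS));
    return 0;
}
```

As the new comment in the patch notes, this looser check cannot catch a caller that intends a signed value but passes 1 << (SIMD_DATA_BITS - 1): that value is accepted under the unsigned view even though simd_data() would later return it as negative.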