author     Richard Henderson <richard.henderson@linaro.org>  2020-09-14 17:25:52 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2021-10-05 16:53:17 -0700
commit     79cada8693d3e32162e41323b4bfbeaa765cad35 (patch)
tree       dd0201629ff8921945c056a88a03d372fa852f96 /tcg/s390x
parent     b33ce7251c9591635ca0f93bf5e75e7dbb844b5c (diff)
tcg/s390x: Implement tcg_out_dup*_vec
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg/s390x')
-rw-r--r--  tcg/s390x/tcg-target.c.inc | 122
1 file changed, 119 insertions(+), 3 deletions(-)
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 586a4b5..f592508 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -265,13 +265,20 @@ typedef enum S390Opcode {
RX_STC = 0x42,
RX_STH = 0x40,
+ VRIa_VGBM = 0xe744,
+ VRIa_VREPI = 0xe745,
+ VRIb_VGM = 0xe746,
+ VRIc_VREP = 0xe74d,
+
VRRa_VLR = 0xe756,
+ VRRf_VLVGP = 0xe762,
VRSb_VLVG = 0xe722,
VRSc_VLGV = 0xe721,
VRX_VL = 0xe706,
VRX_VLLEZ = 0xe704,
+ VRX_VLREP = 0xe705,
VRX_VST = 0xe70e,
VRX_VSTEF = 0xe70b,
VRX_VSTEG = 0xe70a,
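The new opcodes are the z/Architecture vector-facility instructions used below: VGBM (Vector Generate Byte Mask), VREPI (Vector Replicate Immediate), VGM (Vector Generate Mask), VREP (Vector Replicate), VLVGP (Vector Load VR from GRs Disjoint) and VLREP (Vector Load and Replicate).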
@@ -563,6 +570,34 @@ static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
| ((v4 & 0x10) << (4 + 0));
}
+static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
+ TCGReg v1, uint16_t i2, int m3)
+{
+ tcg_debug_assert(is_vector_reg(v1));
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
+ tcg_out16(s, i2);
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
+}
+
+static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
+ TCGReg v1, uint8_t i2, uint8_t i3, int m4)
+{
+ tcg_debug_assert(is_vector_reg(v1));
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
+ tcg_out16(s, (i2 << 8) | (i3 & 0xff));
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
+}
+
+static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
+ TCGReg v1, uint16_t i2, TCGReg v3, int m4)
+{
+ tcg_debug_assert(is_vector_reg(v1));
+ tcg_debug_assert(is_vector_reg(v3));
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
+ tcg_out16(s, i2);
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
+}
+
static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
TCGReg v1, TCGReg v2, int m3)
{
@@ -572,6 +607,17 @@ static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
}
+static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
+ TCGReg v1, TCGReg r2, TCGReg r3)
+{
+ tcg_debug_assert(is_vector_reg(v1));
+ tcg_debug_assert(is_general_reg(r2));
+ tcg_debug_assert(is_general_reg(r3));
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
+ tcg_out16(s, r3 << 12);
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
+}
+
static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
@@ -2501,19 +2547,89 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg src)
{
- g_assert_not_reached();
+ if (is_general_reg(src)) {
+ /* Replicate general register into two MO_64. */
+ tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
+ if (vece == MO_64) {
+ return true;
+ }
+ src = dst;
+ }
+
+ /*
+ * Recall that the "standard" integer, within a vector, is the
+ * rightmost element of the leftmost doubleword, a-la VLLEZ.
+ */
+ tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
+ return true;
}
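Taken together, a dup from a general register is VLVGP (copy the scalar into both doublewords of dst) followed, for element sizes below MO_64, by a VREP of element (8 >> vece) - 1 — the rightmost element of the leftmost doubleword, which is where a VLLEZ-style load puts a scalar. The following host-side model only illustrates that arithmetic and is not QEMU code; dup_model and its MO_* constants are local stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { MO_8, MO_16, MO_32, MO_64 };

/* Model of VLVGP + VREP on a big-endian 16-byte register image. */
static void dup_model(uint8_t vec[16], uint64_t scalar, int vece)
{
    unsigned esz = 1u << vece;
    unsigned idx = (8 >> vece) - 1;      /* rightmost element of doubleword 0 */
    uint8_t elem[8];

    /* VLVGP: the scalar becomes both big-endian doublewords. */
    for (int i = 0; i < 8; i++) {
        vec[i] = vec[8 + i] = (uint8_t)(scalar >> (56 - 8 * i));
    }
    /* VREP: broadcast element 'idx' of size esz across the register. */
    memcpy(elem, vec + idx * esz, esz);
    for (unsigned e = 0; e < 16 / esz; e++) {
        memcpy(vec + e * esz, elem, esz);
    }
}

int main(void)
{
    uint8_t v[16];

    dup_model(v, 0x1234, MO_16);
    for (int i = 0; i < 16; i++) {
        printf("%02x%c", v[i], i == 15 ? '\n' : ' ');   /* "12 34" x 8 */
    }
    return 0;
}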
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg base, intptr_t offset)
{
- g_assert_not_reached();
+ tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
+ return true;
}
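The memory case needs no separate broadcast: VLREP loads a single element of size vece from base+offset and replicates it into every element of dst.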
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, int64_t val)
{
- g_assert_not_reached();
+ int i, mask, msb, lsb;
+
+ /* Look for int16_t elements. */
+ if (vece <= MO_16 ||
+ (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
+ tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
+ return;
+ }
+
+ /* Look for bit masks. */
+ if (vece == MO_32) {
+ if (risbg_mask((int32_t)val)) {
+ /* Handle wraparound by swapping msb and lsb. */
+ if ((val & 0x80000001u) == 0x80000001u) {
+ msb = 32 - ctz32(~val);
+ lsb = clz32(~val) - 1;
+ } else {
+ msb = clz32(val);
+ lsb = 31 - ctz32(val);
+ }
+ tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
+ return;
+ }
+ } else {
+ if (risbg_mask(val)) {
+ /* Handle wraparound by swapping msb and lsb. */
+ if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
+ msb = 64 - ctz64(~val);
+ lsb = clz64(~val) - 1;
+ } else {
+ msb = clz64(val);
+ lsb = 63 - ctz64(val);
+ }
+ tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
+ return;
+ }
+ }
+
+ /* Look for all bytes 0x00 or 0xff. */
+ for (i = mask = 0; i < 8; i++) {
+ uint8_t byte = val >> (i * 8);
+ if (byte == 0xff) {
+ mask |= 1 << i;
+ } else if (byte != 0) {
+ break;
+ }
+ }
+ if (i == 8) {
+ tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
+ return;
+ }
+
+ /* Otherwise, stuff it in the constant pool. */
+ tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
+ new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
+ tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
}
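The constant path thus cascades through four strategies: VREPI when each element fits a sign-extended 16-bit immediate, VGM when the element is a single run of ones (possibly wrapping past the top bit, hence the msb/lsb derivation from ~val), VGBM when every byte is 0x00 or 0xff, and otherwise a constant-pool load via LARL + VLREP. The sketch below reworks the MO_64 mask arithmetic by hand and is not QEMU code: it assumes risbg_mask() has already accepted the value, and clz64/ctz64 are spelled with the GCC/Clang builtins rather than QEMU's helpers.

#include <stdint.h>
#include <stdio.h>

/* Nonzero, not-all-ones values only; 0 and -1 are caught by the VREPI case. */
static int clz64(uint64_t v) { return __builtin_clzll(v); }
static int ctz64(uint64_t v) { return __builtin_ctzll(v); }

/* Begin/end bit positions for VGM, bit 0 being the element's MSB. */
static void vgm_bits(uint64_t val, int *msb, int *lsb)
{
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* The run of ones wraps around bit 63/0: measure the hole in ~val. */
        *msb = 64 - ctz64(~val);
        *lsb = clz64(~val) - 1;
    } else {
        *msb = clz64(val);               /* leftmost one */
        *lsb = 63 - ctz64(val);          /* rightmost one */
    }
}

int main(void)
{
    int msb, lsb, i, mask;
    uint64_t val;

    vgm_bits(0x00ffff0000000000ull, &msb, &lsb);
    printf("plain mask:   VGM %d,%d\n", msb, lsb);           /* 8,23 */

    vgm_bits(0xe000000000000007ull, &msb, &lsb);
    printf("wrapped mask: VGM %d,%d\n", msb, lsb);           /* 61,2 */

    /* VGBM candidate: every byte of the element must be 0x00 or 0xff. */
    val = 0x00ff00000000ff00ull;
    for (i = mask = 0; i < 8; i++) {
        uint8_t byte = val >> (i * 8);
        if (byte == 0xff) {
            mask |= 1 << i;
        } else if (byte != 0) {
            break;
        }
    }
    if (i == 8) {
        printf("byte mask:    VGBM 0x%04x\n", mask * 0x0101); /* 0x4242 */
    }
    return 0;
}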
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,