author     Richard Henderson <richard.henderson@linaro.org>   2020-09-14 17:04:22 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2021-10-05 16:53:17 -0700
commit     2dabf74252e2b9b9f23b1991d69a8def61442a2c (patch)
tree       7e2bc755f2280b5dc624c0ca5601e367ae60e4c5
parent     34ef7676090e35ffb7f7d8b8d92a843a6ee94931 (diff)
tcg/s390x: Implement tcg_out_ld/st for vector types
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  tcg/s390x/tcg-target.c.inc  132
1 file changed, 120 insertions(+), 12 deletions(-)
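As a quick sanity check on the new VRX emitter, here is a minimal standalone C sketch (not part of the patch) that mirrors the bit packing tcg_out_insn_VRX performs below: the opcode split across the first and last halfwords, V1 and X2 in the first halfword, B2 and the 12-bit displacement in the second, and M3 plus the RXB extension bit in the third. The helper name encode_vrx and the sample operands are made up for illustration only.

/* Standalone sketch (not from the patch): mirror the VRX bit packing done
 * by tcg_out_insn_VRX and print one sample encoding. */
#include <stdint.h>
#include <stdio.h>

static void encode_vrx(uint8_t out[6], uint16_t op, unsigned v1,
                       unsigned x2, unsigned b2, unsigned d2, unsigned m3)
{
    /* Halfword 1: opcode high byte, low 4 bits of V1, index register X2. */
    uint16_t h0 = (op & 0xff00) | ((v1 & 0xf) << 4) | (x2 & 0xf);
    /* Halfword 2: base register B2 and the 12-bit unsigned displacement. */
    uint16_t h1 = ((b2 & 0xf) << 12) | (d2 & 0xfff);
    /* Halfword 3: M3, the RXB bit carrying bit 4 of V1, opcode low byte. */
    uint16_t h2 = (m3 << 12) | ((v1 & 0x10) << 7) | (op & 0x00ff);

    out[0] = h0 >> 8; out[1] = h0 & 0xff;
    out[2] = h1 >> 8; out[3] = h1 & 0xff;
    out[4] = h2 >> 8; out[5] = h2 & 0xff;
}

int main(void)
{
    uint8_t buf[6];

    /* VL %v24, 16(%r5) with the quadword-alignment hint m3 = 4. */
    encode_vrx(buf, 0xe706, 24, 0, 5, 16, 4);
    for (int i = 0; i < 6; i++) {
        printf("%02x ", buf[i]);
    }
    printf("\n");    /* prints: e7 80 50 10 48 06 */
    return 0;
}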
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 8bee6dd..d80f25e 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -265,6 +265,12 @@ typedef enum S390Opcode {
     RX_STC = 0x42,
     RX_STH = 0x40,
+    VRX_VL = 0xe706,
+    VRX_VLLEZ = 0xe704,
+    VRX_VST = 0xe70e,
+    VRX_VSTEF = 0xe70b,
+    VRX_VSTEG = 0xe70a,
+
     NOP = 0x0707,
 } S390Opcode;
@@ -412,6 +418,16 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
 static const tcg_insn_unit *tb_ret_addr;
 uint64_t s390_facilities[3];
+static inline bool is_general_reg(TCGReg r)
+{
+    return r <= TCG_REG_R15;
+}
+
+static inline bool is_vector_reg(TCGReg r)
+{
+    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
+}
+
 static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                         intptr_t value, intptr_t addend)
 {
@@ -529,6 +545,31 @@ static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
 #define tcg_out_insn_RX tcg_out_insn_RS
 #define tcg_out_insn_RXY tcg_out_insn_RSY
+static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
+{
+    /*
+     * Shift bit 4 of each regno to its corresponding bit of RXB.
+     * RXB itself begins at bit 8 of the instruction, so 8 - 4 = 4
+     * is the left-shift of the 4th operand.
+     */
+    return ((v1 & 0x10) << (4 + 3))
+         | ((v2 & 0x10) << (4 + 2))
+         | ((v3 & 0x10) << (4 + 1))
+         | ((v4 & 0x10) << (4 + 0));
+}
+
+static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
+                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
+{
+    tcg_debug_assert(is_vector_reg(v1));
+    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
+    tcg_debug_assert(is_general_reg(x2));
+    tcg_debug_assert(is_general_reg(b2));
+    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
+    tcg_out16(s, (b2 << 12) | d2);
+    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
+}
+
 /* Emit an opcode with "type-checking" of the format. */
 #define tcg_out_insn(S, FMT, OP, ...) \
     glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
@@ -705,25 +746,92 @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
     }
 }
+static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
+                            TCGReg data, TCGReg base, TCGReg index,
+                            tcg_target_long ofs, int m3)
+{
+    if (ofs < 0 || ofs >= 0x1000) {
+        if (ofs >= -0x80000 && ofs < 0x80000) {
+            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
+            base = TCG_TMP0;
+            index = TCG_REG_NONE;
+            ofs = 0;
+        } else {
+            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
+            if (index != TCG_REG_NONE) {
+                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
+            }
+            index = TCG_TMP0;
+            ofs = 0;
+        }
+    }
+    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
+}
 /* load data without address translation or endianness conversion */
-static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
-                              TCGReg base, intptr_t ofs)
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
+                       TCGReg base, intptr_t ofs)
 {
-    if (type == TCG_TYPE_I32) {
-        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
-    } else {
-        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
+    switch (type) {
+    case TCG_TYPE_I32:
+        if (likely(is_general_reg(data))) {
+            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
+            break;
+        }
+        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
+        break;
+
+    case TCG_TYPE_I64:
+        if (likely(is_general_reg(data))) {
+            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
+            break;
+        }
+        /* fallthru */
+
+    case TCG_TYPE_V64:
+        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
+        break;
+
+    case TCG_TYPE_V128:
+        /* Hint quadword aligned. */
+        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
+        break;
+
+    default:
+        g_assert_not_reached();
     }
 }
-static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
-                              TCGReg base, intptr_t ofs)
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
+                       TCGReg base, intptr_t ofs)
 {
-    if (type == TCG_TYPE_I32) {
-        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
-    } else {
-        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
+    switch (type) {
+    case TCG_TYPE_I32:
+        if (likely(is_general_reg(data))) {
+            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
+        } else {
+            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
+        }
+        break;
+
+    case TCG_TYPE_I64:
+        if (likely(is_general_reg(data))) {
+            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
+            break;
+        }
+        /* fallthru */
+
+    case TCG_TYPE_V64:
+        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
+        break;
+
+    case TCG_TYPE_V128:
+        /* Hint quadword aligned. */
+        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
+        break;
+
+    default:
+        g_assert_not_reached();
     }
 }
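
For reference, a minimal standalone sketch (not part of the patch) of the offset handling in tcg_out_vrx_mem above: the VRX D2 field is a 12-bit unsigned displacement, so negative or too-large offsets are first folded into TCG_TMP0, via LAY when they fit its signed 20-bit displacement and via a constant load (plus AGR for any index register) otherwise. The classify_vrx_ofs helper and the test values are invented for illustration.

/* Standalone sketch (not from the patch): which addressing strategy
 * tcg_out_vrx_mem picks for a given offset. */
#include <stdio.h>
#include <stdint.h>

typedef enum {
    OFS_DIRECT,    /* fits the 12-bit unsigned VRX displacement */
    OFS_VIA_LAY,   /* folded into TCG_TMP0 with LAY (signed 20-bit range) */
    OFS_VIA_MOVI,  /* constant loaded into TCG_TMP0, then AGR for any index */
} OfsStrategy;

static OfsStrategy classify_vrx_ofs(int64_t ofs)
{
    if (ofs >= 0 && ofs < 0x1000) {
        return OFS_DIRECT;
    }
    if (ofs >= -0x80000 && ofs < 0x80000) {
        return OFS_VIA_LAY;
    }
    return OFS_VIA_MOVI;
}

int main(void)
{
    const int64_t tests[] = { 0, 0xfff, -8, 0x1000, 0x7ffff, 0x100000 };
    const char *names[] = { "direct", "lay", "movi" };

    for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        printf("ofs %lld -> %s\n", (long long)tests[i],
               names[classify_vrx_ofs(tests[i])]);
    }
    return 0;
}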