author     David Hildenbrand <david@redhat.com>   2019-03-07 13:15:14 +0100
committer  Cornelia Huck <cohuck@redhat.com>      2019-03-11 09:31:01 +0100
commit     b4f5ae47d8f3d4f31ccd78c8c02c1e7f206b5ef5 (patch)
tree       54ddf2c85a23553665f11d84be41945400ba9fc4
parent     eeb11a90a68ed4d1be433187c3cb0b90b85d2e18 (diff)
s390x/tcg: Implement VECTOR LOAD
When loading from memory, load both elements into temps first before
modifying the target vector. Loading with strange alignment from the end
of the address space will not properly wrap; we can ignore that for now.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20190307121539.12842-8-david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
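To illustrate the ordering the message describes, here is a standalone C sketch (not QEMU code; struct vreg, be64_load and vector_load are made-up names): both 64-bit halves are read into temporaries before the destination is written, so the target vector register stays unchanged if the second access were to fault.

    #include <stdint.h>

    /* Made-up stand-in for a 128-bit vector register; el[0] is the
     * doubleword loaded from the lower address (s390x is big-endian). */
    struct vreg { uint64_t el[2]; };

    /* Big-endian 64-bit load from a byte buffer. */
    static uint64_t be64_load(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) {
            v = (v << 8) | p[i];
        }
        return v;
    }

    /* Read both halves into temporaries first; only then modify *v1,
     * mirroring how op_vl fills t0/t1 before write_vec_element_i64(). */
    static void vector_load(struct vreg *v1, const uint8_t *mem)
    {
        uint64_t t0 = be64_load(mem);
        uint64_t t1 = be64_load(mem + 8);
        v1->el[0] = t0;
        v1->el[1] = t1;
    }

The end-of-address-space wrap caveat mentioned above is deliberately not modeled in this sketch.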
-rw-r--r--  target/s390x/insn-data.def       3
-rw-r--r--  target/s390x/translate_vx.inc.c  24
2 files changed, 27 insertions, 0 deletions
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index c8cd5df..f129e51 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -981,6 +981,9 @@
F(0xe744, VGBM, VRI_a, V, 0, 0, 0, 0, vgbm, 0, IF_VEC)
/* VECTOR GENERATE MASK */
F(0xe746, VGM, VRI_b, V, 0, 0, 0, 0, vgm, 0, IF_VEC)
+/* VECTOR LOAD */
+ F(0xe706, VL, VRX, V, la2, 0, 0, 0, vl, 0, IF_VEC)
+ F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC)

#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index 28edd9b..9063784 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -113,6 +113,9 @@ static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
}
}
+#define gen_gvec_mov(v1, v2) \
+ tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
+ 16)
#define gen_gvec_dup64i(v1, c) \
tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
@@ -219,3 +222,24 @@ static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
gen_gvec_dupi(es, get_field(s->fields, v1), mask);
return DISAS_NEXT;
}
+
+static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64);
+ write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_mov(get_field(s->fields, v1), get_field(s->fields, v2));
+ return DISAS_NEXT;
+}