-rw-r--r--  target/arm/translate-vfp.inc.c  167
-rw-r--r--  target/arm/translate.c           12
-rw-r--r--  target/arm/vfp.decode             5
3 files changed, 180 insertions(+), 4 deletions(-)
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index a2eeb6c..d0282f1 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -1112,6 +1112,14 @@ typedef void VFPGen3OpDPFn(TCGv_i64 vd,
TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
/*
+ * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
+ * The callback should emit code to write a value to vd (which
+ * should be written to only).
+ */
+typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
+typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
+
+/*
* Perform a 3-operand VFP data processing instruction. fn is the
* callback to do the actual operation; this function deals with the
* code to handle looping around for VFP vector processing.
@@ -1274,6 +1282,155 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
return true;
}
+static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ uint32_t bank_mask = 0;
+ int veclen = s->vec_len;
+ TCGv_i32 f0, fd;
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ bank_mask = 0x18;
+
+ /* Figure out what type of vector operation this is. */
+ if ((vd & bank_mask) == 0) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = s->vec_stride + 1;
+
+ if ((vm & bank_mask) == 0) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i32();
+ fd = tcg_temp_new_i32();
+
+ neon_load_reg32(f0, vm);
+
+ for (;;) {
+ fn(fd, f0);
+ neon_store_reg32(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ if (delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ neon_store_reg32(fd, vd);
+ }
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+ neon_load_reg32(f0, vm);
+ }
+
+ tcg_temp_free_i32(f0);
+ tcg_temp_free_i32(fd);
+
+ return true;
+}
+
+static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ uint32_t bank_mask = 0;
+ int veclen = s->vec_len;
+ TCGv_i64 f0, fd;
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ bank_mask = 0xc;
+
+ /* Figure out what type of vector operation this is. */
+ if ((vd & bank_mask) == 0) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = (s->vec_stride >> 1) + 1;
+
+ if ((vm & bank_mask) == 0) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i64();
+ fd = tcg_temp_new_i64();
+
+ neon_load_reg64(f0, vm);
+
+ for (;;) {
+ fn(fd, f0);
+ neon_store_reg64(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ if (delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ neon_store_reg64(fd, vd);
+ }
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+ neon_load_reg64(f0, vm);
+ }
+
+ tcg_temp_free_i64(f0);
+ tcg_temp_free_i64(fd);
+
+ return true;
+}
+
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
/* Note that order of inputs to the add matters for NaNs */
@@ -1731,3 +1888,13 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
tcg_temp_free_i64(fd);
return true;
}
+
+static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
+{
+ return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
+}
+
+static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
+{
+ return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
+}
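The short-vector looping in do_vfp_2op_sp() above advances the source and
destination register numbers by the vector stride while staying inside one
eight-register bank: bits [4:3] of a single-precision register number select
the bank (the double-precision variant uses bank_mask 0xc over four-register
banks). A minimal standalone sketch of that advance, outside any QEMU
scaffolding:

/*
 * Standalone sketch (not QEMU code) of the short-vector register
 * advance in do_vfp_2op_sp().  s0-s31 are grouped into four banks
 * of eight; bits [4:3] of the register number select the bank, and
 * the stride-based advance re-ORs those bank bits back in.
 */
#include <stdio.h>

static int advance_sreg(int reg, int delta)
{
    const int bank_mask = 0x18;             /* bits [4:3]: bank select */
    return ((reg + delta) & (bank_mask - 1)) | (reg & bank_mask);
}

int main(void)
{
    int vd = 8;                             /* s8, start of bank 1 */
    int stride = 2;                         /* i.e. vec_stride + 1 == 2 */

    for (int i = 0; i < 4; i++) {
        printf("s%d\n", vd);                /* prints s8 s10 s12 s14 */
        vd = advance_sreg(vd, stride);
    }
    return 0;
}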
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 29b6487..e510c5e 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3096,6 +3096,14 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
case 0 ... 14:
/* Already handled by decodetree */
return 1;
+ case 15:
+ switch (rn) {
+ case 1:
+ /* Already handled by decodetree */
+ return 1;
+ default:
+ break;
+ }
default:
break;
}
@@ -3104,7 +3112,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
case 0x00: /* vmov */
- case 0x01: /* vabs */
case 0x02: /* vneg */
case 0x03: /* vsqrt */
break;
@@ -3284,9 +3291,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
case 0: /* cpy */
/* no-op */
break;
- case 1: /* abs */
- gen_vfp_abs(dp);
- break;
case 2: /* neg */
gen_vfp_neg(dp);
break;
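Dropping the legacy "case 1: gen_vfp_abs(dp)" arm above is behaviour
preserving: the decodetree path reaches gen_helper_vfp_abss/absd, and VABS is
a pure sign-bit clear on the IEEE-754 bit pattern, needing neither rounding
mode nor FP status. A hedged sketch of that operation (mirroring softfloat's
float32_abs/float64_abs rather than quoting the QEMU helpers themselves):

/*
 * Sketch (not the QEMU helpers) of what VABS computes: clear the
 * sign bit of the raw IEEE-754 bit pattern.  This is valid on NaNs
 * and infinities alike and raises no FP exceptions, so the fpst-free
 * VFPGen2Op callback signature introduced above is enough for it.
 */
#include <stdint.h>

static uint32_t f32_abs_bits(uint32_t f)
{
    return f & 0x7fffffffu;                 /* clear bit 31 (sign) */
}

static uint64_t f64_abs_bits(uint64_t f)
{
    return f & 0x7fffffffffffffffull;       /* clear bit 63 (sign) */
}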
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index 1818d4f..7035861 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -156,3 +156,8 @@ VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
vd=%vd_sp
VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
vd=%vd_dp
+
+VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... \
+ vd=%vd_dp vm=%vm_dp