From 3a7a2b4e5cf0d49cd8b14e8225af0310068b7d20 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 18 May 2019 12:19:34 -0700 Subject: target/arm: Use tcg_gen_gvec_bitsel This replaces 3 target-specific implementations for BIT, BIF, and BSL. Signed-off-by: Richard Henderson Reviewed-by: Peter Maydell Message-id: 20190518191934.21887-3-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/translate-a64.c | 15 +++++++-- target/arm/translate-a64.h | 2 ++ target/arm/translate.c | 78 ++++------------------------------------------ target/arm/translate.h | 3 -- 4 files changed, 20 insertions(+), 78 deletions(-) diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 8a3bf20..ae739f6 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -704,6 +704,15 @@ static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm, vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s)); } +/* Expand a 4-operand AdvSIMD vector operation using an expander function. */ +static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm, + int rx, GVecGen4Fn *gvec_fn, int vece) +{ + gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx), + is_q ? 16 : 8, vec_full_reg_size(s)); +} + /* Expand a 2-operand + immediate AdvSIMD vector operation using * an op descriptor. */ @@ -10918,13 +10927,13 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) return; case 5: /* BSL bitwise select */ - gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op); + gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0); return; case 6: /* BIT, bitwise insert if true */ - gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op); + gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0); return; case 7: /* BIF, bitwise insert if false */ - gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op); + gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0); return; default: diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h index 63d958c..9569bc5 100644 --- a/target/arm/translate-a64.h +++ b/target/arm/translate-a64.h @@ -122,5 +122,7 @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t, uint32_t, uint32_t); typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); +typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t, + uint32_t, uint32_t, uint32_t); #endif /* TARGET_ARM_TRANSLATE_A64_H */ diff --git a/target/arm/translate.c b/target/arm/translate.c index d25e19e..b1276e0 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -5755,72 +5755,6 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, return 1; } -/* - * Expanders for VBitOps_VBIF, VBIT, VBSL. 
- */ -static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) -{ - tcg_gen_xor_i64(rn, rn, rm); - tcg_gen_and_i64(rn, rn, rd); - tcg_gen_xor_i64(rd, rm, rn); -} - -static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) -{ - tcg_gen_xor_i64(rn, rn, rd); - tcg_gen_and_i64(rn, rn, rm); - tcg_gen_xor_i64(rd, rd, rn); -} - -static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) -{ - tcg_gen_xor_i64(rn, rn, rd); - tcg_gen_andc_i64(rn, rn, rm); - tcg_gen_xor_i64(rd, rd, rn); -} - -static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) -{ - tcg_gen_xor_vec(vece, rn, rn, rm); - tcg_gen_and_vec(vece, rn, rn, rd); - tcg_gen_xor_vec(vece, rd, rm, rn); -} - -static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) -{ - tcg_gen_xor_vec(vece, rn, rn, rd); - tcg_gen_and_vec(vece, rn, rn, rm); - tcg_gen_xor_vec(vece, rd, rd, rn); -} - -static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) -{ - tcg_gen_xor_vec(vece, rn, rn, rd); - tcg_gen_andc_vec(vece, rn, rn, rm); - tcg_gen_xor_vec(vece, rd, rd, rn); -} - -const GVecGen3 bsl_op = { - .fni8 = gen_bsl_i64, - .fniv = gen_bsl_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true -}; - -const GVecGen3 bit_op = { - .fni8 = gen_bit_i64, - .fniv = gen_bit_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true -}; - -const GVecGen3 bif_op = { - .fni8 = gen_bif_i64, - .fniv = gen_bif_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true -}; - static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_vec_sar8i_i64(a, a, shift); @@ -6570,16 +6504,16 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) vec_size, vec_size); break; case 5: /* VBSL */ - tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, - vec_size, vec_size, &bsl_op); + tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); break; case 6: /* VBIT */ - tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, - vec_size, vec_size, &bit_op); + tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs, + vec_size, vec_size); break; case 7: /* VBIF */ - tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, - vec_size, vec_size, &bif_op); + tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs, + vec_size, vec_size); break; } return 0; diff --git a/target/arm/translate.h b/target/arm/translate.h index c2348de..dc06dce 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -238,9 +238,6 @@ static inline void gen_ss_advance(DisasContext *s) } /* Vector operations shared between ARM and AArch64. */ -extern const GVecGen3 bsl_op; -extern const GVecGen3 bit_op; -extern const GVecGen3 bif_op; extern const GVecGen3 mla_op[4]; extern const GVecGen3 mls_op[4]; extern const GVecGen3 cmtst_op[4]; -- cgit v1.1 From fc1120a7f5f2d4b601003205c598077d3eb11ad2 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 10 May 2019 12:03:57 +0100 Subject: target/arm: Implement NSACR gating of floating point The NSACR register allows secure code to configure the FPU to be inaccessible to non-secure code. If the NSACR.CP10 bit is set then: * NS accesses to the FPU trap as UNDEF (ie to NS EL1 or EL2) * CPACR.{CP10,CP11} behave as if RAZ/WI * HCPTR.{TCP11,TCP10} behave as if RAO/WI Note that we do not implement the NSACR.NSASEDIS bit which gates only access to Advanced SIMD, in the same way that we don't implement the equivalent CPACR.ASEDIS and HCPTR.TASE. 
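In code terms the RAZ/WI behaviour is a read-modify-write mask over the CP10/CP11 field, which occupies bits [23:20] of CPACR. A minimal sketch (old_cpacr is a stand-in for the stored register value, not a name from the patch):

    /* Sketch: non-secure write to CPACR with NSACR.CP10 == 0.
     * CP10/CP11 keep their old value; all other bits take the
     * newly written value.  Reads mask the field to zero.
     */
    uint32_t mask = 0xf << 20;
    value = (value & ~mask) | (old_cpacr & mask);
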
Reviewed-by: Richard Henderson Signed-off-by: Peter Maydell Message-id: 20190510110357.18825-1-peter.maydell@linaro.org --- target/arm/helper.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index 188fb19..df4276f 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -930,9 +930,36 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, } value &= mask; } + + /* + * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 + * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. + */ + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value &= ~(0xf << 20); + value |= env->cp15.cpacr_el1 & (0xf << 20); + } + env->cp15.cpacr_el1 = value; } +static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* + * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 + * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. + */ + uint64_t value = env->cp15.cpacr_el1; + + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value &= ~(0xf << 20); + } + return value; +} + + static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) { /* Call cpacr_write() so that we reset with the correct RAO bits set @@ -998,7 +1025,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), - .resetfn = cpacr_reset, .writefn = cpacr_write }, + .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, REGINFO_SENTINEL }; @@ -4683,6 +4710,36 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env) return ret; } +static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * For A-profile AArch32 EL3, if NSACR.CP10 + * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. + */ + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value &= ~(0x3 << 10); + value |= env->cp15.cptr_el[2] & (0x3 << 10); + } + env->cp15.cptr_el[2] = value; +} + +static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* + * For A-profile AArch32 EL3, if NSACR.CP10 + * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 
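+ * (In other words the bits are RAO/WI from the non-secure guest's
+ * point of view: reads return 1 regardless of what was written.)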
+ */ + uint64_t value = env->cp15.cptr_el[2]; + + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value |= 0x3 << 10; + } + return value; +} + static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, @@ -4730,7 +4787,8 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, - .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) }, + .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), + .readfn = cptr_el2_read, .writefn = cptr_el2_write }, { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), @@ -13587,6 +13645,19 @@ int fp_exception_el(CPUARMState *env, int cur_el) break; } + /* + * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode + * to control non-secure access to the FPU. It doesn't have any + * effect if EL3 is AArch64 or if EL3 doesn't exist at all. + */ + if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + cur_el <= 2 && !arm_is_secure_below_el3(env))) { + if (!extract32(env->cp15.nsacr, 10, 1)) { + /* FP insns act as UNDEF */ + return cur_el == 2 ? 2 : 1; + } + } + /* For the CPTR registers we don't need to guard with an ARM_FEATURE * check because zero bits in the registers mean "don't trap". */ -- cgit v1.1 From 97fb318d37be4d21125e89c96e4e92ea33beac51 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 24 May 2019 13:48:29 +0100 Subject: hw/arm/smmuv3: Fix decoding of ID register range The SMMUv3 ID registers cover an area 0x30 bytes in size (12 registers, 4 bytes each). We were incorrectly decoding only the first 0x20 bytes. Signed-off-by: Peter Maydell Reviewed-by: Eric Auger Message-id: 20190524124829.2589-1-peter.maydell@linaro.org --- hw/arm/smmuv3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index fd8ec78..e96d5be 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1232,7 +1232,7 @@ static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset, uint64_t *data, MemTxAttrs attrs) { switch (offset) { - case A_IDREGS ... A_IDREGS + 0x1f: + case A_IDREGS ... A_IDREGS + 0x2f: *data = smmuv3_idreg(offset - A_IDREGS); return MEMTX_OK; case A_IDR0 ... A_IDR5: -- cgit v1.1 From be1ba4d56eba5666ee03b40e286d7315862ab188 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Thu, 23 May 2019 16:05:43 +0100 Subject: hw/core/bus.c: Only the main system bus can have no parent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 80376c3fc2c38fdd453 in 2010 we added a workaround for some qbus buses not being connected to qdev devices -- if the bus has no parent object then we register a reset function which resets the bus on system reset (and unregister it when the bus is unparented). Nearly a decade later, we have now no buses in the tree which are created with non-NULL parents, so we can remove the workaround and instead just assert that if the bus has a NULL parent then it is the main system bus. (The absence of other parentless buses was confirmed by code inspection of all the callsites of qbus_create() and qbus_create_inplace() and cross-checked by 'make check'.) 
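The resulting invariant can be stated in one assertion (a sketch using the names from the patch, not a hunk of it):

    /* Only the main system bus may have a NULL parent, and that
     * bus is created exactly once and never freed.
     */
    assert(bus->parent || bus == sysbus_get_default());
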
Signed-off-by: Peter Maydell Reviewed-by: Markus Armbruster Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Damien Hedde Tested-by: Philippe Mathieu-Daudé Message-id: 20190523150543.22676-1-peter.maydell@linaro.org --- hw/core/bus.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/hw/core/bus.c b/hw/core/bus.c index e6baa04..17bc1ed 100644 --- a/hw/core/bus.c +++ b/hw/core/bus.c @@ -97,10 +97,9 @@ static void qbus_realize(BusState *bus, DeviceState *parent, const char *name) bus->parent->num_child_bus++; object_property_add_child(OBJECT(bus->parent), bus->name, OBJECT(bus), NULL); object_unref(OBJECT(bus)); - } else if (bus != sysbus_get_default()) { - /* TODO: once all bus devices are qdevified, - only reset handler for main_system_bus should be registered here. */ - qemu_register_reset(qbus_reset_all_fn, bus); + } else { + /* The only bus without a parent is the main system bus */ + assert(bus == sysbus_get_default()); } } @@ -109,18 +108,16 @@ static void bus_unparent(Object *obj) BusState *bus = BUS(obj); BusChild *kid; + /* Only the main system bus has no parent, and that bus is never freed */ + assert(bus->parent); + while ((kid = QTAILQ_FIRST(&bus->children)) != NULL) { DeviceState *dev = kid->child; object_unparent(OBJECT(dev)); } - if (bus->parent) { - QLIST_REMOVE(bus, sibling); - bus->parent->num_child_bus--; - bus->parent = NULL; - } else { - assert(bus != sysbus_get_default()); /* main_system_bus is never freed */ - qemu_unregister_reset(qbus_reset_all_fn, bus); - } + QLIST_REMOVE(bus, sibling); + bus->parent->num_child_bus--; + bus->parent = NULL; } void qbus_create_inplace(void *bus, size_t size, const char *typename, -- cgit v1.1 From d67ebada159148bfdfde84871338738e4465e985 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 9 Jun 2019 15:22:49 -0700 Subject: target/arm: Fix output of PAuth Auth The ARM pseudocode installs the error_code into the original pointer, not the encrypted pointer. The difference applies within the 7 bits of pac data; the result should be the sign extension of bit 55. Add a testcase to that effect. 
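The fix itself is just a change of deposit target; for the TBI-enabled path (deposit64() is QEMU's bitfield helper):

    int error_code = (keynumber << 1) | (keynumber ^ 1);
    /* The error code lands in bits [54:53] of the *original*
     * pointer; the old code deposited into the encrypted one.
     */
    return deposit64(orig_ptr, 53, 2, error_code);
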
Signed-off-by: Richard Henderson Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/pauth_helper.c | 4 +-- tests/tcg/aarch64/Makefile.target | 2 +- tests/tcg/aarch64/pauth-2.c | 61 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 tests/tcg/aarch64/pauth-2.c diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c index 7f30ae7..d3194f2 100644 --- a/target/arm/pauth_helper.c +++ b/target/arm/pauth_helper.c @@ -344,9 +344,9 @@ static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) { int error_code = (keynumber << 1) | (keynumber ^ 1); if (param.tbi) { - return deposit64(ptr, 53, 2, error_code); + return deposit64(orig_ptr, 53, 2, error_code); } else { - return deposit64(ptr, 61, 2, error_code); + return deposit64(orig_ptr, 61, 2, error_code); } } return orig_ptr; diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target index 2bb9149..31ba9cf 100644 --- a/tests/tcg/aarch64/Makefile.target +++ b/tests/tcg/aarch64/Makefile.target @@ -15,7 +15,7 @@ run-fcvt: fcvt $(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)") $(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref) -AARCH64_TESTS += pauth-1 +AARCH64_TESTS += pauth-1 pauth-2 run-pauth-%: QEMU += -cpu max TESTS:=$(AARCH64_TESTS) diff --git a/tests/tcg/aarch64/pauth-2.c b/tests/tcg/aarch64/pauth-2.c new file mode 100644 index 0000000..2fe030b --- /dev/null +++ b/tests/tcg/aarch64/pauth-2.c @@ -0,0 +1,61 @@ +#include +#include + +asm(".arch armv8.4-a"); + +void do_test(uint64_t value) +{ + uint64_t salt1, salt2; + uint64_t encode, decode; + + /* + * With TBI enabled and a 48-bit VA, there are 7 bits of auth, + * and so a 1/128 chance of encode = pac(value,key,salt) producing + * an auth for which leaves value unchanged. + * Iterate until we find a salt for which encode != value. + */ + for (salt1 = 1; ; salt1++) { + asm volatile("pacda %0, %2" : "=r"(encode) : "0"(value), "r"(salt1)); + if (encode != value) { + break; + } + } + + /* A valid salt must produce a valid authorization. */ + asm volatile("autda %0, %2" : "=r"(decode) : "0"(encode), "r"(salt1)); + assert(decode == value); + + /* + * An invalid salt usually fails authorization, but again there + * is a chance of choosing another salt that works. + * Iterate until we find another salt which does fail. + */ + for (salt2 = salt1 + 1; ; salt2++) { + asm volatile("autda %0, %2" : "=r"(decode) : "0"(encode), "r"(salt2)); + if (decode != value) { + break; + } + } + + /* The VA bits, bit 55, and the TBI bits, should be unchanged. */ + assert(((decode ^ value) & 0xff80ffffffffffffull) == 0); + + /* + * Bits [54:53] are an error indicator based on the key used; + * the DA key above is keynumber 0, so error == 0b01. Otherwise + * bit 55 of the original is sign-extended into the rest of the auth. 
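+ * Concretely, with the DA key (error code 0b01 in bits [54:53]),
+ * bits [55:48] of a failed auth read back as 0b10111111 when
+ * bit 55 was set and as 0b00100000 when it was clear.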
+ */ + if ((value >> 55) & 1) { + assert(((decode >> 48) & 0xff) == 0b10111111); + } else { + assert(((decode >> 48) & 0xff) == 0b00100000); + } +} + +int main() +{ + do_test(0); + do_test(-1); + do_test(0xda004acedeadbeefull); + return 0; +} -- cgit v1.1 From 2c7d442743854d2c1f5475446e088bd523f4bb20 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 11 Jun 2019 16:39:41 +0100 Subject: decodetree: Fix comparison of Field MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Typo comparing the sign of the field, twice, instead of also comparing the mask of the field (which itself encodes both position and length). Reported-by: Peter Maydell Signed-off-by: Richard Henderson Message-id: 20190604154225.26992-1-richard.henderson@linaro.org Reviewed-by: Peter Maydell Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Peter Maydell --- scripts/decodetree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/decodetree.py b/scripts/decodetree.py index 81874e2..d7a59d6 100755 --- a/scripts/decodetree.py +++ b/scripts/decodetree.py @@ -184,7 +184,7 @@ class Field: return '{0}(insn, {1}, {2})'.format(extr, self.pos, self.len) def __eq__(self, other): - return self.sign == other.sign and self.sign == other.sign + return self.sign == other.sign and self.mask == other.mask def __ne__(self, other): return not self.__eq__(other) -- cgit v1.1 From 78e138bc1f672c145ef6ace74617db00eebaa2ba Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:41 +0100 Subject: target/arm: Add stubs for AArch32 VFP decodetree Add the infrastructure for building and invoking a decodetree decoder for the AArch32 VFP encodings. At the moment the new decoder covers nothing, so we always fall back to the existing hand-written decode. We need to have one decoder for the unconditional insns and one for the conditional insns, as otherwise the patterns for conditional insns would incorrectly match against the unconditional ones too. Since translate.c is over 14,000 lines long and we're going to be touching pretty much every line of the VFP code as part of the decodetree conversion, we create a new translate-vfp.inc.c to hold the code which deals with VFP in the new scheme. It should be possible to convert this into a standalone translation unit eventually, but the conversion process will be much simpler if we simply #include it midway through translate.c to start with. 
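The hook into the legacy decoder is a short dispatch at the top of disas_vfp_insn(), as added by this patch:

    if (extract32(insn, 28, 4) == 0xf) {
        if (disas_vfp_uncond(s, insn)) {
            return 0;   /* handled by the generated decoder */
        }
    } else {
        if (disas_vfp(s, insn)) {
            return 0;
        }
    }
    /* otherwise fall through to the hand-written decode */
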
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/Makefile.objs | 13 +++++++++++++ target/arm/translate-vfp.inc.c | 31 +++++++++++++++++++++++++++++++ target/arm/translate.c | 19 +++++++++++++++++++ target/arm/vfp-uncond.decode | 28 ++++++++++++++++++++++++++++ target/arm/vfp.decode | 28 ++++++++++++++++++++++++++++ 5 files changed, 119 insertions(+) create mode 100644 target/arm/translate-vfp.inc.c create mode 100644 target/arm/vfp-uncond.decode create mode 100644 target/arm/vfp.decode diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs index 6bdcc65..dfa736a 100644 --- a/target/arm/Makefile.objs +++ b/target/arm/Makefile.objs @@ -19,5 +19,18 @@ target/arm/decode-sve.inc.c: $(SRC_PATH)/target/arm/sve.decode $(DECODETREE) $(PYTHON) $(DECODETREE) --decode disas_sve -o $@ $<,\ "GEN", $(TARGET_DIR)$@) +target/arm/decode-vfp.inc.c: $(SRC_PATH)/target/arm/vfp.decode $(DECODETREE) + $(call quiet-command,\ + $(PYTHON) $(DECODETREE) --static-decode disas_vfp -o $@ $<,\ + "GEN", $(TARGET_DIR)$@) + +target/arm/decode-vfp-uncond.inc.c: $(SRC_PATH)/target/arm/vfp-uncond.decode $(DECODETREE) + $(call quiet-command,\ + $(PYTHON) $(DECODETREE) --static-decode disas_vfp_uncond -o $@ $<,\ + "GEN", $(TARGET_DIR)$@) + target/arm/translate-sve.o: target/arm/decode-sve.inc.c +target/arm/translate.o: target/arm/decode-vfp.inc.c +target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c + obj-$(TARGET_AARCH64) += translate-sve.o sve_helper.o diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c new file mode 100644 index 0000000..3447b3e --- /dev/null +++ b/target/arm/translate-vfp.inc.c @@ -0,0 +1,31 @@ +/* + * ARM translation: AArch32 VFP instructions + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005-2007 CodeSourcery + * Copyright (c) 2007 OpenedHand, Ltd. + * Copyright (c) 2019 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + * This file is intended to be included from translate.c; it uses + * some macros and definitions provided by that file. + * It might be possible to convert it to a standalone .c file eventually. 
+ */ + +/* Include the generated VFP decoder */ +#include "decode-vfp.inc.c" +#include "decode-vfp-uncond.inc.c" diff --git a/target/arm/translate.c b/target/arm/translate.c index b1276e0..5ed2680 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1727,6 +1727,9 @@ static inline void gen_mov_vreg_F0(int dp, int reg) #define ARM_CP_RW_BIT (1 << 20) +/* Include the VFP decoder */ +#include "translate-vfp.inc.c" + static inline void iwmmxt_load_reg(TCGv_i64 var, int reg) { tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); @@ -3384,6 +3387,22 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; } + /* + * If the decodetree decoder handles this insn it will always + * emit code to either execute the insn or generate an appropriate + * exception; so we don't need to ever return non-zero to tell + * the calling code to emit an UNDEF exception. + */ + if (extract32(insn, 28, 4) == 0xf) { + if (disas_vfp_uncond(s, insn)) { + return 0; + } + } else { + if (disas_vfp(s, insn)) { + return 0; + } + } + /* FIXME: this access check should not take precedence over UNDEF * for invalid encodings; we will generate incorrect syndrome information * for attempts to execute invalid vfp/neon encodings with FP disabled. diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode new file mode 100644 index 0000000..b1d9dc5 --- /dev/null +++ b/target/arm/vfp-uncond.decode @@ -0,0 +1,28 @@ +# AArch32 VFP instruction descriptions (unconditional insns) +# +# Copyright (c) 2019 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . + +# +# This file is processed by scripts/decodetree.py +# +# Encodings for the unconditional VFP instructions are here: +# generally anything matching A32 +# 1111 1110 .... .... .... 101. ...0 .... +# and T32 +# 1111 110. .... .... .... 101. .... .... +# 1111 1110 .... .... .... 101. .... .... +# (but those patterns might also cover some Neon instructions, +# which do not live in this file.) diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode new file mode 100644 index 0000000..28ee664 --- /dev/null +++ b/target/arm/vfp.decode @@ -0,0 +1,28 @@ +# AArch32 VFP instruction descriptions (conditional insns) +# +# Copyright (c) 2019 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . 
+ +# +# This file is processed by scripts/decodetree.py +# +# Encodings for the conditional VFP instructions are here: +# generally anything matching A32 +# cccc 11.. .... .... .... 101. .... .... +# and T32 +# 1110 110. .... .... .... 101. .... .... +# 1110 1110 .... .... .... 101. .... .... +# (but those patterns might also cover some Neon instructions, +# which do not live in this file.) -- cgit v1.1 From 06db8196bba34776829020192ed623a0b22e6557 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:41 +0100 Subject: target/arm: Factor out VFP access checking code Factor out the VFP access checking code so that we can use it in the leaf functions of the decodetree decoder. We call the function full_vfp_access_check() so we can keep the more natural vfp_access_check() for a version which doesn't have the 'ignore_vfp_enabled' flag -- that way almost all VFP insns will be able to use vfp_access_check(s) and only the special-register access function will have to use full_vfp_access_check(s, ignore_vfp_enabled). Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 100 ++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 101 ++++++----------------------------------- 2 files changed, 113 insertions(+), 88 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 3447b3e..cf3d7fe 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -29,3 +29,103 @@ /* Include the generated VFP decoder */ #include "decode-vfp.inc.c" #include "decode-vfp-uncond.inc.c" + +/* + * Check that VFP access is enabled. If it is, do the necessary + * M-profile lazy-FP handling and then return true. + * If not, emit code to generate an appropriate exception and + * return false. + * The ignore_vfp_enabled argument specifies that we should ignore + * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX + * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns. + */ +static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled) +{ + if (s->fp_excp_el) { + if (arm_dc_feature(s, ARM_FEATURE_M)) { + gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(), + s->fp_excp_el); + } else { + gen_exception_insn(s, 4, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, false), + s->fp_excp_el); + } + return false; + } + + if (!s->vfp_enabled && !ignore_vfp_enabled) { + assert(!arm_dc_feature(s, ARM_FEATURE_M)); + gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), + default_exception_el(s)); + return false; + } + + if (arm_dc_feature(s, ARM_FEATURE_M)) { + /* Handle M-profile lazy FP state mechanics */ + + /* Trigger lazy-state preservation if necessary */ + if (s->v7m_lspact) { + /* + * Lazy state saving affects external memory and also the NVIC, + * so we must mark it as an IO operation for icount. + */ + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_v7m_preserve_fp_state(cpu_env); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_end(); + } + /* + * If the preserve_fp_state helper doesn't throw an exception + * then it will clear LSPACT; we don't need to repeat this for + * any further FP insns in this TB. 
+ */ + s->v7m_lspact = false; + } + + /* Update ownership of FP context: set FPCCR.S to match current state */ + if (s->v8m_fpccr_s_wrong) { + TCGv_i32 tmp; + + tmp = load_cpu_field(v7m.fpccr[M_REG_S]); + if (s->v8m_secure) { + tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK); + } else { + tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK); + } + store_cpu_field(tmp, v7m.fpccr[M_REG_S]); + /* Don't need to do this for any further FP insns in this TB */ + s->v8m_fpccr_s_wrong = false; + } + + if (s->v7m_new_fp_ctxt_needed) { + /* + * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA + * and the FPSCR. + */ + TCGv_i32 control, fpscr; + uint32_t bits = R_V7M_CONTROL_FPCA_MASK; + + fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]); + gen_helper_vfp_set_fpscr(cpu_env, fpscr); + tcg_temp_free_i32(fpscr); + /* + * We don't need to arrange to end the TB, because the only + * parts of FPSCR which we cache in the TB flags are the VECLEN + * and VECSTRIDE, and those don't exist for M-profile. + */ + + if (s->v8m_secure) { + bits |= R_V7M_CONTROL_SFPA_MASK; + } + control = load_cpu_field(v7m.control[M_REG_S]); + tcg_gen_ori_i32(control, control, bits); + store_cpu_field(control, v7m.control[M_REG_S]); + /* Don't need to do this for any further FP insns in this TB */ + s->v7m_new_fp_ctxt_needed = false; + } + } + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 5ed2680..23c8a82 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3373,8 +3373,10 @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn) return 1; } -/* Disassemble a VFP instruction. Returns nonzero if an error occurred - (ie. an undefined instruction). */ +/* + * Disassemble a VFP instruction. Returns nonzero if an error occurred + * (ie. an undefined instruction). + */ static int disas_vfp_insn(DisasContext *s, uint32_t insn) { uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; @@ -3382,6 +3384,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; + bool ignore_vfp_enabled = false; if (!arm_dc_feature(s, ARM_FEATURE_VFP)) { return 1; @@ -3403,98 +3406,20 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) } } - /* FIXME: this access check should not take precedence over UNDEF + /* + * FIXME: this access check should not take precedence over UNDEF * for invalid encodings; we will generate incorrect syndrome information * for attempts to execute invalid vfp/neon encodings with FP disabled. */ - if (s->fp_excp_el) { - if (arm_dc_feature(s, ARM_FEATURE_M)) { - gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(), - s->fp_excp_el); - } else { - gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), - s->fp_excp_el); - } - return 0; - } - - if (!s->vfp_enabled) { - /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. 
*/ - if ((insn & 0x0fe00fff) != 0x0ee00a10) - return 1; + if ((insn & 0x0fe00fff) == 0x0ee00a10) { rn = (insn >> 16) & 0xf; - if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2 - && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) { - return 1; + if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2 + || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) { + ignore_vfp_enabled = true; } } - - if (arm_dc_feature(s, ARM_FEATURE_M)) { - /* Handle M-profile lazy FP state mechanics */ - - /* Trigger lazy-state preservation if necessary */ - if (s->v7m_lspact) { - /* - * Lazy state saving affects external memory and also the NVIC, - * so we must mark it as an IO operation for icount. - */ - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - gen_helper_v7m_preserve_fp_state(cpu_env); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } - /* - * If the preserve_fp_state helper doesn't throw an exception - * then it will clear LSPACT; we don't need to repeat this for - * any further FP insns in this TB. - */ - s->v7m_lspact = false; - } - - /* Update ownership of FP context: set FPCCR.S to match current state */ - if (s->v8m_fpccr_s_wrong) { - TCGv_i32 tmp; - - tmp = load_cpu_field(v7m.fpccr[M_REG_S]); - if (s->v8m_secure) { - tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK); - } else { - tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK); - } - store_cpu_field(tmp, v7m.fpccr[M_REG_S]); - /* Don't need to do this for any further FP insns in this TB */ - s->v8m_fpccr_s_wrong = false; - } - - if (s->v7m_new_fp_ctxt_needed) { - /* - * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA - * and the FPSCR. - */ - TCGv_i32 control, fpscr; - uint32_t bits = R_V7M_CONTROL_FPCA_MASK; - - fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]); - gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(fpscr); - /* - * We don't need to arrange to end the TB, because the only - * parts of FPSCR which we cache in the TB flags are the VECLEN - * and VECSTRIDE, and those don't exist for M-profile. - */ - - if (s->v8m_secure) { - bits |= R_V7M_CONTROL_SFPA_MASK; - } - control = load_cpu_field(v7m.control[M_REG_S]); - tcg_gen_ori_i32(control, control, bits); - store_cpu_field(control, v7m.control[M_REG_S]); - /* Don't need to do this for any further FP insns in this TB */ - s->v7m_new_fp_ctxt_needed = false; - } + if (!full_vfp_access_check(s, ignore_vfp_enabled)) { + return 0; } if (extract32(insn, 28, 4) == 0xf) { -- cgit v1.1 From 3de79d335c9aa7d726865e3933d9b21781032183 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:42 +0100 Subject: target/arm: Fix Cortex-R5F MVFR values The Cortex-R5F initfn was not correctly setting up the MVFR ID register values. Fill these in, since some subsequent patches will use ID register checks rather than CPU feature bit checks. 
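(An example of the ID-register style of check, added later in this series as isar_feature_aa32_fp_d32(), is the test for whether D16-D31 exist:

    return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;

so the MVFR values set here must be accurate for it to give the right answer.)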
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 4d5d46d..c8441fc 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -1609,6 +1609,8 @@ static void cortex_r5f_initfn(Object *obj) cortex_r5_initfn(obj); set_feature(&cpu->env, ARM_FEATURE_VFP3); + cpu->isar.mvfr0 = 0x10110221; + cpu->isar.mvfr1 = 0x00000011; } static const ARMCPRegInfo cortexa8_cp_reginfo[] = { -- cgit v1.1 From 973751fd798d41402d34f9f705c0c6d1633d0cda Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:42 +0100 Subject: target/arm: Explicitly enable VFP short-vectors for aarch32 -cpu max At the moment our -cpu max for AArch32 supports VFP short-vectors because we always implement them, even for CPUs which should not have them. The following commits are going to switch to using the correct ID-register-check to enable or disable short vector support, so we need to turn it on explicitly for -cpu max, because Cortex-A15 doesn't implement it. We don't enable this for the AArch64 -cpu max, because the v8A architecture never supports short-vectors. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index c8441fc..2335659 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -2023,6 +2023,10 @@ static void arm_max_initfn(Object *obj) kvm_arm_set_cpu_features_from_host(cpu); } else { cortex_a15_initfn(obj); + + /* old-style VFP short-vector support */ + cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); + #ifdef CONFIG_USER_ONLY /* We don't set these in system emulation mode for the moment, * since we don't correctly set (all of) the ID registers to -- cgit v1.1 From b3ff4b87b4ae08120a51fe12592725e1dca8a085 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:42 +0100 Subject: target/arm: Convert the VSEL instructions to decodetree Convert the VSEL instructions to decodetree. We leave trans_VSEL() in translate.c for now as this allows the patch to show just the changes from the old handle_vsel(). In the old code the check for "do D16-D31 exist" was hidden in the VFP_DREG macro, and assumed that VFPv3 always implied that D16-D31 exist. In the new code we do the correct ID register test. This gives identical behaviour for most of our CPUs, and fixes previously incorrect handling for Cortex-R5F, Cortex-M4 and Cortex-M33, which all implement VFPv3 or better with only 16 double-precision registers. 
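The new-style check, as it appears in trans_VSEL():

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
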
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.h | 6 ++++++ target/arm/translate-vfp.inc.c | 9 +++++++++ target/arm/translate.c | 35 +++++++++++++++++++++++++---------- target/arm/vfp-uncond.decode | 19 +++++++++++++++++++ 4 files changed, 59 insertions(+), 10 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 06ddc49..dc50c86 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3371,6 +3371,12 @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; } +static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id) +{ + /* Return true if D16-D31 are implemented */ + return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2; +} + /* * We always set the FP and SIMD FP16 fields to indicate identical * levels of support (assuming SIMD is implemented at all), so diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index cf3d7fe..f753513 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -129,3 +129,12 @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled) return true; } + +/* + * The most usual kind of VFP access check, for everything except + * FMXR/FMRX to the always-available special registers. + */ +static bool vfp_access_check(DisasContext *s) +{ + return full_vfp_access_check(s, false); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 23c8a82..5e10d85 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3076,10 +3076,27 @@ static void gen_neon_dup_high16(TCGv_i32 var) tcg_temp_free_i32(tmp); } -static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, - uint32_t dp) +static bool trans_VSEL(DisasContext *s, arg_VSEL *a) { - uint32_t cc = extract32(insn, 20, 2); + uint32_t rd, rn, rm; + bool dp = a->dp; + + if (!dc_isar_feature(aa32_vsel, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && + ((a->vm | a->vn | a->vd) & 0x10)) { + return false; + } + rd = a->vd; + rn = a->vn; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } if (dp) { TCGv_i64 frn, frm, dest; @@ -3101,7 +3118,7 @@ static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); - switch (cc) { + switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, frn, frm); @@ -3148,7 +3165,7 @@ static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, dest = tcg_temp_new_i32(); tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); - switch (cc) { + switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, frn, frm); @@ -3182,7 +3199,7 @@ static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, tcg_temp_free_i32(zero); } - return 0; + return true; } static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn, @@ -3354,10 +3371,8 @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn) rm = VFP_SREG_M(insn); } - if ((insn & 0x0f800e50) == 0x0e000a00 && dc_isar_feature(aa32_vsel, s)) { - return handle_vsel(insn, rd, rn, rm, dp); - } else if ((insn & 0x0fb00e10) == 0x0e800a00 && - dc_isar_feature(aa32_vminmaxnm, s)) { + if ((insn & 0x0fb00e10) == 0x0e800a00 && + dc_isar_feature(aa32_vminmaxnm, s)) { 
return handle_vminmaxnm(insn, rd, rn, rm, dp); } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40 && dc_isar_feature(aa32_vrint, s)) { diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode index b1d9dc5..b7f7c27 100644 --- a/target/arm/vfp-uncond.decode +++ b/target/arm/vfp-uncond.decode @@ -26,3 +26,22 @@ # 1111 1110 .... .... .... 101. .... .... # (but those patterns might also cover some Neon instructions, # which do not live in this file.) + +# VFP registers have an odd encoding with a four-bit field +# and a one-bit field which are assembled in different orders +# depending on whether the register is double or single precision. +# Each individual instruction function must do the checks for +# "double register selected but CPU does not have double support" +# and "double register number has bit 4 set but CPU does not +# support D16-D31" (which should UNDEF). +%vm_dp 5:1 0:4 +%vm_sp 0:4 5:1 +%vn_dp 7:1 16:4 +%vn_sp 16:4 7:1 +%vd_dp 22:1 12:4 +%vd_sp 12:4 22:1 + +VSEL 1111 1110 0. cc:2 .... .... 1010 .0.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0 +VSEL 1111 1110 0. cc:2 .... .... 1011 .0.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1 -- cgit v1.1 From f65988a1efdb42f9058db44297591491842e697c Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:43 +0100 Subject: target/arm: Convert VMINNM, VMAXNM to decodetree Convert the VMINNM and VMAXNM instructions to decodetree. As with VSEL, we leave the trans_VMINMAXNM() function in translate.c for the moment. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate.c | 41 ++++++++++++++++++++++++++++------------- target/arm/vfp-uncond.decode | 5 +++++ 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/target/arm/translate.c b/target/arm/translate.c index 5e10d85..c989431 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3202,11 +3202,31 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) return true; } -static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn, - uint32_t rm, uint32_t dp) +static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) { - uint32_t vmin = extract32(insn, 6, 1); - TCGv_ptr fpst = get_fpstatus_ptr(0); + uint32_t rd, rn, rm; + bool dp = a->dp; + bool vmin = a->op; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_vminmaxnm, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && + ((a->vm | a->vn | a->vd) & 0x10)) { + return false; + } + rd = a->vd; + rn = a->vn; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(0); if (dp) { TCGv_i64 frn, frm, dest; @@ -3247,7 +3267,7 @@ static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn, } tcg_temp_free_ptr(fpst); - return 0; + return true; } static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, @@ -3359,23 +3379,18 @@ static const uint8_t fp_decode_rm[] = { static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn) { - uint32_t rd, rn, rm, dp = extract32(insn, 8, 1); + uint32_t rd, rm, dp = extract32(insn, 8, 1); if (dp) { VFP_DREG_D(rd, insn); - VFP_DREG_N(rn, insn); VFP_DREG_M(rm, insn); } else { rd = VFP_SREG_D(insn); - rn = VFP_SREG_N(insn); rm = VFP_SREG_M(insn); } - if ((insn & 0x0fb00e10) == 0x0e800a00 && - dc_isar_feature(aa32_vminmaxnm, s)) { - return handle_vminmaxnm(insn, rd, rn, rm, dp); - } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40 && - dc_isar_feature(aa32_vrint, s)) { + if ((insn & 0x0fbc0ed0) == 
0x0eb80a40 && + dc_isar_feature(aa32_vrint, s)) { /* VRINTA, VRINTN, VRINTP, VRINTM */ int rounding = fp_decode_rm[extract32(insn, 16, 2)]; return handle_vrint(insn, rd, rm, dp, rounding); diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode index b7f7c27..8ab201f 100644 --- a/target/arm/vfp-uncond.decode +++ b/target/arm/vfp-uncond.decode @@ -45,3 +45,8 @@ VSEL 1111 1110 0. cc:2 .... .... 1010 .0.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0 VSEL 1111 1110 0. cc:2 .... .... 1011 .0.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1 + +VMINMAXNM 1111 1110 1.00 .... .... 1010 . op:1 .0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0 +VMINMAXNM 1111 1110 1.00 .... .... 1011 . op:1 .0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1 -- cgit v1.1 From e3bb599d16e4678b228d80194cee328f894b1ceb Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:43 +0100 Subject: target/arm: Convert VRINTA/VRINTN/VRINTP/VRINTM to decodetree Convert the VRINTA/VRINTN/VRINTP/VRINTM instructions to decodetree. Again, trans_VRINT() is temporarily left in translate.c. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate.c | 60 ++++++++++++++++++++++++++++---------------- target/arm/vfp-uncond.decode | 5 ++++ 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/target/arm/translate.c b/target/arm/translate.c index c989431..99561c7 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3270,11 +3270,43 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) return true; } -static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, - int rounding) +/* + * Table for converting the most common AArch32 encoding of + * rounding mode to arm_fprounding order (which matches the + * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). + */ +static const uint8_t fp_decode_rm[] = { + FPROUNDING_TIEAWAY, + FPROUNDING_TIEEVEN, + FPROUNDING_POSINF, + FPROUNDING_NEGINF, +}; + +static bool trans_VRINT(DisasContext *s, arg_VRINT *a) { - TCGv_ptr fpst = get_fpstatus_ptr(0); + uint32_t rd, rm; + bool dp = a->dp; + TCGv_ptr fpst; TCGv_i32 tcg_rmode; + int rounding = fp_decode_rm[a->rm]; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && + ((a->vm | a->vd) & 0x10)) { + return false; + } + rd = a->vd; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(0); tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); @@ -3305,7 +3337,7 @@ static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, tcg_temp_free_i32(tcg_rmode); tcg_temp_free_ptr(fpst); - return 0; + return true; } static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, @@ -3366,17 +3398,6 @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, return 0; } -/* Table for converting the most common AArch32 encoding of - * rounding mode to arm_fprounding order (which matches the - * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). 
- */ -static const uint8_t fp_decode_rm[] = { - FPROUNDING_TIEAWAY, - FPROUNDING_TIEEVEN, - FPROUNDING_POSINF, - FPROUNDING_NEGINF, -}; - static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn) { uint32_t rd, rm, dp = extract32(insn, 8, 1); @@ -3389,13 +3410,8 @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn) rm = VFP_SREG_M(insn); } - if ((insn & 0x0fbc0ed0) == 0x0eb80a40 && - dc_isar_feature(aa32_vrint, s)) { - /* VRINTA, VRINTN, VRINTP, VRINTM */ - int rounding = fp_decode_rm[extract32(insn, 16, 2)]; - return handle_vrint(insn, rd, rm, dp, rounding); - } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40 && - dc_isar_feature(aa32_vcvt_dr, s)) { + if ((insn & 0x0fbc0e50) == 0x0ebc0a40 && + dc_isar_feature(aa32_vcvt_dr, s)) { /* VCVTA, VCVTN, VCVTP, VCVTM */ int rounding = fp_decode_rm[extract32(insn, 16, 2)]; return handle_vcvt(insn, rd, rm, dp, rounding); diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode index 8ab201f..0aa8328 100644 --- a/target/arm/vfp-uncond.decode +++ b/target/arm/vfp-uncond.decode @@ -50,3 +50,8 @@ VMINMAXNM 1111 1110 1.00 .... .... 1010 . op:1 .0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0 VMINMAXNM 1111 1110 1.00 .... .... 1011 . op:1 .0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1 + +VRINT 1111 1110 1.11 10 rm:2 .... 1010 01.0 .... \ + vm=%vm_sp vd=%vd_sp dp=0 +VRINT 1111 1110 1.11 10 rm:2 .... 1011 01.0 .... \ + vm=%vm_dp vd=%vd_dp dp=1 -- cgit v1.1 From c2a46a914cd5c38fd0ee57ff0befc1c5bde27bcf Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:43 +0100 Subject: target/arm: Convert VCVTA/VCVTN/VCVTP/VCVTM to decodetree Convert the VCVTA/VCVTN/VCVTP/VCVTM instructions to decodetree. trans_VCVT() is temporarily left in translate.c. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate.c | 72 ++++++++++++++++++++------------------------ target/arm/vfp-uncond.decode | 6 ++++ 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/target/arm/translate.c b/target/arm/translate.c index 99561c7..1f9ff97 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3340,12 +3340,31 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) return true; } -static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, - int rounding) +static bool trans_VCVT(DisasContext *s, arg_VCVT *a) { - bool is_signed = extract32(insn, 7, 1); - TCGv_ptr fpst = get_fpstatus_ptr(0); + uint32_t rd, rm; + bool dp = a->dp; + TCGv_ptr fpst; TCGv_i32 tcg_rmode, tcg_shift; + int rounding = fp_decode_rm[a->rm]; + bool is_signed = a->op; + + if (!dc_isar_feature(aa32_vcvt_dr, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + rd = a->vd; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(0); tcg_shift = tcg_const_i32(0); @@ -3355,10 +3374,6 @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, if (dp) { TCGv_i64 tcg_double, tcg_res; TCGv_i32 tcg_tmp; - /* Rd is encoded as a single precision register even when the source - * is double precision. 
- */ - rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1); tcg_double = tcg_temp_new_i64(); tcg_res = tcg_temp_new_i64(); tcg_tmp = tcg_temp_new_i32(); @@ -3395,28 +3410,7 @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, tcg_temp_free_ptr(fpst); - return 0; -} - -static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn) -{ - uint32_t rd, rm, dp = extract32(insn, 8, 1); - - if (dp) { - VFP_DREG_D(rd, insn); - VFP_DREG_M(rm, insn); - } else { - rd = VFP_SREG_D(insn); - rm = VFP_SREG_M(insn); - } - - if ((insn & 0x0fbc0e50) == 0x0ebc0a40 && - dc_isar_feature(aa32_vcvt_dr, s)) { - /* VCVTA, VCVTN, VCVTP, VCVTM */ - int rounding = fp_decode_rm[extract32(insn, 16, 2)]; - return handle_vcvt(insn, rd, rm, dp, rounding); - } - return 1; + return true; } /* @@ -3452,6 +3446,15 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) } } + if (extract32(insn, 28, 4) == 0xf) { + /* + * Encodings with T=1 (Thumb) or unconditional (ARM): these + * were all handled by the decodetree decoder, so any insn + * patterns which get here must be UNDEF. + */ + return 1; + } + /* * FIXME: this access check should not take precedence over UNDEF * for invalid encodings; we will generate incorrect syndrome information @@ -3468,15 +3471,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 0; } - if (extract32(insn, 28, 4) == 0xf) { - /* - * Encodings with T=1 (Thumb) or unconditional (ARM): - * only used for the "miscellaneous VFP features" added in v8A - * and v7M (and gated on the MVFR2.FPMisc field). - */ - return disas_vfp_misc_insn(s, insn); - } - dp = ((insn & 0xf00) == 0xb00); switch ((insn >> 24) & 0xf) { case 0xe: diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode index 0aa8328..5af1f2e 100644 --- a/target/arm/vfp-uncond.decode +++ b/target/arm/vfp-uncond.decode @@ -55,3 +55,9 @@ VRINT 1111 1110 1.11 10 rm:2 .... 1010 01.0 .... \ vm=%vm_sp vd=%vd_sp dp=0 VRINT 1111 1110 1.11 10 rm:2 .... 1011 01.0 .... \ vm=%vm_dp vd=%vd_dp dp=1 + +# VCVT float to int with specified rounding mode; Vd is always single-precision +VCVT 1111 1110 1.11 11 rm:2 .... 1010 op:1 1.0 .... \ + vm=%vm_sp vd=%vd_sp dp=0 +VCVT 1111 1110 1.11 11 rm:2 .... 1011 op:1 1.0 .... \ + vm=%vm_dp vd=%vd_sp dp=1 -- cgit v1.1 From f7bbb8f31f0761edbf0c64b7ab3c3f49c13612ea Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:43 +0100 Subject: target/arm: Move the VFP trans_* functions to translate-vfp.inc.c Move the trans_*() functions we've just created from translate.c to translate-vfp.inc.c. This is pure code motion with no textual changes (this can be checked with 'git show --color-moved'). 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 337 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 337 ----------------------------------------- 2 files changed, 337 insertions(+), 337 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index f753513..2f070a6 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -138,3 +138,340 @@ static bool vfp_access_check(DisasContext *s) { return full_vfp_access_check(s, false); } + +static bool trans_VSEL(DisasContext *s, arg_VSEL *a) +{ + uint32_t rd, rn, rm; + bool dp = a->dp; + + if (!dc_isar_feature(aa32_vsel, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && + ((a->vm | a->vn | a->vd) & 0x10)) { + return false; + } + rd = a->vd; + rn = a->vn; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + if (dp) { + TCGv_i64 frn, frm, dest; + TCGv_i64 tmp, zero, zf, nf, vf; + + zero = tcg_const_i64(0); + + frn = tcg_temp_new_i64(); + frm = tcg_temp_new_i64(); + dest = tcg_temp_new_i64(); + + zf = tcg_temp_new_i64(); + nf = tcg_temp_new_i64(); + vf = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(zf, cpu_ZF); + tcg_gen_ext_i32_i64(nf, cpu_NF); + tcg_gen_ext_i32_i64(vf, cpu_VF); + + tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); + switch (a->cc) { + case 0: /* eq: Z */ + tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, + frn, frm); + break; + case 1: /* vs: V */ + tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero, + frn, frm); + break; + case 2: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i64(); + tcg_gen_xor_i64(tmp, vf, nf); + tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, + frn, frm); + tcg_temp_free_i64(tmp); + break; + case 3: /* gt: !Z && N == V */ + tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, + frn, frm); + tmp = tcg_temp_new_i64(); + tcg_gen_xor_i64(tmp, vf, nf); + tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, + dest, frm); + tcg_temp_free_i64(tmp); + break; + } + tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i64(frn); + tcg_temp_free_i64(frm); + tcg_temp_free_i64(dest); + + tcg_temp_free_i64(zf); + tcg_temp_free_i64(nf); + tcg_temp_free_i64(vf); + + tcg_temp_free_i64(zero); + } else { + TCGv_i32 frn, frm, dest; + TCGv_i32 tmp, zero; + + zero = tcg_const_i32(0); + + frn = tcg_temp_new_i32(); + frm = tcg_temp_new_i32(); + dest = tcg_temp_new_i32(); + tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); + switch (a->cc) { + case 0: /* eq: Z */ + tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, + frn, frm); + break; + case 1: /* vs: V */ + tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero, + frn, frm); + break; + case 2: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i32(); + tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); + tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, + frn, frm); + tcg_temp_free_i32(tmp); + break; + case 3: /* gt: !Z && N == V */ + tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, + frn, frm); + tmp = tcg_temp_new_i32(); + tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); + tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, + dest, frm); + tcg_temp_free_i32(tmp); + break; + } + tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i32(frn); + tcg_temp_free_i32(frm); + tcg_temp_free_i32(dest); + + tcg_temp_free_i32(zero); + } + + 
return true; +} + +static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) +{ + uint32_t rd, rn, rm; + bool dp = a->dp; + bool vmin = a->op; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_vminmaxnm, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && + ((a->vm | a->vn | a->vd) & 0x10)) { + return false; + } + rd = a->vd; + rn = a->vn; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(0); + + if (dp) { + TCGv_i64 frn, frm, dest; + + frn = tcg_temp_new_i64(); + frm = tcg_temp_new_i64(); + dest = tcg_temp_new_i64(); + + tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); + if (vmin) { + gen_helper_vfp_minnumd(dest, frn, frm, fpst); + } else { + gen_helper_vfp_maxnumd(dest, frn, frm, fpst); + } + tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i64(frn); + tcg_temp_free_i64(frm); + tcg_temp_free_i64(dest); + } else { + TCGv_i32 frn, frm, dest; + + frn = tcg_temp_new_i32(); + frm = tcg_temp_new_i32(); + dest = tcg_temp_new_i32(); + + tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); + if (vmin) { + gen_helper_vfp_minnums(dest, frn, frm, fpst); + } else { + gen_helper_vfp_maxnums(dest, frn, frm, fpst); + } + tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i32(frn); + tcg_temp_free_i32(frm); + tcg_temp_free_i32(dest); + } + + tcg_temp_free_ptr(fpst); + return true; +} + +/* + * Table for converting the most common AArch32 encoding of + * rounding mode to arm_fprounding order (which matches the + * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). + */ +static const uint8_t fp_decode_rm[] = { + FPROUNDING_TIEAWAY, + FPROUNDING_TIEEVEN, + FPROUNDING_POSINF, + FPROUNDING_NEGINF, +}; + +static bool trans_VRINT(DisasContext *s, arg_VRINT *a) +{ + uint32_t rd, rm; + bool dp = a->dp; + TCGv_ptr fpst; + TCGv_i32 tcg_rmode; + int rounding = fp_decode_rm[a->rm]; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && + ((a->vm | a->vd) & 0x10)) { + return false; + } + rd = a->vd; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(0); + + tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + + if (dp) { + TCGv_i64 tcg_op; + TCGv_i64 tcg_res; + tcg_op = tcg_temp_new_i64(); + tcg_res = tcg_temp_new_i64(); + tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm)); + gen_helper_rintd(tcg_res, tcg_op, fpst); + tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i64(tcg_op); + tcg_temp_free_i64(tcg_res); + } else { + TCGv_i32 tcg_op; + TCGv_i32 tcg_res; + tcg_op = tcg_temp_new_i32(); + tcg_res = tcg_temp_new_i32(); + tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm)); + gen_helper_rints(tcg_res, tcg_op, fpst); + tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i32(tcg_op); + tcg_temp_free_i32(tcg_res); + } + + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_temp_free_i32(tcg_rmode); + + tcg_temp_free_ptr(fpst); + return true; +} + +static bool trans_VCVT(DisasContext *s, arg_VCVT *a) +{ + uint32_t rd, rm; + bool dp = a->dp; + TCGv_ptr fpst; + TCGv_i32 tcg_rmode, tcg_shift; + int rounding = fp_decode_rm[a->rm]; + bool is_signed = a->op; + + if 
(!dc_isar_feature(aa32_vcvt_dr, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + rd = a->vd; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(0); + + tcg_shift = tcg_const_i32(0); + + tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + + if (dp) { + TCGv_i64 tcg_double, tcg_res; + TCGv_i32 tcg_tmp; + tcg_double = tcg_temp_new_i64(); + tcg_res = tcg_temp_new_i64(); + tcg_tmp = tcg_temp_new_i32(); + tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm)); + if (is_signed) { + gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst); + } else { + gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst); + } + tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res); + tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd)); + tcg_temp_free_i32(tcg_tmp); + tcg_temp_free_i64(tcg_res); + tcg_temp_free_i64(tcg_double); + } else { + TCGv_i32 tcg_single, tcg_res; + tcg_single = tcg_temp_new_i32(); + tcg_res = tcg_temp_new_i32(); + tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm)); + if (is_signed) { + gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst); + } else { + gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst); + } + tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd)); + tcg_temp_free_i32(tcg_res); + tcg_temp_free_i32(tcg_single); + } + + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_temp_free_i32(tcg_rmode); + + tcg_temp_free_i32(tcg_shift); + + tcg_temp_free_ptr(fpst); + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 1f9ff97..4c11407 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3076,343 +3076,6 @@ static void gen_neon_dup_high16(TCGv_i32 var) tcg_temp_free_i32(tmp); } -static bool trans_VSEL(DisasContext *s, arg_VSEL *a) -{ - uint32_t rd, rn, rm; - bool dp = a->dp; - - if (!dc_isar_feature(aa32_vsel, s)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && - ((a->vm | a->vn | a->vd) & 0x10)) { - return false; - } - rd = a->vd; - rn = a->vn; - rm = a->vm; - - if (!vfp_access_check(s)) { - return true; - } - - if (dp) { - TCGv_i64 frn, frm, dest; - TCGv_i64 tmp, zero, zf, nf, vf; - - zero = tcg_const_i64(0); - - frn = tcg_temp_new_i64(); - frm = tcg_temp_new_i64(); - dest = tcg_temp_new_i64(); - - zf = tcg_temp_new_i64(); - nf = tcg_temp_new_i64(); - vf = tcg_temp_new_i64(); - - tcg_gen_extu_i32_i64(zf, cpu_ZF); - tcg_gen_ext_i32_i64(nf, cpu_NF); - tcg_gen_ext_i32_i64(vf, cpu_VF); - - tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); - switch (a->cc) { - case 0: /* eq: Z */ - tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, - frn, frm); - break; - case 1: /* vs: V */ - tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero, - frn, frm); - break; - case 2: /* ge: N == V -> N ^ V == 0 */ - tmp = tcg_temp_new_i64(); - tcg_gen_xor_i64(tmp, vf, nf); - tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, - frn, frm); - tcg_temp_free_i64(tmp); - break; - case 3: /* gt: !Z && N == V */ - tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, - frn, frm); - tmp = tcg_temp_new_i64(); - tcg_gen_xor_i64(tmp, vf, nf); - tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, - dest, frm); - tcg_temp_free_i64(tmp); - break; - } - tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd)); - 
tcg_temp_free_i64(frn); - tcg_temp_free_i64(frm); - tcg_temp_free_i64(dest); - - tcg_temp_free_i64(zf); - tcg_temp_free_i64(nf); - tcg_temp_free_i64(vf); - - tcg_temp_free_i64(zero); - } else { - TCGv_i32 frn, frm, dest; - TCGv_i32 tmp, zero; - - zero = tcg_const_i32(0); - - frn = tcg_temp_new_i32(); - frm = tcg_temp_new_i32(); - dest = tcg_temp_new_i32(); - tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); - switch (a->cc) { - case 0: /* eq: Z */ - tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, - frn, frm); - break; - case 1: /* vs: V */ - tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero, - frn, frm); - break; - case 2: /* ge: N == V -> N ^ V == 0 */ - tmp = tcg_temp_new_i32(); - tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); - tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, - frn, frm); - tcg_temp_free_i32(tmp); - break; - case 3: /* gt: !Z && N == V */ - tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, - frn, frm); - tmp = tcg_temp_new_i32(); - tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); - tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, - dest, frm); - tcg_temp_free_i32(tmp); - break; - } - tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i32(frn); - tcg_temp_free_i32(frm); - tcg_temp_free_i32(dest); - - tcg_temp_free_i32(zero); - } - - return true; -} - -static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) -{ - uint32_t rd, rn, rm; - bool dp = a->dp; - bool vmin = a->op; - TCGv_ptr fpst; - - if (!dc_isar_feature(aa32_vminmaxnm, s)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && - ((a->vm | a->vn | a->vd) & 0x10)) { - return false; - } - rd = a->vd; - rn = a->vn; - rm = a->vm; - - if (!vfp_access_check(s)) { - return true; - } - - fpst = get_fpstatus_ptr(0); - - if (dp) { - TCGv_i64 frn, frm, dest; - - frn = tcg_temp_new_i64(); - frm = tcg_temp_new_i64(); - dest = tcg_temp_new_i64(); - - tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); - if (vmin) { - gen_helper_vfp_minnumd(dest, frn, frm, fpst); - } else { - gen_helper_vfp_maxnumd(dest, frn, frm, fpst); - } - tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i64(frn); - tcg_temp_free_i64(frm); - tcg_temp_free_i64(dest); - } else { - TCGv_i32 frn, frm, dest; - - frn = tcg_temp_new_i32(); - frm = tcg_temp_new_i32(); - dest = tcg_temp_new_i32(); - - tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); - if (vmin) { - gen_helper_vfp_minnums(dest, frn, frm, fpst); - } else { - gen_helper_vfp_maxnums(dest, frn, frm, fpst); - } - tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i32(frn); - tcg_temp_free_i32(frm); - tcg_temp_free_i32(dest); - } - - tcg_temp_free_ptr(fpst); - return true; -} - -/* - * Table for converting the most common AArch32 encoding of - * rounding mode to arm_fprounding order (which matches the - * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). 
- */ -static const uint8_t fp_decode_rm[] = { - FPROUNDING_TIEAWAY, - FPROUNDING_TIEEVEN, - FPROUNDING_POSINF, - FPROUNDING_NEGINF, -}; - -static bool trans_VRINT(DisasContext *s, arg_VRINT *a) -{ - uint32_t rd, rm; - bool dp = a->dp; - TCGv_ptr fpst; - TCGv_i32 tcg_rmode; - int rounding = fp_decode_rm[a->rm]; - - if (!dc_isar_feature(aa32_vrint, s)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && - ((a->vm | a->vd) & 0x10)) { - return false; - } - rd = a->vd; - rm = a->vm; - - if (!vfp_access_check(s)) { - return true; - } - - fpst = get_fpstatus_ptr(0); - - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - - if (dp) { - TCGv_i64 tcg_op; - TCGv_i64 tcg_res; - tcg_op = tcg_temp_new_i64(); - tcg_res = tcg_temp_new_i64(); - tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm)); - gen_helper_rintd(tcg_res, tcg_op, fpst); - tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); - } else { - TCGv_i32 tcg_op; - TCGv_i32 tcg_res; - tcg_op = tcg_temp_new_i32(); - tcg_res = tcg_temp_new_i32(); - tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm)); - gen_helper_rints(tcg_res, tcg_op, fpst); - tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); - } - - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - - tcg_temp_free_ptr(fpst); - return true; -} - -static bool trans_VCVT(DisasContext *s, arg_VCVT *a) -{ - uint32_t rd, rm; - bool dp = a->dp; - TCGv_ptr fpst; - TCGv_i32 tcg_rmode, tcg_shift; - int rounding = fp_decode_rm[a->rm]; - bool is_signed = a->op; - - if (!dc_isar_feature(aa32_vcvt_dr, s)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { - return false; - } - rd = a->vd; - rm = a->vm; - - if (!vfp_access_check(s)) { - return true; - } - - fpst = get_fpstatus_ptr(0); - - tcg_shift = tcg_const_i32(0); - - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - - if (dp) { - TCGv_i64 tcg_double, tcg_res; - TCGv_i32 tcg_tmp; - tcg_double = tcg_temp_new_i64(); - tcg_res = tcg_temp_new_i64(); - tcg_tmp = tcg_temp_new_i32(); - tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm)); - if (is_signed) { - gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst); - } else { - gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst); - } - tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res); - tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd)); - tcg_temp_free_i32(tcg_tmp); - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_double); - } else { - TCGv_i32 tcg_single, tcg_res; - tcg_single = tcg_temp_new_i32(); - tcg_res = tcg_temp_new_i32(); - tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm)); - if (is_signed) { - gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst); - } else { - gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst); - } - tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd)); - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_single); - } - - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - - tcg_temp_free_i32(tcg_shift); - - tcg_temp_free_ptr(fpst); - - return true; -} - /* * Disassemble a VFP instruction. Returns nonzero if an error occurred * (ie. an undefined instruction). 
-- cgit v1.1 From 160f3b64c5cc4c8a09a1859edc764882ce6ad6bf Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:44 +0100 Subject: target/arm: Add helpers for VFP register loads and stores The current VFP code has two different idioms for loading and storing from the VFP register file: (1) using the gen_mov_F0_vreg() and similar functions, which load and store to a fixed set of TCG globals cpu_F0s, cpu_F0d, etc (2) by direct calls to tcg_gen_ld_f64() and friends We want to phase out idiom 1 (because the use of the fixed globals is a relic of a much older version of TCG), but idiom 2 is quite longwinded: tcg_gen_ld_f64(tmp, cpu_env, vfp_reg_offset(true, reg)) requires us to specify the 64-bitness twice, once in the function name and once by passing 'true' to vfp_reg_offset(). There's no guard against accidentally passing the wrong flag. Instead, let's move to a convention of accessing 64-bit registers via the existing neon_load_reg64() and neon_store_reg64(), and provide new neon_load_reg32() and neon_store_reg32() for the 32-bit equivalents. Implement the new functions and use them in the code in translate-vfp.inc.c. We will convert the rest of the VFP code as we do the decodetree conversion in subsequent commits. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 40 ++++++++++++++++++++-------------------- target/arm/translate.c | 10 ++++++++++ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 2f070a6..24358f3 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -179,8 +179,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_gen_ext_i32_i64(nf, cpu_NF); tcg_gen_ext_i32_i64(vf, cpu_VF); - tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); + neon_load_reg64(frn, rn); + neon_load_reg64(frm, rm); switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, @@ -207,7 +207,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_temp_free_i64(tmp); break; } - tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd)); + neon_store_reg64(dest, rd); tcg_temp_free_i64(frn); tcg_temp_free_i64(frm); tcg_temp_free_i64(dest); @@ -226,8 +226,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) frn = tcg_temp_new_i32(); frm = tcg_temp_new_i32(); dest = tcg_temp_new_i32(); - tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); + neon_load_reg32(frn, rn); + neon_load_reg32(frm, rm); switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, @@ -254,7 +254,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_temp_free_i32(tmp); break; } - tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd)); + neon_store_reg32(dest, rd); tcg_temp_free_i32(frn); tcg_temp_free_i32(frm); tcg_temp_free_i32(dest); @@ -298,14 +298,14 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) frm = tcg_temp_new_i64(); dest = tcg_temp_new_i64(); - tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm)); + neon_load_reg64(frn, rn); + neon_load_reg64(frm, rm); if (vmin) { gen_helper_vfp_minnumd(dest, frn, frm, fpst); } else { gen_helper_vfp_maxnumd(dest, frn, frm, fpst); } - tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd)); + neon_store_reg64(dest, rd); tcg_temp_free_i64(frn); tcg_temp_free_i64(frm);
tcg_temp_free_i64(dest); @@ -316,14 +316,14 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) frm = tcg_temp_new_i32(); dest = tcg_temp_new_i32(); - tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm)); + neon_load_reg32(frn, rn); + neon_load_reg32(frm, rm); if (vmin) { gen_helper_vfp_minnums(dest, frn, frm, fpst); } else { gen_helper_vfp_maxnums(dest, frn, frm, fpst); } - tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd)); + neon_store_reg32(dest, rd); tcg_temp_free_i32(frn); tcg_temp_free_i32(frm); tcg_temp_free_i32(dest); @@ -379,9 +379,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) TCGv_i64 tcg_res; tcg_op = tcg_temp_new_i64(); tcg_res = tcg_temp_new_i64(); - tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm)); + neon_load_reg64(tcg_op, rm); gen_helper_rintd(tcg_res, tcg_op, fpst); - tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd)); + neon_store_reg64(tcg_res, rd); tcg_temp_free_i64(tcg_op); tcg_temp_free_i64(tcg_res); } else { @@ -389,9 +389,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) TCGv_i32 tcg_res; tcg_op = tcg_temp_new_i32(); tcg_res = tcg_temp_new_i32(); - tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm)); + neon_load_reg32(tcg_op, rm); gen_helper_rints(tcg_res, tcg_op, fpst); - tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd)); + neon_store_reg32(tcg_res, rd); tcg_temp_free_i32(tcg_op); tcg_temp_free_i32(tcg_res); } @@ -440,14 +440,14 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) tcg_double = tcg_temp_new_i64(); tcg_res = tcg_temp_new_i64(); tcg_tmp = tcg_temp_new_i32(); - tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm)); + neon_load_reg64(tcg_double, rm); if (is_signed) { gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst); } else { gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst); } tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res); - tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd)); + neon_store_reg32(tcg_tmp, rd); tcg_temp_free_i32(tcg_tmp); tcg_temp_free_i64(tcg_res); tcg_temp_free_i64(tcg_double); @@ -455,13 +455,13 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) TCGv_i32 tcg_single, tcg_res; tcg_single = tcg_temp_new_i32(); tcg_res = tcg_temp_new_i32(); - tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm)); + neon_load_reg32(tcg_single, rm); if (is_signed) { gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst); } else { gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst); } - tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd)); + neon_store_reg32(tcg_res, rd); tcg_temp_free_i32(tcg_res); tcg_temp_free_i32(tcg_single); } diff --git a/target/arm/translate.c b/target/arm/translate.c index 4c11407..b79091e 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1689,6 +1689,16 @@ static inline void neon_store_reg64(TCGv_i64 var, int reg) tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg)); } +static inline void neon_load_reg32(TCGv_i32 var, int reg) +{ + tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg)); +} + +static inline void neon_store_reg32(TCGv_i32 var, int reg) +{ + tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); +} + static TCGv_ptr vfp_reg_ptr(bool dp, int reg) { TCGv_ptr ret = tcg_temp_new_ptr(); -- cgit v1.1 From 9851ed9269d214c0c6feba960dd14ff09e6c34b4 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:44 +0100 Subject: target/arm: Convert "double-precision" register moves to decodetree Convert the "double-precision" 
register moves to decodetree: this covers VMOV scalar-to-gpreg, VMOV gpreg-to-scalar and VDUP. Note that the conversion process has tightened up a few of the UNDEF encoding checks: we now correctly forbid: * VMOV-to-gpr with U:opc1:opc2 == 10x00 or x0x10 * VMOV-from-gpr with opc1:opc2 == 0x10 * VDUP with B:E == 11 * VDUP with Q == 1 and Vn<0> == 1 Signed-off-by: Peter Maydell --- The accesses of elements < 32 bits could be improved by doing direct ld/st of the right size rather than 32-bit read-and-shift or read-modify-write, but we leave this for later cleanup, since this series is generally trying to stick to fixing the decode. Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 147 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 83 +---------------------- target/arm/vfp.decode | 36 ++++++++++ 3 files changed, 185 insertions(+), 81 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 24358f3..8b0899f 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -475,3 +475,150 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) return true; } + +static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) +{ + /* VMOV scalar to general purpose register */ + TCGv_i32 tmp; + int pass; + uint32_t offset; + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) { + return false; + } + + offset = a->index << a->size; + pass = extract32(offset, 2, 1); + offset = extract32(offset, 0, 2) * 8; + + if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = neon_load_reg(a->vn, pass); + switch (a->size) { + case 0: + if (offset) { + tcg_gen_shri_i32(tmp, tmp, offset); + } + if (a->u) { + gen_uxtb(tmp); + } else { + gen_sxtb(tmp); + } + break; + case 1: + if (a->u) { + if (offset) { + tcg_gen_shri_i32(tmp, tmp, 16); + } else { + gen_uxth(tmp); + } + } else { + if (offset) { + tcg_gen_sari_i32(tmp, tmp, 16); + } else { + gen_sxth(tmp); + } + } + break; + case 2: + break; + } + store_reg(s, a->rt, tmp); + + return true; +} + +static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) +{ + /* VMOV general purpose register to scalar */ + TCGv_i32 tmp, tmp2; + int pass; + uint32_t offset; + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) { + return false; + } + + offset = a->index << a->size; + pass = extract32(offset, 2, 1); + offset = extract32(offset, 0, 2) * 8; + + if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = load_reg(s, a->rt); + switch (a->size) { + case 0: + tmp2 = neon_load_reg(a->vn, pass); + tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8); + tcg_temp_free_i32(tmp2); + break; + case 1: + tmp2 = neon_load_reg(a->vn, pass); + tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16); + tcg_temp_free_i32(tmp2); + break; + case 2: + break; + } + neon_store_reg(a->vn, pass, tmp); + + return true; +} + +static bool trans_VDUP(DisasContext *s, arg_VDUP *a) +{ + /* VDUP (general purpose register) */ + TCGv_i32 tmp; + int size, vec_size; + + if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) { + return false; + } + + if (a->b && a->e) { + return false; + } + + if (a->q && (a->vn & 
1)) { + return false; + } + + vec_size = a->q ? 16 : 8; + if (a->b) { + size = 0; + } else if (a->e) { + size = 1; + } else { + size = 2; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = load_reg(s, a->rt); + tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0), + vec_size, vec_size, tmp); + tcg_temp_free_i32(tmp); + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index b79091e..72156ff 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3151,87 +3151,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) /* single register transfer */ rd = (insn >> 12) & 0xf; if (dp) { - int size; - int pass; - - VFP_DREG_N(rn, insn); - if (insn & 0xf) - return 1; - if (insn & 0x00c00060 - && !arm_dc_feature(s, ARM_FEATURE_NEON)) { - return 1; - } - - pass = (insn >> 21) & 1; - if (insn & (1 << 22)) { - size = 0; - offset = ((insn >> 5) & 3) * 8; - } else if (insn & (1 << 5)) { - size = 1; - offset = (insn & (1 << 6)) ? 16 : 0; - } else { - size = 2; - offset = 0; - } - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - tmp = neon_load_reg(rn, pass); - switch (size) { - case 0: - if (offset) - tcg_gen_shri_i32(tmp, tmp, offset); - if (insn & (1 << 23)) - gen_uxtb(tmp); - else - gen_sxtb(tmp); - break; - case 1: - if (insn & (1 << 23)) { - if (offset) { - tcg_gen_shri_i32(tmp, tmp, 16); - } else { - gen_uxth(tmp); - } - } else { - if (offset) { - tcg_gen_sari_i32(tmp, tmp, 16); - } else { - gen_sxth(tmp); - } - } - break; - case 2: - break; - } - store_reg(s, rd, tmp); - } else { - /* arm->vfp */ - tmp = load_reg(s, rd); - if (insn & (1 << 23)) { - /* VDUP */ - int vec_size = pass ? 16 : 8; - tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0), - vec_size, vec_size, tmp); - tcg_temp_free_i32(tmp); - } else { - /* VMOV */ - switch (size) { - case 0: - tmp2 = neon_load_reg(rn, pass); - tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8); - tcg_temp_free_i32(tmp2); - break; - case 1: - tmp2 = neon_load_reg(rn, pass); - tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16); - tcg_temp_free_i32(tmp2); - break; - case 2: - break; - } - neon_store_reg(rn, pass, tmp); - } - } + /* already handled by decodetree */ + return 1; } else { /* !dp */ bool is_sysreg; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 28ee664..8286bdc 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -26,3 +26,39 @@ # 1110 1110 .... .... .... 101. .... .... # (but those patterns might also cover some Neon instructions, # which do not live in this file.) + +# VFP registers have an odd encoding with a four-bit field +# and a one-bit field which are assembled in different orders +# depending on whether the register is double or single precision. +# Each individual instruction function must do the checks for +# "double register selected but CPU does not have double support" +# and "double register number has bit 4 set but CPU does not +# support D16-D31" (which should UNDEF). +%vm_dp 5:1 0:4 +%vm_sp 0:4 5:1 +%vn_dp 7:1 16:4 +%vn_sp 16:4 7:1 +%vd_dp 22:1 12:4 +%vd_sp 12:4 22:1 + +%vmov_idx_b 21:1 5:2 +%vmov_idx_h 21:1 6:1 + +# VMOV scalar to general-purpose register; note that this does +# include some Neon cases. +VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \ + vn=%vn_dp size=0 index=%vmov_idx_b +VMOV_to_gp ---- 1110 u:1 0. 1 .... rt:4 1011 ..1 1 0000 \ + vn=%vn_dp size=1 index=%vmov_idx_h +VMOV_to_gp ---- 1110 0 0 index:1 1 .... rt:4 1011 .00 1 0000 \ + vn=%vn_dp size=2 u=0 + +VMOV_from_gp ---- 1110 0 1. 0 .... rt:4 1011 ... 
1 0000 \ + vn=%vn_dp size=0 index=%vmov_idx_b +VMOV_from_gp ---- 1110 0 0. 0 .... rt:4 1011 ..1 1 0000 \ + vn=%vn_dp size=1 index=%vmov_idx_h +VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \ + vn=%vn_dp size=2 + +VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \ + vn=%vn_dp -- cgit v1.1 From a9ab50011aeda2dd012da99069e078379315ea18 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:44 +0100 Subject: target/arm: Convert "single-precision" register moves to decodetree Convert the "single-precision" register moves to decodetree: * VMSR * VMRS * VMOV between general purpose register and single precision Note that the VMSR/VMRS conversions make our handling of the "should this UNDEF?" checks consistent between the two instructions: * VMSR to MVFR0, MVFR1, MVFR2 now UNDEF from EL0 (previously was a nop) * VMSR to FPSID now UNDEFs from EL0 or if VFPv3 or better (previously was a nop) * VMSR to FPINST and FPINST2 now UNDEF if VFPv3 or better (previously would write to the register, which had no guest-visible effect because we always UNDEF reads) We also tighten up the decode: we were previously underdecoding some SBZ or SBO bits. The conversion of VMOV_single includes the expansion out of the gen_mov_F0_vreg()/gen_vfp_mrs() and gen_mov_vreg_F0()/gen_vfp_msr() sequences into the simpler direct load/store of the TCG temp via neon_{load,store}_reg32(): we know in the new function that we're always single-precision, we don't need to use the old-and-deprecated cpu_F0* TCG globals, and we don't happen to have the declaration of gen_vfp_msr() and gen_vfp_mrs() at the point in the file where the new function is. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 161 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 148 +------------------------------------ target/arm/vfp.decode | 4 + 3 files changed, 168 insertions(+), 145 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 8b0899f..74c10f9 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -622,3 +622,164 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) return true; } + +static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) +{ + TCGv_i32 tmp; + bool ignore_vfp_enabled = false; + + if (arm_dc_feature(s, ARM_FEATURE_M)) { + /* + * The only M-profile VFP vmrs/vmsr sysreg is FPSCR. + * Writes to R15 are UNPREDICTABLE; we choose to undef. + */ + if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) { + return false; + } + } + + switch (a->reg) { + case ARM_VFP_FPSID: + /* + * VFPv2 allows access to FPSID from userspace; VFPv3 restricts + * all ID registers to privileged access only. 
+ */ + if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_MVFR2: + if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_FPSCR: + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + /* Not present in VFPv3 */ + if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return false; + } + break; + default: + return false; + } + + if (!full_vfp_access_check(s, ignore_vfp_enabled)) { + return true; + } + + if (a->l) { + /* VMRS, move VFP special register to gp register */ + switch (a->reg) { + case ARM_VFP_FPSID: + case ARM_VFP_FPEXC: + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + case ARM_VFP_MVFR2: + tmp = load_cpu_field(vfp.xregs[a->reg]); + break; + case ARM_VFP_FPSCR: + if (a->rt == 15) { + tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]); + tcg_gen_andi_i32(tmp, tmp, 0xf0000000); + } else { + tmp = tcg_temp_new_i32(); + gen_helper_vfp_get_fpscr(tmp, cpu_env); + } + break; + default: + g_assert_not_reached(); + } + + if (a->rt == 15) { + /* Set the 4 flag bits in the CPSR. */ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp); + } else { + store_reg(s, a->rt, tmp); + } + } else { + /* VMSR, move gp register to VFP special register */ + switch (a->reg) { + case ARM_VFP_FPSID: + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + case ARM_VFP_MVFR2: + /* Writes are ignored. */ + break; + case ARM_VFP_FPSCR: + tmp = load_reg(s, a->rt); + gen_helper_vfp_set_fpscr(cpu_env, tmp); + tcg_temp_free_i32(tmp); + gen_lookup_tb(s); + break; + case ARM_VFP_FPEXC: + /* + * TODO: VFP subarchitecture support. + * For now, keep the EN bit only + */ + tmp = load_reg(s, a->rt); + tcg_gen_andi_i32(tmp, tmp, 1 << 30); + store_cpu_field(tmp, vfp.xregs[a->reg]); + gen_lookup_tb(s); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + tmp = load_reg(s, a->rt); + store_cpu_field(tmp, vfp.xregs[a->reg]); + break; + default: + g_assert_not_reached(); + } + } + + return true; +} + +static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) +{ + TCGv_i32 tmp; + + if (!vfp_access_check(s)) { + return true; + } + + if (a->l) { + /* VFP to general purpose register */ + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vn); + if (a->rt == 15) { + /* Set the 4 flag bits in the CPSR. */ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp); + } else { + store_reg(s, a->rt, tmp); + } + } else { + /* general purpose register to VFP */ + tmp = load_reg(s, a->rt); + neon_store_reg32(tmp, a->vn); + tcg_temp_free_i32(tmp); + } + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 72156ff..5b614e7 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3097,7 +3097,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; - bool ignore_vfp_enabled = false; if (!arm_dc_feature(s, ARM_FEATURE_VFP)) { return 1; @@ -3133,14 +3132,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) * for invalid encodings; we will generate incorrect syndrome information * for attempts to execute invalid vfp/neon encodings with FP disabled. 
*/ - if ((insn & 0x0fe00fff) == 0x0ee00a10) { - rn = (insn >> 16) & 0xf; - if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2 - || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) { - ignore_vfp_enabled = true; - } - } - if (!full_vfp_access_check(s, ignore_vfp_enabled)) { + if (!vfp_access_check(s)) { return 0; } @@ -3148,142 +3140,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch ((insn >> 24) & 0xf) { case 0xe: if (insn & (1 << 4)) { - /* single register transfer */ - rd = (insn >> 12) & 0xf; - if (dp) { - /* already handled by decodetree */ - return 1; - } else { /* !dp */ - bool is_sysreg; - - if ((insn & 0x6f) != 0x00) - return 1; - rn = VFP_SREG_N(insn); - - is_sysreg = extract32(insn, 21, 1); - - if (arm_dc_feature(s, ARM_FEATURE_M)) { - /* - * The only M-profile VFP vmrs/vmsr sysreg is FPSCR. - * Writes to R15 are UNPREDICTABLE; we choose to undef. - */ - if (is_sysreg && (rd == 15 || (rn >> 1) != ARM_VFP_FPSCR)) { - return 1; - } - } - - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - if (is_sysreg) { - /* system register */ - rn >>= 1; - - switch (rn) { - case ARM_VFP_FPSID: - /* VFP2 allows access to FSID from userspace. - VFP3 restricts all id registers to privileged - accesses. */ - if (IS_USER(s) - && arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - tmp = load_cpu_field(vfp.xregs[rn]); - break; - case ARM_VFP_FPEXC: - if (IS_USER(s)) - return 1; - tmp = load_cpu_field(vfp.xregs[rn]); - break; - case ARM_VFP_FPINST: - case ARM_VFP_FPINST2: - /* Not present in VFP3. */ - if (IS_USER(s) - || arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - tmp = load_cpu_field(vfp.xregs[rn]); - break; - case ARM_VFP_FPSCR: - if (rd == 15) { - tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]); - tcg_gen_andi_i32(tmp, tmp, 0xf0000000); - } else { - tmp = tcg_temp_new_i32(); - gen_helper_vfp_get_fpscr(tmp, cpu_env); - } - break; - case ARM_VFP_MVFR2: - if (!arm_dc_feature(s, ARM_FEATURE_V8)) { - return 1; - } - /* fall through */ - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - if (IS_USER(s) - || !arm_dc_feature(s, ARM_FEATURE_MVFR)) { - return 1; - } - tmp = load_cpu_field(vfp.xregs[rn]); - break; - default: - return 1; - } - } else { - gen_mov_F0_vreg(0, rn); - tmp = gen_vfp_mrs(); - } - if (rd == 15) { - /* Set the 4 flag bits in the CPSR. */ - gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); - } else { - store_reg(s, rd, tmp); - } - } else { - /* arm->vfp */ - if (is_sysreg) { - rn >>= 1; - /* system register */ - switch (rn) { - case ARM_VFP_FPSID: - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - /* Writes are ignored. */ - break; - case ARM_VFP_FPSCR: - tmp = load_reg(s, rd); - gen_helper_vfp_set_fpscr(cpu_env, tmp); - tcg_temp_free_i32(tmp); - gen_lookup_tb(s); - break; - case ARM_VFP_FPEXC: - if (IS_USER(s)) - return 1; - /* TODO: VFP subarchitecture support. 
- * For now, keep the EN bit only */ - tmp = load_reg(s, rd); - tcg_gen_andi_i32(tmp, tmp, 1 << 30); - store_cpu_field(tmp, vfp.xregs[rn]); - gen_lookup_tb(s); - break; - case ARM_VFP_FPINST: - case ARM_VFP_FPINST2: - if (IS_USER(s)) { - return 1; - } - tmp = load_reg(s, rd); - store_cpu_field(tmp, vfp.xregs[rn]); - break; - default: - return 1; - } - } else { - tmp = load_reg(s, rd); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rn); - } - } - } + /* already handled by decodetree */ + return 1; } else { /* data processing */ bool rd_is_dp = dp; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 8286bdc..bb7de40 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -62,3 +62,7 @@ VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \ vn=%vn_dp VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \ vn=%vn_dp + +VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000 +VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \ + vn=%vn_sp -- cgit v1.1 From 81f681106eabe21c55118a5a41999fb7387fb714 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:45 +0100 Subject: target/arm: Convert VFP two-register transfer insns to decodetree Convert the VFP two-register transfer instructions to decodetree (in the v8 Arm ARM these are the "Advanced SIMD and floating-point 64-bit move" encoding group). Again, we expand out the sequences involving gen_vfp_msr() and gen_vfp_mrs(). Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 70 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 46 ++------------------------- target/arm/vfp.decode | 5 +++ 3 files changed, 77 insertions(+), 44 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 74c10f9..5f08122 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -783,3 +783,73 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) return true; } + +static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a) +{ + TCGv_i32 tmp; + + /* + * VMOV between two general-purpose registers and two single precision + * floating point registers + */ + if (!vfp_access_check(s)) { + return true; + } + + if (a->op) { + /* fpreg to gpreg */ + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm); + store_reg(s, a->rt, tmp); + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm + 1); + store_reg(s, a->rt2, tmp); + } else { + /* gpreg to fpreg */ + tmp = load_reg(s, a->rt); + neon_store_reg32(tmp, a->vm); + tmp = load_reg(s, a->rt2); + neon_store_reg32(tmp, a->vm + 1); + } + + return true; +} + +static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a) +{ + TCGv_i32 tmp; + + /* + * VMOV between two general-purpose registers and one double precision + * floating point register + */ + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (a->op) { + /* fpreg to gpreg */ + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm * 2); + store_reg(s, a->rt, tmp); + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm * 2 + 1); + store_reg(s, a->rt2, tmp); + } else { + /* gpreg to fpreg */ + tmp = load_reg(s, a->rt); + neon_store_reg32(tmp, a->vm * 2); + tcg_temp_free_i32(tmp); + tmp = load_reg(s, a->rt2); + neon_store_reg32(tmp, a->vm * 2 + 1); + tcg_temp_free_i32(tmp); + } + + return true; +} diff --git a/target/arm/translate.c
b/target/arm/translate.c index 5b614e7..0607625 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3703,50 +3703,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 0xc: case 0xd: if ((insn & 0x03e00000) == 0x00400000) { - /* two-register transfer */ - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - if (dp) { - VFP_DREG_M(rm, insn); - } else { - rm = VFP_SREG_M(insn); - } - - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - if (dp) { - gen_mov_F0_vreg(0, rm * 2); - tmp = gen_vfp_mrs(); - store_reg(s, rd, tmp); - gen_mov_F0_vreg(0, rm * 2 + 1); - tmp = gen_vfp_mrs(); - store_reg(s, rn, tmp); - } else { - gen_mov_F0_vreg(0, rm); - tmp = gen_vfp_mrs(); - store_reg(s, rd, tmp); - gen_mov_F0_vreg(0, rm + 1); - tmp = gen_vfp_mrs(); - store_reg(s, rn, tmp); - } - } else { - /* arm->vfp */ - if (dp) { - tmp = load_reg(s, rd); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm * 2); - tmp = load_reg(s, rn); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm * 2 + 1); - } else { - tmp = load_reg(s, rd); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm); - tmp = load_reg(s, rn); - gen_vfp_msr(tmp); - gen_mov_vreg_F0(0, rm + 1); - } - } + /* Already handled by decodetree */ + return 1; } else { /* Load/store */ rn = (insn >> 16) & 0xf; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index bb7de40..134f1c9 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -66,3 +66,8 @@ VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \ VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000 VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \ vn=%vn_sp + +VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \ + vm=%vm_sp +VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \ + vm=%vm_dp -- cgit v1.1 From 79b02a3b5231c5b8cd31e50cd549968dd0a05c49 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:45 +0100 Subject: target/arm: Convert VFP VLDR and VSTR to decodetree Convert the VFP single load/store insns VLDR and VSTR to decodetree. 
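To recap the mechanics (a rough sketch under assumptions, not the verbatim output of scripts/decodetree.py: the matcher function name and the exact argument-struct layout below are invented for illustration; only the pattern, arg_VLDR_VSTR_sp and trans_VLDR_VSTR_sp come from the patch), for a pattern such as the VLDR_VSTR_sp line added to vfp.decode below, the generated decoder checks the fixed bits, extracts each named field into an argument struct, and calls the hand-written trans function:

    /*
     * Illustrative sketch for:
     *   VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \ vd=%vd_sp
     * Struct layout and function name are assumptions, not generated code.
     */
    typedef struct arg_VLDR_VSTR_sp {
        int u, l, rn, vd, imm;
    } arg_VLDR_VSTR_sp;

    static bool decode_VLDR_VSTR_sp(DisasContext *s, uint32_t insn)
    {
        arg_VLDR_VSTR_sp a;

        /* fixed bits: insn[27:24] == 1101, insn[21] == 0, insn[11:8] == 1010 */
        if ((insn & 0x0f200f00) != 0x0d000a00) {
            return false;
        }
        a.u = extract32(insn, 23, 1);     /* u:1 */
        a.l = extract32(insn, 20, 1);     /* l:1 */
        a.rn = extract32(insn, 16, 4);    /* rn:4 */
        a.imm = extract32(insn, 0, 8);    /* imm:8 */
        /* %vd_sp assembles Vd from insn[15:12] and the D bit, insn[22] */
        a.vd = (extract32(insn, 12, 4) << 1) | extract32(insn, 22, 1);
        return trans_VLDR_VSTR_sp(s, &a);
    }

In the real generated code the pattern match is folded into a switch over groups of fixed bits, but the contract with trans_VLDR_VSTR_sp() is as above: return true if the insn was handled (including the case where it UNDEFs via the access checks), false to let decoding continue elsewhere.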
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 73 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 22 ++----------- target/arm/vfp.decode | 7 ++++ 3 files changed, 82 insertions(+), 20 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 5f08122..40f2cac 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -853,3 +853,76 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a) return true; } + +static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) +{ + uint32_t offset; + TCGv_i32 addr; + + if (!vfp_access_check(s)) { + return true; + } + + offset = a->imm << 2; + if (!a->u) { + offset = -offset; + } + + if (s->thumb && a->rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, a->rn); + } + tcg_gen_addi_i32(addr, addr, offset); + if (a->l) { + gen_vfp_ld(s, false, addr); + gen_mov_vreg_F0(false, a->vd); + } else { + gen_mov_F0_vreg(false, a->vd); + gen_vfp_st(s, false, addr); + } + tcg_temp_free_i32(addr); + + return true; +} + +static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) +{ + uint32_t offset; + TCGv_i32 addr; + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + offset = a->imm << 2; + if (!a->u) { + offset = -offset; + } + + if (s->thumb && a->rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, a->rn); + } + tcg_gen_addi_i32(addr, addr, offset); + if (a->l) { + gen_vfp_ld(s, true, addr); + gen_mov_vreg_F0(true, a->vd); + } else { + gen_mov_F0_vreg(true, a->vd); + gen_vfp_st(s, true, addr); + } + tcg_temp_free_i32(addr); + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 0607625..499ce7d 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3713,26 +3713,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) else rd = VFP_SREG_D(insn); if ((insn & 0x01200000) == 0x01000000) { - /* Single load/store */ - offset = (insn & 0xff) << 2; - if ((insn & (1 << 23)) == 0) - offset = -offset; - if (s->thumb && rn == 15) { - /* This is actually UNPREDICTABLE */ - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, s->pc & ~2); - } else { - addr = load_reg(s, rn); - } - tcg_gen_addi_i32(addr, addr, offset); - if (insn & (1 << 20)) { - gen_vfp_ld(s, dp, addr); - gen_mov_vreg_F0(dp, rd); - } else { - gen_mov_F0_vreg(dp, rd); - gen_vfp_st(s, dp, addr); - } - tcg_temp_free_i32(addr); + /* Already handled by decodetree */ + return 1; } else { /* load/store multiple */ int w = insn & (1 << 21); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 134f1c9..8fa7fa0 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -71,3 +71,10 @@ VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \ vm=%vm_sp VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \ vm=%vm_dp + +# Note that the half-precision variants of VLDR and VSTR are +# not part of this decodetree at all because they have bits [9:8] == 0b01 +VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \ + vd=%vd_sp +VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 
1011 imm:8 \ vd=%vd_dp -- cgit v1.1 From fa288de272c5c8a66d5eb683b123706a52bc7ad6 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:45 +0100 Subject: target/arm: Convert the VFP load/store multiple insns to decodetree Convert the VFP load/store multiple insns to decodetree. This includes tightening up the UNDEF checking for pre-VFPv3 CPUs which only have D0-D15: they now UNDEF for any access to D16-D31, not merely when the smallest register in the transfer list is in D16-D31. This conversion does not try to share code between the single precision and the double precision versions; this looks a bit duplicative of code, but it leaves the door open for a future refactoring which gets rid of the use of the "F0" registers by inlining the various functions like gen_vfp_ld() and gen_mov_F0_vreg() which are hiding "if (dp) { ... } else { ... }" conditionalisation. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 162 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 97 +----------------------- target/arm/vfp.decode | 18 +++++ 3 files changed, 183 insertions(+), 94 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 40f2cac..32a1805 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -926,3 +926,165 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) return true; } + +static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) +{ + uint32_t offset; + TCGv_i32 addr; + int i, n; + + n = a->imm; + + if (n == 0 || (a->vd + n) > 32) { + /* + * UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return false; + } + if (a->rn == 15 && a->w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (s->thumb && a->rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, a->rn); + } + if (a->p) { + /* pre-decrement */ + tcg_gen_addi_i32(addr, addr, -(a->imm << 2)); + } + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + /* + * Here 'addr' is the lowest address we will store to, + * and is either the old SP (if post-increment) or + * the new SP (if pre-decrement). For post-increment + * where the old value is below the limit and the new + * value is above, it is UNKNOWN whether the limit check + * triggers; we choose to trigger.
+ */ + gen_helper_v8m_stackcheck(cpu_env, addr); + } + + offset = 4; + for (i = 0; i < n; i++) { + if (a->l) { + /* load */ + gen_vfp_ld(s, false, addr); + gen_mov_vreg_F0(false, a->vd + i); + } else { + /* store */ + gen_mov_F0_vreg(false, a->vd + i); + gen_vfp_st(s, false, addr); + } + tcg_gen_addi_i32(addr, addr, offset); + } + if (a->w) { + /* writeback */ + if (a->p) { + offset = -offset * n; + tcg_gen_addi_i32(addr, addr, offset); + } + store_reg(s, a->rn, addr); + } else { + tcg_temp_free_i32(addr); + } + + return true; +} + +static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) +{ + uint32_t offset; + TCGv_i32 addr; + int i, n; + + n = a->imm >> 1; + + if (n == 0 || (a->vd + n) > 32 || n > 16) { + /* + * UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return false; + } + if (a->rn == 15 && a->w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (s->thumb && a->rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(); + tcg_gen_movi_i32(addr, s->pc & ~2); + } else { + addr = load_reg(s, a->rn); + } + if (a->p) { + /* pre-decrement */ + tcg_gen_addi_i32(addr, addr, -(a->imm << 2)); + } + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + /* + * Here 'addr' is the lowest address we will store to, + * and is either the old SP (if post-increment) or + * the new SP (if pre-decrement). For post-increment + * where the old value is below the limit and the new + * value is above, it is UNKNOWN whether the limit check + * triggers; we choose to trigger. 
+ */ + gen_helper_v8m_stackcheck(cpu_env, addr); + } + + offset = 8; + for (i = 0; i < n; i++) { + if (a->l) { + /* load */ + gen_vfp_ld(s, true, addr); + gen_mov_vreg_F0(true, a->vd + i); + } else { + /* store */ + gen_mov_F0_vreg(true, a->vd + i); + gen_vfp_st(s, true, addr); + } + tcg_gen_addi_i32(addr, addr, offset); + } + if (a->w) { + /* writeback */ + if (a->p) { + offset = -offset * n; + } else if (a->imm & 1) { + offset = 4; + } else { + offset = 0; + } + + if (offset != 0) { + tcg_gen_addi_i32(addr, addr, offset); + } + store_reg(s, a->rn, addr); + } else { + tcg_temp_free_i32(addr); + } + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 499ce7d..58c0eb1 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3092,9 +3092,8 @@ static void gen_neon_dup_high16(TCGv_i32 var) */ static int disas_vfp_insn(DisasContext *s, uint32_t insn) { - uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; + uint32_t rd, rn, rm, op, i, n, delta_d, delta_m, bank_mask; int dp, veclen; - TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; @@ -3702,98 +3701,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) break; case 0xc: case 0xd: - if ((insn & 0x03e00000) == 0x00400000) { - /* Already handled by decodetree */ - return 1; - } else { - /* Load/store */ - rn = (insn >> 16) & 0xf; - if (dp) - VFP_DREG_D(rd, insn); - else - rd = VFP_SREG_D(insn); - if ((insn & 0x01200000) == 0x01000000) { - /* Already handled by decodetree */ - return 1; - } else { - /* load/store multiple */ - int w = insn & (1 << 21); - if (dp) - n = (insn >> 1) & 0x7f; - else - n = insn & 0xff; - - if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) { - /* P == U , W == 1 => UNDEF */ - return 1; - } - if (n == 0 || (rd + n) > 32 || (dp && n > 16)) { - /* UNPREDICTABLE cases for bad immediates: we choose to - * UNDEF to avoid generating huge numbers of TCG ops - */ - return 1; - } - if (rn == 15 && w) { - /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ - return 1; - } - - if (s->thumb && rn == 15) { - /* This is actually UNPREDICTABLE */ - addr = tcg_temp_new_i32(); - tcg_gen_movi_i32(addr, s->pc & ~2); - } else { - addr = load_reg(s, rn); - } - if (insn & (1 << 24)) /* pre-decrement */ - tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2)); - - if (s->v8m_stackcheck && rn == 13 && w) { - /* - * Here 'addr' is the lowest address we will store to, - * and is either the old SP (if post-increment) or - * the new SP (if pre-decrement). For post-increment - * where the old value is below the limit and the new - * value is above, it is UNKNOWN whether the limit check - * triggers; we choose to trigger. - */ - gen_helper_v8m_stackcheck(cpu_env, addr); - } - - if (dp) - offset = 8; - else - offset = 4; - for (i = 0; i < n; i++) { - if (insn & ARM_CP_RW_BIT) { - /* load */ - gen_vfp_ld(s, dp, addr); - gen_mov_vreg_F0(dp, rd + i); - } else { - /* store */ - gen_mov_F0_vreg(dp, rd + i); - gen_vfp_st(s, dp, addr); - } - tcg_gen_addi_i32(addr, addr, offset); - } - if (w) { - /* writeback */ - if (insn & (1 << 24)) - offset = -offset * n; - else if (dp && (insn & 1)) - offset = 4; - else - offset = 0; - - if (offset != 0) - tcg_gen_addi_i32(addr, addr, offset); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(addr); - } - } - } - break; + /* Already handled by decodetree */ + return 1; default: /* Should never happen. 
*/ return 1; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 8fa7fa0..68c9ffc 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -78,3 +78,21 @@ VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \ vd=%vd_sp VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \ vd=%vd_dp + +# We split the load/store multiple up into two patterns to avoid +# overlap with other insns in the "Advanced SIMD load/store and 64-bit move" +# grouping: +# P=0 U=0 W=0 is 64-bit VMOV +# P=1 W=0 is VLDR/VSTR +# P=U W=1 is UNDEF +# leaving P=0 U=1 W=x and P=1 U=0 W=1 for load/store multiple. +# These include FSTM/FLDM. +VLDM_VSTM_sp ---- 1100 1 . w:1 l:1 rn:4 .... 1010 imm:8 \ + vd=%vd_sp p=0 u=1 +VLDM_VSTM_dp ---- 1100 1 . w:1 l:1 rn:4 .... 1011 imm:8 \ + vd=%vd_dp p=0 u=1 + +VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \ + vd=%vd_sp p=1 u=0 w=1 +VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \ + vd=%vd_dp p=1 u=0 w=1 -- cgit v1.1 From 3993d0407dff7233e42f2251db971e126a0497e9 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:46 +0100 Subject: target/arm: Remove VLDR/VSTR/VLDM/VSTM use of cpu_F0s and cpu_F0d Expand out the sequences in the new decoder VLDR/VSTR/VLDM/VSTM trans functions which perform the memory accesses by going via the TCG globals cpu_F0s and cpu_F0d, to use local TCG temps instead. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 46 +++++++++++++++++++++++++----------------- target/arm/translate.c | 18 ----------------- 2 files changed, 28 insertions(+), 36 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 32a1805..9729946 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -857,7 +857,7 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a) static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) { uint32_t offset; - TCGv_i32 addr; + TCGv_i32 addr, tmp; if (!vfp_access_check(s)) { return true; @@ -876,13 +876,15 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) addr = load_reg(s, a->rn); } tcg_gen_addi_i32(addr, addr, offset); + tmp = tcg_temp_new_i32(); if (a->l) { - gen_vfp_ld(s, false, addr); - gen_mov_vreg_F0(false, a->vd); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + neon_store_reg32(tmp, a->vd); } else { - gen_mov_F0_vreg(false, a->vd); - gen_vfp_st(s, false, addr); + neon_load_reg32(tmp, a->vd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } + tcg_temp_free_i32(tmp); tcg_temp_free_i32(addr); return true; @@ -892,6 +894,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) { uint32_t offset; TCGv_i32 addr; + TCGv_i64 tmp; /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { @@ -915,13 +918,15 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) addr = load_reg(s, a->rn); } tcg_gen_addi_i32(addr, addr, offset); + tmp = tcg_temp_new_i64(); if (a->l) { - gen_vfp_ld(s, true, addr); - gen_mov_vreg_F0(true, a->vd); + gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); + neon_store_reg64(tmp, a->vd); } else { - gen_mov_F0_vreg(true, a->vd); - gen_vfp_st(s, true, addr); + neon_load_reg64(tmp, a->vd); + gen_aa32_st64(s, tmp, addr, get_mem_index(s)); } + tcg_temp_free_i64(tmp); tcg_temp_free_i32(addr); return true; @@ -930,7 +935,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) static bool trans_VLDM_VSTM_sp(DisasContext *s, 
arg_VLDM_VSTM_sp *a) { uint32_t offset; - TCGv_i32 addr; + TCGv_i32 addr, tmp; int i, n; n = a->imm; @@ -976,18 +981,20 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) } offset = 4; + tmp = tcg_temp_new_i32(); for (i = 0; i < n; i++) { if (a->l) { /* load */ - gen_vfp_ld(s, false, addr); - gen_mov_vreg_F0(false, a->vd + i); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + neon_store_reg32(tmp, a->vd + i); } else { /* store */ - gen_mov_F0_vreg(false, a->vd + i); - gen_vfp_st(s, false, addr); + neon_load_reg32(tmp, a->vd + i); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } tcg_gen_addi_i32(addr, addr, offset); } + tcg_temp_free_i32(tmp); if (a->w) { /* writeback */ if (a->p) { @@ -1006,6 +1013,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) { uint32_t offset; TCGv_i32 addr; + TCGv_i64 tmp; int i, n; n = a->imm >> 1; @@ -1056,18 +1064,20 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) } offset = 8; + tmp = tcg_temp_new_i64(); for (i = 0; i < n; i++) { if (a->l) { /* load */ - gen_vfp_ld(s, true, addr); - gen_mov_vreg_F0(true, a->vd + i); + gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); + neon_store_reg64(tmp, a->vd + i); } else { /* store */ - gen_mov_F0_vreg(true, a->vd + i); - gen_vfp_st(s, true, addr); + neon_load_reg64(tmp, a->vd + i); + gen_aa32_st64(s, tmp, addr, get_mem_index(s)); } tcg_gen_addi_i32(addr, addr, offset); } + tcg_temp_free_i64(tmp); if (a->w) { /* writeback */ if (a->p) { diff --git a/target/arm/translate.c b/target/arm/translate.c index 58c0eb1..4a462fe 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1522,24 +1522,6 @@ VFP_GEN_FIX(uhto, ) VFP_GEN_FIX(ulto, ) #undef VFP_GEN_FIX -static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr) -{ - if (dp) { - gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s)); - } else { - gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s)); - } -} - -static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr) -{ - if (dp) { - gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s)); - } else { - gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s)); - } -} - static inline long vfp_reg_offset(bool dp, unsigned reg) { if (dp) { -- cgit v1.1 From 266bd25c485597c94209bfdb3891c1d0c573c164 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:46 +0100 Subject: target/arm: Convert VFP VMLA to decodetree Convert the VFP VMLA instruction to decodetree. This is the first of the VFP 3-operand data processing instructions, so we include in this patch the code which loops over the elements for an old-style VFP vector operation. The existing code to do this looping uses the deprecated cpu_F0s/F0d/F1s/F1d TCG globals; since we are going to be converting instructions one at a time anyway we can take the opportunity to make the new loop use TCG temporaries, which means we can do that conversion one operation at a time rather than needing to do it all in one go. We include an UNDEF check which was missing in the old code: short-vector operations (with stride or length non-zero) were deprecated in v7A and must UNDEF in v8A, so if the MVFR0 FPShVec field does not indicate that support for short vectors is present we UNDEF the operations that would use them. (This is a change of behaviour for Cortex-A7, Cortex-A15 and the v8 CPUs, which previously were all incorrectly allowing short-vector operations.) Note that the conversion fixes a bug in the old code for the case of VFP short-vector "mixed scalar/vector operations". 
These happen where the destination register is in a vector
bank but the second operand is in a scalar bank. For example
  vmla.f64 d10, d1, d16
with length 2 stride 2 is equivalent to the pair of scalar operations
  vmla.f64 d10, d1, d16
  vmla.f64 d8, d3, d16
where the destination and first input register cycle through
their vector but the second input is scalar (d16).

In the old decoder the gen_vfp_F1_mul() operation uses cpu_F1{s,d}
as a temporary output for the multiply, which trashes the second
input operand. For the fully-scalar case (where we never do a
second iteration) and the fully-vector case (where the loop loads
the new second input operand) this doesn't matter, but for the
mixed scalar/vector case we will end up using the wrong value for
later loop iterations. In the new code we use TCG temporaries and
so avoid the bug. This bug is present for all the multiply-accumulate
insns that operate on short vectors: VMLA, VMLS, VNMLA, VNMLS.

Note 2: the expression used to calculate the next register number
in the vector bank is not in fact correct; we leave this behaviour
unchanged from the old decoder and will fix this bug later in the
series.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
---
 target/arm/cpu.h | 5 +
 target/arm/translate-vfp.inc.c | 205 +++++++++++++++++++++++++++++++++++++++++
 target/arm/translate.c | 14 +--
 target/arm/vfp.decode | 6 ++
 4 files changed, 224 insertions(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index dc50c86..9229862 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3377,6 +3377,11 @@ static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
     return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;
 }
 
+static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
+}
+
 /*
  * We always set the FP and SIMD FP16 fields to indicate identical
  * levels of support (assuming SIMD is implemented at all), so
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 9729946..4f922dc 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -1098,3 +1098,208 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
 
     return true;
 }
+
+/*
+ * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
+ * The callback should emit code to write a value to vd. If
+ * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
+ * will contain the old value of the relevant VFP register;
+ * otherwise it must be written to only.
+ */
+typedef void VFPGen3OpSPFn(TCGv_i32 vd,
+                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
+typedef void VFPGen3OpDPFn(TCGv_i64 vd,
+                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
+
+/*
+ * Perform a 3-operand VFP data processing instruction. fn is the
+ * callback to do the actual operation; this function deals with the
+ * code to handle looping around for VFP vector processing.
+ */
+static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
+                          int vd, int vn, int vm, bool reads_vd)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i32 f0, f1, fd;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0x18;
+
+        /* Figure out what type of vector operation this is.
*/ + if ((vd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + delta_d = s->vec_stride + 1; + + if ((vm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i32(); + f1 = tcg_temp_new_i32(); + fd = tcg_temp_new_i32(); + fpst = get_fpstatus_ptr(0); + + neon_load_reg32(f0, vn); + neon_load_reg32(f1, vm); + + for (;;) { + if (reads_vd) { + neon_load_reg32(fd, vd); + } + fn(fd, f0, f1, fpst); + neon_store_reg32(fd, vd); + + if (veclen == 0) { + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask); + neon_load_reg32(f0, vn); + if (delta_m) { + vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask); + neon_load_reg32(f1, vm); + } + } + + tcg_temp_free_i32(f0); + tcg_temp_free_i32(f1); + tcg_temp_free_i32(fd); + tcg_temp_free_ptr(fpst); + + return true; +} + +static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, + int vd, int vn, int vm, bool reads_vd) +{ + uint32_t delta_m = 0; + uint32_t delta_d = 0; + uint32_t bank_mask = 0; + int veclen = s->vec_len; + TCGv_i64 f0, f1, fd; + TCGv_ptr fpst; + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + bank_mask = 0xc; + + /* Figure out what type of vector operation this is. */ + if ((vd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + delta_d = (s->vec_stride >> 1) + 1; + + if ((vm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i64(); + f1 = tcg_temp_new_i64(); + fd = tcg_temp_new_i64(); + fpst = get_fpstatus_ptr(0); + + neon_load_reg64(f0, vn); + neon_load_reg64(f1, vm); + + for (;;) { + if (reads_vd) { + neon_load_reg64(fd, vd); + } + fn(fd, f0, f1, fpst); + neon_store_reg64(fd, vd); + + if (veclen == 0) { + break; + } + /* Set up the operands for the next iteration */ + veclen--; + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask); + neon_load_reg64(f0, vn); + if (delta_m) { + vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask); + neon_load_reg64(f1, vm); + } + } + + tcg_temp_free_i64(f0); + tcg_temp_free_i64(f1); + tcg_temp_free_i64(fd); + tcg_temp_free_ptr(fpst); + + return true; +} + +static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* Note that order of inputs to the add matters for NaNs */ + TCGv_i32 tmp = tcg_temp_new_i32(); + + gen_helper_vfp_muls(tmp, vn, vm, fpst); + gen_helper_vfp_adds(vd, vd, tmp, fpst); + tcg_temp_free_i32(tmp); +} + +static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a) +{ + return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* Note that order of inputs to the add matters for NaNs */ + TCGv_i64 tmp = tcg_temp_new_i64(); + + gen_helper_vfp_muld(tmp, vn, vm, fpst); + gen_helper_vfp_addd(vd, vd, tmp, fpst); + tcg_temp_free_i64(tmp); +} + +static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a) +{ + return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true); +} 
diff --git a/target/arm/translate.c b/target/arm/translate.c index 4a462fe..2215e83 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3133,6 +3133,14 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); rn = VFP_SREG_N(insn); + switch (op) { + case 0: + /* Already handled by decodetree */ + return 1; + default: + break; + } + if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { @@ -3312,12 +3320,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 0: /* VMLA: fd + (fn * fm) */ - /* Note that order of inputs to the add matters for NaNs */ - gen_vfp_F1_mul(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_add(dp); - break; case 1: /* VMLS: fd + -(fn * fm) */ gen_vfp_mul(dp); gen_vfp_F1_neg(dp); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 68c9ffc..9530e17 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -96,3 +96,9 @@ VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \ vd=%vd_sp p=1 u=0 w=1 VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \ vd=%vd_dp p=1 u=0 w=1 + +# 3-register VFP data-processing; bits [23,21:20,6] identify the operation. +VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From e7258280d46af4ab6a0cc93ccfe8f6614defb4b7 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:46 +0100 Subject: target/arm: Convert VFP VMLS to decodetree Convert the VFP VMLS instruction to decodetree. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 38 ++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 8 +------- target/arm/vfp.decode | 5 +++++ 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 4f922dc..00f6440 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1303,3 +1303,41 @@ static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a) { return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true); } + +static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* + * VMLS: vd = vd + -(vn * vm) + * Note that order of inputs to the add matters for NaNs. + */ + TCGv_i32 tmp = tcg_temp_new_i32(); + + gen_helper_vfp_muls(tmp, vn, vm, fpst); + gen_helper_vfp_negs(tmp, tmp); + gen_helper_vfp_adds(vd, vd, tmp, fpst); + tcg_temp_free_i32(tmp); +} + +static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a) +{ + return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* + * VMLS: vd = vd + -(vn * vm) + * Note that order of inputs to the add matters for NaNs. 
+ */ + TCGv_i64 tmp = tcg_temp_new_i64(); + + gen_helper_vfp_muld(tmp, vn, vm, fpst); + gen_helper_vfp_negd(tmp, tmp); + gen_helper_vfp_addd(vd, vd, tmp, fpst); + tcg_temp_free_i64(tmp); +} + +static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a) +{ + return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 2215e83..4e9cabc 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3134,7 +3134,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0: + case 0 ... 1: /* Already handled by decodetree */ return 1; default: @@ -3320,12 +3320,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 1: /* VMLS: fd + -(fn * fm) */ - gen_vfp_mul(dp); - gen_vfp_F1_neg(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_add(dp); - break; case 2: /* VNMLS: -fd + (fn * fm) */ /* Note that it isn't valid to replace (-A + B) with (B - A) * or similar plausible looking simplifications diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 9530e17..7bcf226 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -102,3 +102,8 @@ VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VMLS_sp ---- 1110 0.00 .... .... 1010 .1.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VMLS_dp ---- 1110 0.00 .... .... 1011 .1.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From c54a416cc6d60efbc79dd37aaf0c8918c05b5815 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:46 +0100 Subject: target/arm: Convert VFP VNMLS to decodetree Convert the VFP VNMLS instruction to decodetree. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 42 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 24 +----------------------- target/arm/vfp.decode | 5 +++++ 3 files changed, 48 insertions(+), 23 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 00f6440..1d7100d 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1341,3 +1341,45 @@ static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a) { return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true); } + +static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* + * VNMLS: -fd + (fn * fm) + * Note that it isn't valid to replace (-A + B) with (B - A) or similar + * plausible looking simplifications because this will give wrong results + * for NaNs. + */ + TCGv_i32 tmp = tcg_temp_new_i32(); + + gen_helper_vfp_muls(tmp, vn, vm, fpst); + gen_helper_vfp_negs(vd, vd); + gen_helper_vfp_adds(vd, vd, tmp, fpst); + tcg_temp_free_i32(tmp); +} + +static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a) +{ + return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* + * VNMLS: -fd + (fn * fm) + * Note that it isn't valid to replace (-A + B) with (B - A) or similar + * plausible looking simplifications because this will give wrong results + * for NaNs. 
+ */ + TCGv_i64 tmp = tcg_temp_new_i64(); + + gen_helper_vfp_muld(tmp, vn, vm, fpst); + gen_helper_vfp_negd(vd, vd); + gen_helper_vfp_addd(vd, vd, tmp, fpst); + tcg_temp_free_i64(tmp); +} + +static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a) +{ + return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 4e9cabc..b85baec 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1393,18 +1393,6 @@ VFP_OP2(div) #undef VFP_OP2 -static inline void gen_vfp_F1_mul(int dp) -{ - /* Like gen_vfp_mul() but put result in F1 */ - TCGv_ptr fpst = get_fpstatus_ptr(0); - if (dp) { - gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst); - } else { - gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst); - } - tcg_temp_free_ptr(fpst); -} - static inline void gen_vfp_F1_neg(int dp) { /* Like gen_vfp_neg() but put result in F1 */ @@ -3134,7 +3122,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 1: + case 0 ... 2: /* Already handled by decodetree */ return 1; default: @@ -3320,16 +3308,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 2: /* VNMLS: -fd + (fn * fm) */ - /* Note that it isn't valid to replace (-A + B) with (B - A) - * or similar plausible looking simplifications - * because this will give wrong results for NaNs. - */ - gen_vfp_F1_mul(dp); - gen_mov_F0_vreg(dp, rd); - gen_vfp_neg(dp); - gen_vfp_add(dp); - break; case 3: /* VNMLA: -fd + -(fn * fm) */ gen_vfp_mul(dp); gen_vfp_F1_neg(dp); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 7bcf226..08e4f42 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -107,3 +107,8 @@ VMLS_sp ---- 1110 0.00 .... .... 1010 .1.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VMLS_dp ---- 1110 0.00 .... .... 1011 .1.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VNMLS_sp ---- 1110 0.01 .... .... 1010 .0.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VNMLS_dp ---- 1110 0.01 .... .... 1011 .0.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From 8a483533adc1bdc2decb8f456dbe930a2d245a8b Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:47 +0100 Subject: target/arm: Convert VFP VNMLA to decodetree Convert the VFP VNMLA instruction to decodetree. 
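As a standalone illustration of the intended arithmetic, not part of
the patch (the real code goes via the softfloat helpers so that
rounding behaviour and exception flags come from the guest FP status;
the function name vnmla_ref is invented for the sketch):

    #include <stdio.h>

    /* Reference semantics for VNMLA: vd' = (-vd) + (-(vn * vm)) */
    static double vnmla_ref(double vd, double vn, double vm)
    {
        double tmp = vn * vm;   /* gen_helper_vfp_muld */
        tmp = -tmp;             /* gen_helper_vfp_negd(tmp, tmp) */
        vd = -vd;               /* gen_helper_vfp_negd(vd, vd) */
        return vd + tmp;        /* gen_helper_vfp_addd */
    }

    int main(void)
    {
        printf("%g\n", vnmla_ref(2.0, 3.0, 4.0)); /* prints -14 */
        return 0;
    }

As with VNMLS, the negations are kept as explicit separate steps so
that a NaN input comes out with its sign bit flipped instead of being
algebraically simplified away.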
Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
---
 target/arm/translate-vfp.inc.c | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c | 19 +------------------
 target/arm/vfp.decode | 5 +++++
 3 files changed, 40 insertions(+), 18 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 1d7100d..8532bf4 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -1383,3 +1383,37 @@ static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
 {
     return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
 }
+
+static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* VNMLA: -fd + -(fn * fm) */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_muls(tmp, vn, vm, fpst);
+    gen_helper_vfp_negs(tmp, tmp);
+    gen_helper_vfp_negs(vd, vd);
+    gen_helper_vfp_adds(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /* VNMLA: -fd + -(fn * fm) */
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    gen_helper_vfp_muld(tmp, vn, vm, fpst);
+    gen_helper_vfp_negd(tmp, tmp);
+    gen_helper_vfp_negd(vd, vd);
+    gen_helper_vfp_addd(vd, vd, tmp, fpst);
+    tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index b85baec..93e6725 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1393,16 +1393,6 @@ VFP_OP2(div)
 
 #undef VFP_OP2
 
-static inline void gen_vfp_F1_neg(int dp)
-{
-    /* Like gen_vfp_neg() but put result in F1 */
-    if (dp) {
-        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
-    } else {
-        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
-    }
-}
-
 static inline void gen_vfp_abs(int dp)
 {
     if (dp)
@@ -3122,7 +3112,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 2:
+    case 0 ... 3:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -3308,13 +3298,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 3: /* VNMLA: -fd + -(fn * fm) */
-            gen_vfp_mul(dp);
-            gen_vfp_F1_neg(dp);
-            gen_mov_F0_vreg(dp, rd);
-            gen_vfp_neg(dp);
-            gen_vfp_add(dp);
-            break;
         case 4: /* mul: fn * fm */
             gen_vfp_mul(dp);
             break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index 08e4f42..c50d2c3 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -112,3 +112,8 @@ VNMLS_sp ---- 1110 0.01 .... .... 1010 .0.0 .... \
           vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VNMLS_dp ---- 1110 0.01 .... .... 1011 .0.0 .... \
           vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VNMLA_sp ---- 1110 0.01 .... .... 1010 .1.0 .... \
+          vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VNMLA_dp ---- 1110 0.01 .... .... 1011 .1.0 .... \
+          vm=%vm_dp vn=%vn_dp vd=%vd_dp
-- 
cgit v1.1

From 88c5188ced60e9f2b8cc3af3b9bc4a8031c8c996 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Tue, 11 Jun 2019 16:39:47 +0100
Subject: target/arm: Convert VMUL to decodetree

Convert the VMUL instruction to decodetree.
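This is the first of the conversions where the existing helper can be
handed to do_vfp_3op_sp()/do_vfp_3op_dp() with no wrapper at all:
gen_helper_vfp_muls already has the VFPGen3OpSPFn shape, and reads_vd
is false because the destination temporary is written to only.
Roughly (an illustrative sketch; the gen_helper_* prototypes are
generated from helper.h):

    /* The callback type used by do_vfp_3op_sp() ... */
    typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                               TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);

    /* ... matches the generated prototype of the existing helper: */
    void gen_helper_vfp_muls(TCGv_i32 ret,
                             TCGv_i32 a, TCGv_i32 b, TCGv_ptr fpst);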
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 10 ++++++++++ target/arm/translate.c | 5 +---- target/arm/vfp.decode | 5 +++++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 8532bf4..a2afe82 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1417,3 +1417,13 @@ static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a) { return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true); } + +static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false); +} + +static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 93e6725..a3e13c7 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3112,7 +3112,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 3: + case 0 ... 4: /* Already handled by decodetree */ return 1; default: @@ -3298,9 +3298,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 4: /* mul: fn * fm */ - gen_vfp_mul(dp); - break; case 5: /* nmul: -(fn * fm) */ gen_vfp_mul(dp); gen_vfp_neg(dp); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index c50d2c3..d7fcb97 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -117,3 +117,8 @@ VNMLA_sp ---- 1110 0.01 .... .... 1010 .1.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VNMLA_dp ---- 1110 0.01 .... .... 1011 .1.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VMUL_sp ---- 1110 0.10 .... .... 1010 .0.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VMUL_dp ---- 1110 0.10 .... .... 1011 .0.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From 43c4be1236c105090d134540da1036073d157cd4 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:47 +0100 Subject: target/arm: Convert VNMUL to decodetree Convert the VNMUL instruction to decodetree. 
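VNMUL negates the result of the multiply, matching the architectural
FPNeg(FPMul(...)) ordering. The negation must survive even when the
product is zero, because it changes the sign of that zero; a quick
standalone illustration with host doubles (not the softfloat helpers):

    #include <stdio.h>

    int main(void)
    {
        printf("%g\n", 2.0 * 0.0);    /* prints 0   (VMUL)  */
        printf("%g\n", -(2.0 * 0.0)); /* prints -0  (VNMUL) */
        return 0;
    }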
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 24 ++++++++++++++++++++++++ target/arm/translate.c | 7 +------ target/arm/vfp.decode | 5 +++++ 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index a2afe82..4c684f0 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1427,3 +1427,27 @@ static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false); } + +static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* VNMUL: -(fn * fm) */ + gen_helper_vfp_muls(vd, vn, vm, fpst); + gen_helper_vfp_negs(vd, vd); +} + +static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a) +{ + return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false); +} + +static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* VNMUL: -(fn * fm) */ + gen_helper_vfp_muld(vd, vn, vm, fpst); + gen_helper_vfp_negd(vd, vd); +} + +static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a) +{ + return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index a3e13c7..24c2425 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1388,7 +1388,6 @@ static inline void gen_vfp_##name(int dp) \ VFP_OP2(add) VFP_OP2(sub) -VFP_OP2(mul) VFP_OP2(div) #undef VFP_OP2 @@ -3112,7 +3111,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 4: + case 0 ... 5: /* Already handled by decodetree */ return 1; default: @@ -3298,10 +3297,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 5: /* nmul: -(fn * fm) */ - gen_vfp_mul(dp); - gen_vfp_neg(dp); - break; case 6: /* add: fn + fm */ gen_vfp_add(dp); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index d7fcb97..3063fca 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -122,3 +122,8 @@ VMUL_sp ---- 1110 0.10 .... .... 1010 .0.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VMUL_dp ---- 1110 0.10 .... .... 1011 .0.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VNMUL_sp ---- 1110 0.10 .... .... 1010 .1.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VNMUL_dp ---- 1110 0.10 .... .... 1011 .1.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From ce28b303716e7eca3f3765bf6776d722ebbe1122 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:48 +0100 Subject: target/arm: Convert VADD to decodetree Convert the VADD instruction to decodetree. 
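For reference, the decodetree-generated decoder just extracts the
register fields and hands an argument struct to the trans_ function.
A standalone sketch of that extraction for the VADD_sp pattern,
assuming the %vd_sp/%vn_sp/%vm_sp definitions earlier in vfp.decode
(the four Vx bits with the D/N/M bit appended as the low bit); the
instruction word below is hand-assembled and illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* local stand-in for QEMU's extract32() */
    static uint32_t extract32(uint32_t v, int start, int len)
    {
        return (v >> start) & ((1u << len) - 1);
    }

    int main(void)
    {
        uint32_t insn = 0xee300a81; /* VADD.F32 s0, s1, s2 (cond AL) */
        int vd = (extract32(insn, 12, 4) << 1) | extract32(insn, 22, 1);
        int vn = (extract32(insn, 16, 4) << 1) | extract32(insn, 7, 1);
        int vm = (extract32(insn, 0, 4) << 1) | extract32(insn, 5, 1);

        printf("vadd.f32 s%d, s%d, s%d\n", vd, vn, vm); /* s0, s1, s2 */
        return 0;
    }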
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 10 ++++++++++ target/arm/translate.c | 6 +----- target/arm/vfp.decode | 5 +++++ 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 4c684f0..14aeb25 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1451,3 +1451,13 @@ static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a) { return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false); } + +static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false); +} + +static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 24c2425..6106590 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1386,7 +1386,6 @@ static inline void gen_vfp_##name(int dp) \ tcg_temp_free_ptr(fpst); \ } -VFP_OP2(add) VFP_OP2(sub) VFP_OP2(div) @@ -3111,7 +3110,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 5: + case 0 ... 6: /* Already handled by decodetree */ return 1; default: @@ -3297,9 +3296,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 6: /* add: fn + fm */ - gen_vfp_add(dp); - break; case 7: /* sub: fn - fm */ gen_vfp_sub(dp); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 3063fca..d911f12 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -127,3 +127,8 @@ VNMUL_sp ---- 1110 0.10 .... .... 1010 .1.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VNMUL_dp ---- 1110 0.10 .... .... 1011 .1.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VADD_sp ---- 1110 0.11 .... .... 1010 .0.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VADD_dp ---- 1110 0.11 .... .... 1011 .0.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From 8fec9a119264b7936503abce3c106fad7e3ccb76 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:48 +0100 Subject: target/arm: Convert VSUB to decodetree Convert the VSUB instruction to decodetree. 
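The encoding differs from VADD only in the o-bit at bit 6 of the
instruction word (the '.0.0' versus '.1.0' in the final field of the
pattern line). Working through the fields by hand (illustrative
encodings, cond = AL):

    0xee300a81   VADD.F32 s0, s1, s2   (bit 6 clear)
    0xee300ac1   VSUB.F32 s0, s1, s2   (bit 6 set)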
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 10 ++++++++++ target/arm/translate.c | 6 +----- target/arm/vfp.decode | 5 +++++ 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 14aeb25..12da3b8 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1461,3 +1461,13 @@ static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false); } + +static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false); +} + +static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 6106590..664e3b5 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1386,7 +1386,6 @@ static inline void gen_vfp_##name(int dp) \ tcg_temp_free_ptr(fpst); \ } -VFP_OP2(sub) VFP_OP2(div) #undef VFP_OP2 @@ -3110,7 +3109,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 6: + case 0 ... 7: /* Already handled by decodetree */ return 1; default: @@ -3296,9 +3295,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 7: /* sub: fn - fm */ - gen_vfp_sub(dp); - break; case 8: /* div: fn / fm */ gen_vfp_div(dp); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index d911f12..de56f44 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -132,3 +132,8 @@ VADD_sp ---- 1110 0.11 .... .... 1010 .0.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VADD_dp ---- 1110 0.11 .... .... 1011 .0.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VSUB_sp ---- 1110 0.11 .... .... 1010 .1.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VSUB_dp ---- 1110 0.11 .... .... 1011 .1.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From 519ee7ae31e050eb0ff9ad35c213f0bd7ab1c03e Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:48 +0100 Subject: target/arm: Convert VDIV to decodetree Convert the VDIV instruction to decodetree. 
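With ops 0..8 now out of the old decoder, the short-vector looping
for these insns lives only in do_vfp_3op_{sp,dp}(). The register
stepping that loop performs can be sanity-checked standalone; this
sketch uses the expression as it stands (which, as noted in the VMLA
patch, is not quite right and is fixed later in the series):

    #include <stdio.h>

    int main(void)
    {
        /* dp case: bank_mask = 0xc; FPSCR stride bits 0b11 give
         * s->vec_stride = 3, so delta_d = (3 >> 1) + 1 = 2.
         * Matches the d10 -> d8 example in the VMLA commit message.
         */
        int bank_mask = 0xc, delta_d = 2, vd = 10, veclen = 1;

        printf("d%d", vd);
        while (veclen--) {
            vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
            printf(" -> d%d", vd);
        }
        printf("\n"); /* prints: d10 -> d8 */
        return 0;
    }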
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 10 ++++++++++ target/arm/translate.c | 21 +-------------------- target/arm/vfp.decode | 5 +++++ 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 12da3b8..6af9960 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1471,3 +1471,13 @@ static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false); } + +static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false); +} + +static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 664e3b5..48981af 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1374,22 +1374,6 @@ static TCGv_ptr get_fpstatus_ptr(int neon) return statusptr; } -#define VFP_OP2(name) \ -static inline void gen_vfp_##name(int dp) \ -{ \ - TCGv_ptr fpst = get_fpstatus_ptr(0); \ - if (dp) { \ - gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \ - } else { \ - gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \ - } \ - tcg_temp_free_ptr(fpst); \ -} - -VFP_OP2(div) - -#undef VFP_OP2 - static inline void gen_vfp_abs(int dp) { if (dp) @@ -3109,7 +3093,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 7: + case 0 ... 8: /* Already handled by decodetree */ return 1; default: @@ -3295,9 +3279,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 8: /* div: fn / fm */ - gen_vfp_div(dp); - break; case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ case 12: /* VFMA : fd = muladd( fd, fn, fm) */ diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index de56f44..de305f6 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -137,3 +137,8 @@ VSUB_sp ---- 1110 0.11 .... .... 1010 .1.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VSUB_dp ---- 1110 0.11 .... .... 1011 .1.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VDIV_sp ---- 1110 1.00 .... .... 1010 .0.0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp +VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp -- cgit v1.1 From d4893b01d23060845ee3855bc96626e16aad9ab5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:49 +0100 Subject: target/arm: Convert VFP fused multiply-add insns to decodetree Convert the VFP fused multiply-add instructions (VFNMA, VFNMS, VFMA, VFMS) to decodetree. Note that in the old decode structure we were implementing these to honour the VFP vector stride/length. These instructions were introduced in VFPv4, and in the v7A architecture they are UNPREDICTABLE if the vector stride or length are non-zero. In v8A they must UNDEF if stride or length are non-zero, like all VFP instructions; we choose to UNDEF always. 
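The "no rounding between the multiplication and addition" requirement
is observable: a separate multiply rounds the intermediate product, a
fused one does not. A standalone demonstration with host doubles
(link with -lm, and compile with -ffp-contract=off so the compiler
does not itself fuse the first expression):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        /* (2^27 + 1)^2 = 2^54 + 2^28 + 1; the trailing +1 is lost
         * if the product is rounded to double before the addition.
         */
        double a = 134217729.0;          /* 2^27 + 1 */
        double c = -18014398509481984.0; /* -(2^54)  */

        printf("mul+add: %.1f\n", a * a + c);     /* 268435456.0 */
        printf("fused  : %.1f\n", fma(a, a, c));  /* 268435457.0 */
        return 0;
    }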
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 121 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 53 +----------------- target/arm/vfp.decode | 9 +++ 3 files changed, 131 insertions(+), 52 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 6af9960..ba6506a 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1481,3 +1481,124 @@ static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false); } + +static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a) +{ + /* + * VFNMA : fd = muladd(-fd, fn, fm) + * VFNMS : fd = muladd(-fd, -fn, fm) + * VFMA : fd = muladd( fd, fn, fm) + * VFMS : fd = muladd( fd, -fn, fm) + * + * These are fused multiply-add, and must be done as one floating + * point operation with no rounding between the multiplication and + * addition steps. NB that doing the negations here as separate + * steps is correct : an input NaN should come out with its sign + * bit flipped if it is a negated-input. + */ + TCGv_ptr fpst; + TCGv_i32 vn, vm, vd; + + /* + * Present in VFPv4 only. + * In v7A, UNPREDICTABLE with non-zero vector length/stride; from + * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A. + */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || + (s->vec_len != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vn = tcg_temp_new_i32(); + vm = tcg_temp_new_i32(); + vd = tcg_temp_new_i32(); + + neon_load_reg32(vn, a->vn); + neon_load_reg32(vm, a->vm); + if (a->o2) { + /* VFNMS, VFMS */ + gen_helper_vfp_negs(vn, vn); + } + neon_load_reg32(vd, a->vd); + if (a->o1 & 1) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negs(vd, vd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladds(vd, vn, vm, vd, fpst); + neon_store_reg32(vd, a->vd); + + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(vn); + tcg_temp_free_i32(vm); + tcg_temp_free_i32(vd); + + return true; +} + +static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a) +{ + /* + * VFNMA : fd = muladd(-fd, fn, fm) + * VFNMS : fd = muladd(-fd, -fn, fm) + * VFMA : fd = muladd( fd, fn, fm) + * VFMS : fd = muladd( fd, -fn, fm) + * + * These are fused multiply-add, and must be done as one floating + * point operation with no rounding between the multiplication and + * addition steps. NB that doing the negations here as separate + * steps is correct : an input NaN should come out with its sign + * bit flipped if it is a negated-input. + */ + TCGv_ptr fpst; + TCGv_i64 vn, vm, vd; + + /* + * Present in VFPv4 only. + * In v7A, UNPREDICTABLE with non-zero vector length/stride; from + * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A. + */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || + (s->vec_len != 0 || s->vec_stride != 0)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. 
*/ + if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vn = tcg_temp_new_i64(); + vm = tcg_temp_new_i64(); + vd = tcg_temp_new_i64(); + + neon_load_reg64(vn, a->vn); + neon_load_reg64(vm, a->vm); + if (a->o2) { + /* VFNMS, VFMS */ + gen_helper_vfp_negd(vn, vn); + } + neon_load_reg64(vd, a->vd); + if (a->o1 & 1) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negd(vd, vd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst); + neon_store_reg64(vd, a->vd); + + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(vn); + tcg_temp_free_i64(vm); + tcg_temp_free_i64(vd); + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 48981af..58a0dfd 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3093,7 +3093,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 8: + case 0 ... 13: /* Already handled by decodetree */ return 1; default: @@ -3279,57 +3279,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ - case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ - case 12: /* VFMA : fd = muladd( fd, fn, fm) */ - case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ - /* These are fused multiply-add, and must be done as one - * floating point operation with no rounding between the - * multiplication and addition steps. - * NB that doing the negations here as separate steps is - * correct : an input NaN should come out with its sign bit - * flipped if it is a negated-input. - */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) { - return 1; - } - if (dp) { - TCGv_ptr fpst; - TCGv_i64 frd; - if (op & 1) { - /* VFNMS, VFMS */ - gen_helper_vfp_negd(cpu_F0d, cpu_F0d); - } - frd = tcg_temp_new_i64(); - tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd)); - if (op & 2) { - /* VFNMA, VFNMS */ - gen_helper_vfp_negd(frd, frd); - } - fpst = get_fpstatus_ptr(0); - gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d, - cpu_F1d, frd, fpst); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(frd); - } else { - TCGv_ptr fpst; - TCGv_i32 frd; - if (op & 1) { - /* VFNMS, VFMS */ - gen_helper_vfp_negs(cpu_F0s, cpu_F0s); - } - frd = tcg_temp_new_i32(); - tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd)); - if (op & 2) { - gen_helper_vfp_negs(frd, frd); - } - fpst = get_fpstatus_ptr(0); - gen_helper_vfp_muladds(cpu_F0s, cpu_F0s, - cpu_F1s, frd, fpst); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(frd); - } - break; case 14: /* fconst */ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { return 1; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index de305f6..37eec0e 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -142,3 +142,12 @@ VDIV_sp ---- 1110 1.00 .... .... 1010 .0.0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp + +VFM_sp ---- 1110 1.01 .... .... 1010 . o2:1 . 0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=1 +VFM_dp ---- 1110 1.01 .... .... 1011 . o2:1 . 0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=1 +VFM_sp ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \ + vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=2 +VFM_dp ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... 
\ + vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2 -- cgit v1.1 From b518c753f0b94e14e01e97b4ec42c100dafc0cc2 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:49 +0100 Subject: target/arm: Convert VMOV (imm) to decodetree Convert the VFP VMOV (immediate) instruction to decodetree. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 129 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 27 +-------- target/arm/vfp.decode | 5 ++ 3 files changed, 136 insertions(+), 25 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index ba6506a..a2eeb6c 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1602,3 +1602,132 @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a) return true; } + +static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a) +{ + uint32_t delta_d = 0; + uint32_t bank_mask = 0; + int veclen = s->vec_len; + TCGv_i32 fd; + uint32_t n, i, vd; + + vd = a->vd; + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + bank_mask = 0x18; + /* Figure out what type of vector operation this is. */ + if ((vd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + delta_d = s->vec_stride + 1; + } + } + + n = (a->imm4h << 28) & 0x80000000; + i = ((a->imm4h << 4) & 0x70) | a->imm4l; + if (i & 0x40) { + i |= 0x780; + } else { + i |= 0x800; + } + n |= i << 19; + + fd = tcg_temp_new_i32(); + tcg_gen_movi_i32(fd, n); + + for (;;) { + neon_store_reg32(fd, vd); + + if (veclen == 0) { + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + } + + tcg_temp_free_i32(fd); + return true; +} + +static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) +{ + uint32_t delta_d = 0; + uint32_t bank_mask = 0; + int veclen = s->vec_len; + TCGv_i64 fd; + uint32_t n, i, vd; + + vd = a->vd; + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + bank_mask = 0xc; + /* Figure out what type of vector operation this is. 
*/ + if ((vd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + delta_d = (s->vec_stride >> 1) + 1; + } + } + + n = (a->imm4h << 28) & 0x80000000; + i = ((a->imm4h << 4) & 0x70) | a->imm4l; + if (i & 0x40) { + i |= 0x3f80; + } else { + i |= 0x4000; + } + n |= i << 16; + + fd = tcg_temp_new_i64(); + tcg_gen_movi_i64(fd, ((uint64_t)n) << 32); + + for (;;) { + neon_store_reg64(fd, vd); + + if (veclen == 0) { + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + } + + tcg_temp_free_i64(fd); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 58a0dfd..29b6487 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3033,7 +3033,7 @@ static void gen_neon_dup_high16(TCGv_i32 var) */ static int disas_vfp_insn(DisasContext *s, uint32_t insn) { - uint32_t rd, rn, rm, op, i, n, delta_d, delta_m, bank_mask; + uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask; int dp, veclen; TCGv_i32 tmp; TCGv_i32 tmp2; @@ -3093,7 +3093,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rn = VFP_SREG_N(insn); switch (op) { - case 0 ... 13: + case 0 ... 14: /* Already handled by decodetree */ return 1; default: @@ -3279,29 +3279,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 14: /* fconst */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - - n = (insn << 12) & 0x80000000; - i = ((insn >> 12) & 0x70) | (insn & 0xf); - if (dp) { - if (i & 0x40) - i |= 0x3f80; - else - i |= 0x4000; - n |= i << 16; - tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32); - } else { - if (i & 0x40) - i |= 0x780; - else - i |= 0x800; - n |= i << 19; - tcg_gen_movi_i32(cpu_F0s, n); - } - break; case 15: /* extension space */ switch (rn) { case 0: /* cpy */ diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 37eec0e..1818d4f 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -151,3 +151,8 @@ VFM_sp ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \ vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=2 VFM_dp ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2 + +VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \ + vd=%vd_sp +VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \ + vd=%vd_dp -- cgit v1.1 From 90287e22c987e9840704345ed33d237cbe759dd9 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:49 +0100 Subject: target/arm: Convert VABS to decodetree Convert the VFP VABS instruction to decodetree. Unlike the 3-op versions, we don't pass fpst to the VFPGen2OpSPFn or VFPGen2OpDPFn because none of the operations which use this format and support short vectors will need it. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 167 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 12 ++- target/arm/vfp.decode | 5 ++ 3 files changed, 180 insertions(+), 4 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index a2eeb6c..d0282f1 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1112,6 +1112,14 @@ typedef void VFPGen3OpDPFn(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst); /* + * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp(). + * The callback should emit code to write a value to vd (which + * should be written to only). 
+ */ +typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm); +typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm); + +/* * Perform a 3-operand VFP data processing instruction. fn is the * callback to do the actual operation; this function deals with the * code to handle looping around for VFP vector processing. @@ -1274,6 +1282,155 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, return true; } +static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) +{ + uint32_t delta_m = 0; + uint32_t delta_d = 0; + uint32_t bank_mask = 0; + int veclen = s->vec_len; + TCGv_i32 f0, fd; + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + bank_mask = 0x18; + + /* Figure out what type of vector operation this is. */ + if ((vd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + delta_d = s->vec_stride + 1; + + if ((vm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i32(); + fd = tcg_temp_new_i32(); + + neon_load_reg32(f0, vm); + + for (;;) { + fn(fd, f0); + neon_store_reg32(fd, vd); + + if (veclen == 0) { + break; + } + + if (delta_m == 0) { + /* single source one-many */ + while (veclen--) { + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + neon_store_reg32(fd, vd); + } + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask); + neon_load_reg32(f0, vm); + } + + tcg_temp_free_i32(f0); + tcg_temp_free_i32(fd); + + return true; +} + +static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) +{ + uint32_t delta_m = 0; + uint32_t delta_d = 0; + uint32_t bank_mask = 0; + int veclen = s->vec_len; + TCGv_i64 f0, fd; + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + bank_mask = 0xc; + + /* Figure out what type of vector operation this is. 
*/ + if ((vd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + delta_d = (s->vec_stride >> 1) + 1; + + if ((vm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i64(); + fd = tcg_temp_new_i64(); + + neon_load_reg64(f0, vm); + + for (;;) { + fn(fd, f0); + neon_store_reg64(fd, vd); + + if (veclen == 0) { + break; + } + + if (delta_m == 0) { + /* single source one-many */ + while (veclen--) { + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + neon_store_reg64(fd, vd); + } + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); + vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask); + neon_load_reg64(f0, vm); + } + + tcg_temp_free_i64(f0); + tcg_temp_free_i64(fd); + + return true; +} + static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) { /* Note that order of inputs to the add matters for NaNs */ @@ -1731,3 +1888,13 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) tcg_temp_free_i64(fd); return true; } + +static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a) +{ + return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm); +} + +static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a) +{ + return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 29b6487..e510c5e 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3096,6 +3096,14 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 0 ... 14: /* Already handled by decodetree */ return 1; + case 15: + switch (rn) { + case 1: + /* Already handled by decodetree */ + return 1; + default: + break; + } default: break; } @@ -3104,7 +3112,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { case 0x00: /* vmov */ - case 0x01: /* vabs */ case 0x02: /* vneg */ case 0x03: /* vsqrt */ break; @@ -3284,9 +3291,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 0: /* cpy */ /* no-op */ break; - case 1: /* abs */ - gen_vfp_abs(dp); - break; case 2: /* neg */ gen_vfp_neg(dp); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 1818d4f..7035861 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -156,3 +156,8 @@ VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \ vd=%vd_sp VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \ vd=%vd_dp + +VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... \ + vd=%vd_sp vm=%vm_sp +VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... \ + vd=%vd_dp vm=%vm_dp -- cgit v1.1 From 1882651afdb0ca44f0631192fbe65a71c660d809 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:50 +0100 Subject: target/arm: Convert VNEG to decodetree Convert the VNEG instruction to decodetree. 
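Like VABS, this is a pure sign-bit operation, which is why the 2-op
callback type takes no fpst: the helper simply flips the top bit
(float32_chs() in softfloat terms) and never consults the FP status.
A standalone sketch of the bit-level behaviour (not the softfloat
code itself):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* models helper vfp_negs: XOR the sign bit, nothing else */
    static float negs(float f)
    {
        uint32_t bits;

        memcpy(&bits, &f, sizeof(bits));
        bits ^= 0x80000000u;
        memcpy(&f, &bits, sizeof(f));
        return f;
    }

    int main(void)
    {
        printf("%g %g\n", negs(1.5f), negs(-0.0f)); /* -1.5 0 */
        return 0;
    }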
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 10 ++++++++++ target/arm/translate.c | 6 +----- target/arm/vfp.decode | 5 +++++ 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index d0282f1..6e06b2a 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1898,3 +1898,13 @@ static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a) { return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm); } + +static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a) +{ + return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm); +} + +static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a) +{ + return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index e510c5e..bb2282d 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3098,7 +3098,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 1: + case 1 ... 2: /* Already handled by decodetree */ return 1; default: @@ -3112,7 +3112,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { case 0x00: /* vmov */ - case 0x02: /* vneg */ case 0x03: /* vsqrt */ break; @@ -3291,9 +3290,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 0: /* cpy */ /* no-op */ break; - case 2: /* neg */ - gen_vfp_neg(dp); - break; case 3: /* sqrt */ gen_vfp_sqrt(dp); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 7035861..79e4196 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -161,3 +161,8 @@ VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... \ vd=%vd_sp vm=%vm_sp VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... \ vd=%vd_dp vm=%vm_dp + +VNEG_sp ---- 1110 1.11 0001 .... 1010 01.0 .... \ + vd=%vd_sp vm=%vm_sp +VNEG_dp ---- 1110 1.11 0001 .... 1011 01.0 .... \ + vd=%vd_dp vm=%vm_dp -- cgit v1.1 From b8474540cbce4e2fa45010416375d1bcbe86dc15 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:50 +0100 Subject: target/arm: Convert VSQRT to decodetree Convert the VSQRT instruction to decodetree. 
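Unlike VABS and VNEG, the sqrt helper takes cpu_env rather than a
float_status pointer, so its signature does not match VFPGen2OpSPFn
directly; hence the trivial gen_VSQRT_* wrappers below. Any further
2-op conversion follows the same recipe. As a sketch, a hypothetical
insn VFOO (every name and pattern bit here is invented for
illustration) would need a decode line

    VFOO_sp      ---- 1110 1.11 0010 .... 1010 01.0 ....   vd=%vd_sp vm=%vm_sp

plus a wrapper and trans function:

    /* hypothetical example only, not part of the patch */
    static void gen_VFOO_sp(TCGv_i32 vd, TCGv_i32 vm)
    {
        gen_helper_vfp_foos(vd, vm, cpu_env); /* invented helper */
    }

    static bool trans_VFOO_sp(DisasContext *s, arg_VFOO_sp *a)
    {
        return do_vfp_2op_sp(s, gen_VFOO_sp, a->vd, a->vm);
    }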
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 20 ++++++++++++++++++++ target/arm/translate.c | 14 +------------- target/arm/vfp.decode | 5 +++++ 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 6e06b2a..ae2f77a 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1908,3 +1908,23 @@ static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a) { return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm); } + +static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm) +{ + gen_helper_vfp_sqrts(vd, vm, cpu_env); +} + +static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a) +{ + return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm); +} + +static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm) +{ + gen_helper_vfp_sqrtd(vd, vm, cpu_env); +} + +static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a) +{ + return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm); +} diff --git a/target/arm/translate.c b/target/arm/translate.c index bb2282d..64ebfed 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1390,14 +1390,6 @@ static inline void gen_vfp_neg(int dp) gen_helper_vfp_negs(cpu_F0s, cpu_F0s); } -static inline void gen_vfp_sqrt(int dp) -{ - if (dp) - gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env); - else - gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env); -} - static inline void gen_vfp_cmp(int dp) { if (dp) @@ -3098,7 +3090,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 1 ... 2: + case 1 ... 3: /* Already handled by decodetree */ return 1; default: @@ -3112,7 +3104,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { case 0x00: /* vmov */ - case 0x03: /* vsqrt */ break; case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */ @@ -3290,9 +3281,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 0: /* cpy */ /* no-op */ break; - case 3: /* sqrt */ - gen_vfp_sqrt(dp); - break; case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */ { TCGv_ptr fpst = get_fpstatus_ptr(false); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 79e4196..2780e1e 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -166,3 +166,8 @@ VNEG_sp ---- 1110 1.11 0001 .... 1010 01.0 .... \ vd=%vd_sp vm=%vm_sp VNEG_dp ---- 1110 1.11 0001 .... 1011 01.0 .... \ vd=%vd_dp vm=%vm_dp + +VSQRT_sp ---- 1110 1.11 0001 .... 1010 11.0 .... \ + vd=%vd_sp vm=%vm_sp +VSQRT_dp ---- 1110 1.11 0001 .... 1011 11.0 .... 
\ + vd=%vd_dp vm=%vm_dp -- cgit v1.1 From 17552b979ebb9848a534c25ebed18a1072710058 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:50 +0100 Subject: target/arm: Convert VMOV (register) to decodetree Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 10 ++++++++++ target/arm/translate.c | 8 +------- target/arm/vfp.decode | 5 +++++ 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index ae2f77a..a7e4ae3 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1889,6 +1889,16 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) return true; } +static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a) +{ + return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm); +} + +static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a) +{ + return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm); +} + static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a) { return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm); diff --git a/target/arm/translate.c b/target/arm/translate.c index 64ebfed..79e9cca 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3090,7 +3090,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 1 ... 3: + case 0 ... 3: /* Already handled by decodetree */ return 1; default: @@ -3103,9 +3103,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { - case 0x00: /* vmov */ - break; - case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */ case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */ /* @@ -3278,9 +3275,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 0: /* cpy */ - /* no-op */ - break; case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */ { TCGv_ptr fpst = get_fpstatus_ptr(false); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 2780e1e..b72ab8b 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -157,6 +157,11 @@ VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \ VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \ vd=%vd_dp +VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... \ + vd=%vd_sp vm=%vm_sp +VMOV_reg_dp ---- 1110 1.11 0000 .... 1011 01.0 .... \ + vd=%vd_dp vm=%vm_dp + VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... \ vd=%vd_sp vm=%vm_sp VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... \ -- cgit v1.1 From 386bba2368842fc74388a3c1651c6c0c0c70adbd Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:51 +0100 Subject: target/arm: Convert VFP comparison insns to decodetree Convert the VFP comparison instructions to decodetree. Note that comparison instructions should not honour the VFP short-vector length and stride information: they are scalar-only operations. This applies to all the 2-operand instructions except for VMOV, VABS, VNEG and VSQRT. (In the old decoder this is implemented via the "if (op == 15 && rn > 3) { veclen = 0; }" check.) 
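In other words, only the rn = 0..3 entries in the old op == 15 extension space keep their vector length; everything else collapses to a single scalar operation. A sketch of the rule the new decoder now expresses structurally rather than via that special case (assuming FPSCR.LEN has been cached in the DisasContext as vec_len, as the existing code does):

    /* VMOV/VABS/VNEG/VSQRT (old-decoder rn 0..3) honour short vectors;
     * every other 2-operand insn in the extension space is scalar-only.
     */
    static int effective_veclen(DisasContext *s, int rn)
    {
        return (rn <= 3) ? s->vec_len : 0;
    }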
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 75 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 51 +--------------------------- target/arm/vfp.decode | 5 +++ 3 files changed, 81 insertions(+), 50 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index a7e4ae3..ebde862 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1938,3 +1938,78 @@ static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a) { return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm); } + +static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a) +{ + TCGv_i32 vd, vm; + + /* Vm/M bits must be zero for the Z variant */ + if (a->z && a->vm != 0) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vd = tcg_temp_new_i32(); + vm = tcg_temp_new_i32(); + + neon_load_reg32(vd, a->vd); + if (a->z) { + tcg_gen_movi_i32(vm, 0); + } else { + neon_load_reg32(vm, a->vm); + } + + if (a->e) { + gen_helper_vfp_cmpes(vd, vm, cpu_env); + } else { + gen_helper_vfp_cmps(vd, vm, cpu_env); + } + + tcg_temp_free_i32(vd); + tcg_temp_free_i32(vm); + + return true; +} + +static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) +{ + TCGv_i64 vd, vm; + + /* Vm/M bits must be zero for the Z variant */ + if (a->z && a->vm != 0) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vd = tcg_temp_new_i64(); + vm = tcg_temp_new_i64(); + + neon_load_reg64(vd, a->vd); + if (a->z) { + tcg_gen_movi_i64(vm, 0); + } else { + neon_load_reg64(vm, a->vm); + } + + if (a->e) { + gen_helper_vfp_cmped(vd, vm, cpu_env); + } else { + gen_helper_vfp_cmpd(vd, vm, cpu_env); + } + + tcg_temp_free_i64(vd); + tcg_temp_free_i64(vm); + + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 79e9cca..eb16269 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1390,30 +1390,6 @@ static inline void gen_vfp_neg(int dp) gen_helper_vfp_negs(cpu_F0s, cpu_F0s); } -static inline void gen_vfp_cmp(int dp) -{ - if (dp) - gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env); - else - gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env); -} - -static inline void gen_vfp_cmpe(int dp) -{ - if (dp) - gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env); - else - gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env); -} - -static inline void gen_vfp_F1_ld0(int dp) -{ - if (dp) - tcg_gen_movi_i64(cpu_F1d, 0); - else - tcg_gen_movi_i32(cpu_F1s, 0); -} - #define VFP_GEN_ITOF(name) \ static inline void gen_vfp_##name(int dp, int neon) \ { \ @@ -3091,6 +3067,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 15: switch (rn) { case 0 ... 3: + case 8 ... 11: /* Already handled by decodetree */ return 1; default: @@ -3135,11 +3112,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rd_is_dp = false; break; - case 0x08: case 0x0a: /* vcmp, vcmpz */ - case 0x09: case 0x0b: /* vcmpe, vcmpez */ - no_output = true; - break; - case 0x0c: /* vrintr */ case 0x0d: /* vrintz */ case 0x0e: /* vrintx */ @@ -3240,14 +3212,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) /* Load the initial operands. 
*/ if (op == 15) { switch (rn) { - case 0x08: case 0x09: /* Compare */ - gen_mov_F0_vreg(dp, rd); - gen_mov_F1_vreg(dp, rm); - break; - case 0x0a: case 0x0b: /* Compare with zero */ - gen_mov_F0_vreg(dp, rd); - gen_vfp_F1_ld0(dp); - break; case 0x14: /* vcvt fp <-> fixed */ case 0x15: case 0x16: @@ -3357,19 +3321,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) gen_vfp_msr(tmp); break; } - case 8: /* cmp */ - gen_vfp_cmp(dp); - break; - case 9: /* cmpe */ - gen_vfp_cmpe(dp); - break; - case 10: /* cmpz */ - gen_vfp_cmp(dp); - break; - case 11: /* cmpez */ - gen_vfp_F1_ld0(dp); - gen_vfp_cmpe(dp); - break; case 12: /* vrintr */ { TCGv_ptr fpst = get_fpstatus_ptr(0); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index b72ab8b..9db7aa7 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -176,3 +176,8 @@ VSQRT_sp ---- 1110 1.11 0001 .... 1010 11.0 .... \ vd=%vd_sp vm=%vm_sp VSQRT_dp ---- 1110 1.11 0001 .... 1011 11.0 .... \ vd=%vd_dp vm=%vm_dp + +VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \ + vd=%vd_sp vm=%vm_sp +VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \ + vd=%vd_dp vm=%vm_dp -- cgit v1.1 From b623d803dda805f07aadcbf098961fde27315c19 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:51 +0100 Subject: target/arm: Convert the VCVT-from-f16 insns to decodetree Convert the VCVTT, VCVTB instructions that deal with conversion from half-precision floats to f32 or 64 to decodetree. Since we're no longer constrained to the old decoder's style using cpu_F0s and cpu_F0d we can perform a direct 16 bit load of the right half of the input single-precision register rather than loading the full 32 bits and then doing a separate shift or sign-extension. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 82 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 56 +---------------------------- target/arm/vfp.decode | 6 ++++ 3 files changed, 89 insertions(+), 55 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index ebde862..732bf60 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -31,6 +31,26 @@ #include "decode-vfp-uncond.inc.c" /* + * Return the offset of a 16-bit half of the specified VFP single-precision + * register. If top is true, returns the top 16 bits; otherwise the bottom + * 16 bits. + */ +static inline long vfp_f16_offset(unsigned reg, bool top) +{ + long offs = vfp_reg_offset(false, reg); +#ifdef HOST_WORDS_BIGENDIAN + if (!top) { + offs += 2; + } +#else + if (top) { + offs += 2; + } +#endif + return offs; +} + +/* * Check that VFP access is enabled. If it is, do the necessary * M-profile lazy-FP handling and then return true. 
* If not, emit code to generate an appropriate exception and @@ -2013,3 +2033,65 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) return true; } + +static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) +{ + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_fp16_spconv, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(false); + ahp_mode = get_ahp_flag(); + tmp = tcg_temp_new_i32(); + /* The T bit tells us if we want the low or high 16 bits of Vm */ + tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t)); + gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode); + neon_store_reg32(tmp, a->vd); + tcg_temp_free_i32(ahp_mode); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tmp); + return true; +} + +static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) +{ + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + TCGv_i64 vd; + + if (!dc_isar_feature(aa32_fp16_dpconv, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(false); + ahp_mode = get_ahp_flag(); + tmp = tcg_temp_new_i32(); + /* The T bit tells us if we want the low or high 16 bits of Vm */ + tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t)); + vd = tcg_temp_new_i64(); + gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode); + neon_store_reg64(vd, a->vd); + tcg_temp_free_i32(ahp_mode); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tmp); + tcg_temp_free_i64(vd); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index eb16269..f22b8dd 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3066,7 +3066,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 3: + case 0 ... 5: case 8 ... 11: /* Already handled by decodetree */ return 1; @@ -3080,24 +3080,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. 
*/ switch (rn) { - case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */ - case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */ - /* - * VCVTB, VCVTT: only present with the halfprec extension - * UNPREDICTABLE if bit 8 is set prior to ARMv8 - * (we choose to UNDEF) - */ - if (dp) { - if (!dc_isar_feature(aa32_fp16_dpconv, s)) { - return 1; - } - } else { - if (!dc_isar_feature(aa32_fp16_spconv, s)) { - return 1; - } - } - rm_is_dp = false; - break; case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */ case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */ if (dp) { @@ -3239,42 +3221,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */ - { - TCGv_ptr fpst = get_fpstatus_ptr(false); - TCGv_i32 ahp_mode = get_ahp_flag(); - tmp = gen_vfp_mrs(); - tcg_gen_ext16u_i32(tmp, tmp); - if (dp) { - gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp, - fpst, ahp_mode); - } else { - gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, - fpst, ahp_mode); - } - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); - break; - } - case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */ - { - TCGv_ptr fpst = get_fpstatus_ptr(false); - TCGv_i32 ahp = get_ahp_flag(); - tmp = gen_vfp_mrs(); - tcg_gen_shri_i32(tmp, tmp, 16); - if (dp) { - gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp, - fpst, ahp); - } else { - gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, - fpst, ahp); - } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - break; - } case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */ { TCGv_ptr fpst = get_fpstatus_ptr(false); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 9db7aa7..53d9544 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -181,3 +181,9 @@ VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \ vd=%vd_sp vm=%vm_sp VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \ vd=%vd_dp vm=%vm_dp + +# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp +VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \ + vd=%vd_sp vm=%vm_sp +VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \ + vd=%vd_dp vm=%vm_sp -- cgit v1.1 From cdfd14e86ab0b1ca29a702d13a8e4af2e902a9bf Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:51 +0100 Subject: target/arm: Convert the VCVT-to-f16 insns to decodetree Convert the VCVTT and VCVTB instructions which convert from f32 and f64 to f16 to decodetree. Since we're no longer constrained to the old decoder's style using cpu_F0s and cpu_F0d we can perform a direct 16 bit store of the right half of the input single-precision register rather than doing a load/modify/store sequence on the full 32 bits. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 62 +++++++++++++++++++++++++++++++++ target/arm/translate.c | 79 +----------------------------------------- target/arm/vfp.decode | 6 ++++ 3 files changed, 69 insertions(+), 78 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 732bf60..a19ede8 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2095,3 +2095,65 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) tcg_temp_free_i64(vd); return true; } + +static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) +{ + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_fp16_spconv, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(false); + ahp_mode = get_ahp_flag(); + tmp = tcg_temp_new_i32(); + + neon_load_reg32(tmp, a->vm); + gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode); + tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); + tcg_temp_free_i32(ahp_mode); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tmp); + return true; +} + +static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) +{ + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + TCGv_i64 vm; + + if (!dc_isar_feature(aa32_fp16_dpconv, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(false); + ahp_mode = get_ahp_flag(); + tmp = tcg_temp_new_i32(); + vm = tcg_temp_new_i64(); + + neon_load_reg64(vm, a->vm); + gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode); + tcg_temp_free_i64(vm); + tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); + tcg_temp_free_i32(ahp_mode); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tmp); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index f22b8dd..b53bf93 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -2963,20 +2963,6 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn) #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5) #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) -/* Move between integer and VFP cores. */ -static TCGv_i32 gen_vfp_mrs(void) -{ - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp, cpu_F0s); - return tmp; -} - -static void gen_vfp_msr(TCGv_i32 tmp) -{ - tcg_gen_mov_i32(cpu_F0s, tmp); - tcg_temp_free_i32(tmp); -} - static void gen_neon_dup_low16(TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -3003,8 +2989,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) { uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask; int dp, veclen; - TCGv_i32 tmp; - TCGv_i32 tmp2; if (!arm_dc_feature(s, ARM_FEATURE_VFP)) { return 1; @@ -3066,8 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 5: - case 8 ... 11: + case 0 ... 11: /* Already handled by decodetree */ return 1; default: @@ -3080,20 +3063,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. 
*/ switch (rn) { - case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */ - case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */ - if (dp) { - if (!dc_isar_feature(aa32_fp16_dpconv, s)) { - return 1; - } - } else { - if (!dc_isar_feature(aa32_fp16_spconv, s)) { - return 1; - } - } - rd_is_dp = false; - break; - case 0x0c: /* vrintr */ case 0x0d: /* vrintz */ case 0x0e: /* vrintx */ @@ -3221,52 +3190,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */ - { - TCGv_ptr fpst = get_fpstatus_ptr(false); - TCGv_i32 ahp = get_ahp_flag(); - tmp = tcg_temp_new_i32(); - - if (dp) { - gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d, - fpst, ahp); - } else { - gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, - fpst, ahp); - } - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - gen_mov_F0_vreg(0, rd); - tmp2 = gen_vfp_mrs(); - tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - gen_vfp_msr(tmp); - break; - } - case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */ - { - TCGv_ptr fpst = get_fpstatus_ptr(false); - TCGv_i32 ahp = get_ahp_flag(); - tmp = tcg_temp_new_i32(); - if (dp) { - gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d, - fpst, ahp); - } else { - gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, - fpst, ahp); - } - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - tcg_gen_shli_i32(tmp, tmp, 16); - gen_mov_F0_vreg(0, rd); - tmp2 = gen_vfp_mrs(); - tcg_gen_ext16u_i32(tmp2, tmp2); - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - gen_vfp_msr(tmp); - break; - } case 12: /* vrintr */ { TCGv_ptr fpst = get_fpstatus_ptr(0); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 53d9544..b88d1d0 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -187,3 +187,9 @@ VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \ vd=%vd_sp vm=%vm_sp VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \ vd=%vd_dp vm=%vm_sp + +# VCVTB and VCVTT to f16: Vd format is always vd_sp; Vm format depends on size bit +VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \ + vd=%vd_sp vm=%vm_sp +VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \ + vd=%vd_sp vm=%vm_dp -- cgit v1.1 From e25155f55dc4abb427a88dfe58bbbc550fe7d643 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:51 +0100 Subject: target/arm: Convert VFP round insns to decodetree Convert the VFP round-to-integer instructions VRINTR, VRINTZ and VRINTX to decodetree. These instructions were only introduced as part of the "VFP misc" additions in v8A, so we check this. The old decoder's implementation was incorrectly providing them even for v7A CPUs. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 163 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 45 +----------- target/arm/vfp.decode | 15 ++++ 3 files changed, 179 insertions(+), 44 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index a19ede8..e94a8f2 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2157,3 +2157,166 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) tcg_temp_free_i32(tmp); return true; } + +static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) +{ + TCGv_ptr fpst; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm); + fpst = get_fpstatus_ptr(false); + gen_helper_rints(tmp, tmp, fpst); + neon_store_reg32(tmp, a->vd); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tmp); + return true; +} + +static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a) +{ + TCGv_ptr fpst; + TCGv_i64 tmp; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(); + neon_load_reg64(tmp, a->vm); + fpst = get_fpstatus_ptr(false); + gen_helper_rintd(tmp, tmp, fpst); + neon_store_reg64(tmp, a->vd); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(tmp); + return true; +} + +static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) +{ + TCGv_ptr fpst; + TCGv_i32 tmp; + TCGv_i32 tcg_rmode; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm); + fpst = get_fpstatus_ptr(false); + tcg_rmode = tcg_const_i32(float_round_to_zero); + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + gen_helper_rints(tmp, tmp, fpst); + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + neon_store_reg32(tmp, a->vd); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tcg_rmode); + tcg_temp_free_i32(tmp); + return true; +} + +static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a) +{ + TCGv_ptr fpst; + TCGv_i64 tmp; + TCGv_i32 tcg_rmode; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. 
*/ + if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(); + neon_load_reg64(tmp, a->vm); + fpst = get_fpstatus_ptr(false); + tcg_rmode = tcg_const_i32(float_round_to_zero); + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + gen_helper_rintd(tmp, tmp, fpst); + gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + neon_store_reg64(tmp, a->vd); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tcg_rmode); + return true; +} + +static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) +{ + TCGv_ptr fpst; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i32(); + neon_load_reg32(tmp, a->vm); + fpst = get_fpstatus_ptr(false); + gen_helper_rints_exact(tmp, tmp, fpst); + neon_store_reg32(tmp, a->vd); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(tmp); + return true; +} + +static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) +{ + TCGv_ptr fpst; + TCGv_i64 tmp; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(); + neon_load_reg64(tmp, a->vm); + fpst = get_fpstatus_ptr(false); + gen_helper_rintd_exact(tmp, tmp, fpst); + neon_store_reg64(tmp, a->vd); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(tmp); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index b53bf93..b332d85 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3050,7 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 11: + case 0 ... 14: /* Already handled by decodetree */ return 1; default: @@ -3063,11 +3063,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. 
*/ switch (rn) { - case 0x0c: /* vrintr */ - case 0x0d: /* vrintz */ - case 0x0e: /* vrintx */ - break; - case 0x0f: /* vcvt double<->single */ rd_is_dp = !dp; break; @@ -3190,44 +3185,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 12: /* vrintr */ - { - TCGv_ptr fpst = get_fpstatus_ptr(0); - if (dp) { - gen_helper_rintd(cpu_F0d, cpu_F0d, fpst); - } else { - gen_helper_rints(cpu_F0s, cpu_F0s, fpst); - } - tcg_temp_free_ptr(fpst); - break; - } - case 13: /* vrintz */ - { - TCGv_ptr fpst = get_fpstatus_ptr(0); - TCGv_i32 tcg_rmode; - tcg_rmode = tcg_const_i32(float_round_to_zero); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - if (dp) { - gen_helper_rintd(cpu_F0d, cpu_F0d, fpst); - } else { - gen_helper_rints(cpu_F0s, cpu_F0s, fpst); - } - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_ptr(fpst); - break; - } - case 14: /* vrintx */ - { - TCGv_ptr fpst = get_fpstatus_ptr(0); - if (dp) { - gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst); - } else { - gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst); - } - tcg_temp_free_ptr(fpst); - break; - } case 15: /* single<->double conversion */ if (dp) { gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env); diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index b88d1d0..9942d2a 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -193,3 +193,18 @@ VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \ vd=%vd_sp vm=%vm_sp VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \ vd=%vd_sp vm=%vm_dp + +VRINTR_sp ---- 1110 1.11 0110 .... 1010 01.0 .... \ + vd=%vd_sp vm=%vm_sp +VRINTR_dp ---- 1110 1.11 0110 .... 1011 01.0 .... \ + vd=%vd_dp vm=%vm_dp + +VRINTZ_sp ---- 1110 1.11 0110 .... 1010 11.0 .... \ + vd=%vd_sp vm=%vm_sp +VRINTZ_dp ---- 1110 1.11 0110 .... 1011 11.0 .... \ + vd=%vd_dp vm=%vm_dp + +VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... \ + vd=%vd_sp vm=%vm_sp +VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... \ + vd=%vd_dp vm=%vm_dp -- cgit v1.1 From 6ed7e49c3693ed8411773c4880f42b2932beb12d Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:52 +0100 Subject: target/arm: Convert double-single precision conversion insns to decodetree Convert the VCVT double/single precision conversion insns to decodetree. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 48 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 13 +----------- target/arm/vfp.decode | 6 ++++++ 3 files changed, 55 insertions(+), 12 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index e94a8f2..c500937 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2320,3 +2320,51 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) tcg_temp_free_i64(tmp); return true; } + +static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) +{ + TCGv_i64 vd; + TCGv_i32 vm; + + /* UNDEF accesses to D16-D31 if they don't exist. 
*/ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i32(); + vd = tcg_temp_new_i64(); + neon_load_reg32(vm, a->vm); + gen_helper_vfp_fcvtds(vd, vm, cpu_env); + neon_store_reg64(vd, a->vd); + tcg_temp_free_i32(vm); + tcg_temp_free_i64(vd); + return true; +} + +static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) +{ + TCGv_i64 vm; + TCGv_i32 vd; + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vd = tcg_temp_new_i32(); + vm = tcg_temp_new_i64(); + neon_load_reg64(vm, a->vm); + gen_helper_vfp_fcvtsd(vd, vm, cpu_env); + neon_store_reg32(vd, a->vd); + tcg_temp_free_i32(vd); + tcg_temp_free_i64(vm); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index b332d85..6d2a398 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3050,7 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 14: + case 0 ... 15: /* Already handled by decodetree */ return 1; default: @@ -3063,10 +3063,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { - case 0x0f: /* vcvt double<->single */ - rd_is_dp = !dp; - break; - case 0x10: /* vcvt.fxx.u32 */ case 0x11: /* vcvt.fxx.s32 */ rm_is_dp = false; @@ -3185,13 +3181,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 15: /* single<->double conversion */ - if (dp) { - gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env); - } else { - gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env); - } - break; case 16: /* fuito */ gen_vfp_uito(dp, 0); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 9942d2a..56b8b4e 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -208,3 +208,9 @@ VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... \ vd=%vd_sp vm=%vm_sp VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... \ vd=%vd_dp vm=%vm_dp + +# VCVT between single and double: Vm precision depends on size; Vd is its reverse +VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... \ + vd=%vd_dp vm=%vm_sp +VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... \ + vd=%vd_sp vm=%vm_dp -- cgit v1.1 From 8fc9d8918cde342c71923e361b9f2193e36ed18b Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:52 +0100 Subject: target/arm: Convert integer-to-float insns to decodetree Convert the VCVT integer-to-float instructions to decodetree. 
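Behaviourally these are simple conversions; a C model of the single-precision forms, ignoring the FPSCR rounding-mode and exception-flag plumbing that the vfp_sitos/vfp_uitos helpers take care of:

    #include <stdint.h>

    /* Sketch only: the real helpers round according to the float_status
     * and accumulate the IEEE cumulative exception flags.
     */
    static float vcvt_f32_from_s32(int32_t x)  { return (float)x; }
    static float vcvt_f32_from_u32(uint32_t x) { return (float)x; }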
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 58 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 12 +-------- target/arm/vfp.decode | 6 +++++ 3 files changed, 65 insertions(+), 11 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index c500937..cc3f61d 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2368,3 +2368,61 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) tcg_temp_free_i64(vm); return true; } + +static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) +{ + TCGv_i32 vm; + TCGv_ptr fpst; + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i32(); + neon_load_reg32(vm, a->vm); + fpst = get_fpstatus_ptr(false); + if (a->s) { + /* i32 -> f32 */ + gen_helper_vfp_sitos(vm, vm, fpst); + } else { + /* u32 -> f32 */ + gen_helper_vfp_uitos(vm, vm, fpst); + } + neon_store_reg32(vm, a->vd); + tcg_temp_free_i32(vm); + tcg_temp_free_ptr(fpst); + return true; +} + +static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) +{ + TCGv_i32 vm; + TCGv_i64 vd; + TCGv_ptr fpst; + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i32(); + vd = tcg_temp_new_i64(); + neon_load_reg32(vm, a->vm); + fpst = get_fpstatus_ptr(false); + if (a->s) { + /* i32 -> f64 */ + gen_helper_vfp_sitod(vd, vm, fpst); + } else { + /* u32 -> f64 */ + gen_helper_vfp_uitod(vd, vm, fpst); + } + neon_store_reg64(vd, a->vd); + tcg_temp_free_i32(vm); + tcg_temp_free_i64(vd); + tcg_temp_free_ptr(fpst); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 6d2a398..a50761a 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3050,7 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 15: + case 0 ... 17: /* Already handled by decodetree */ return 1; default: @@ -3063,10 +3063,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) if (op == 15) { /* rn is opcode, encoded as per VFP_SREG_N. */ switch (rn) { - case 0x10: /* vcvt.fxx.u32 */ - case 0x11: /* vcvt.fxx.s32 */ - rm_is_dp = false; - break; case 0x18: /* vcvtr.u32.fxx */ case 0x19: /* vcvtz.u32.fxx */ case 0x1a: /* vcvtr.s32.fxx */ @@ -3181,12 +3177,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 16: /* fuito */ - gen_vfp_uito(dp, 0); - break; - case 17: /* fsito */ - gen_vfp_sito(dp, 0); - break; case 19: /* vjcvt */ gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 56b8b4e..6da9a79 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -214,3 +214,9 @@ VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... \ vd=%vd_dp vm=%vm_sp VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... \ vd=%vd_sp vm=%vm_dp + +# VCVT from integer to floating point: Vm always single; Vd depends on size +VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \ + vd=%vd_sp vm=%vm_sp +VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \ + vd=%vd_dp vm=%vm_sp -- cgit v1.1 From 92073e947487e2109f3dfebfeaa48d6323cbd981 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:52 +0100 Subject: target/arm: Convert VJCVT to decodetree Convert the VJCVT instruction to decodetree. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 28 ++++++++++++++++++++++++++++ target/arm/translate.c | 12 +----------- target/arm/vfp.decode | 4 ++++ 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index cc3f61d..161f0fd 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2426,3 +2426,31 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) tcg_temp_free_ptr(fpst); return true; } + +static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) +{ + TCGv_i32 vd; + TCGv_i64 vm; + + if (!dc_isar_feature(aa32_jscvt, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i64(); + vd = tcg_temp_new_i32(); + neon_load_reg64(vm, a->vm); + gen_helper_vjcvt(vd, vm, cpu_env); + neon_store_reg32(vd, a->vd); + tcg_temp_free_i64(vm); + tcg_temp_free_i32(vd); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index a50761a..d485cd4 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3050,7 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 17: + case 0 ... 19: /* Already handled by decodetree */ return 1; default: @@ -3085,13 +3085,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rm_is_dp = false; break; - case 0x13: /* vjcvt */ - if (!dp || !dc_isar_feature(aa32_jscvt, s)) { - return 1; - } - rd_is_dp = false; - break; - default: return 1; } @@ -3177,9 +3170,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 19: /* vjcvt */ - gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env); - break; case 20: /* fshto */ gen_vfp_shto(dp, 16 - rm, 0); break; diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 6da9a79..1a7c9b5 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -220,3 +220,7 @@ VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \ vd=%vd_sp vm=%vm_sp VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \ vd=%vd_dp vm=%vm_sp + +# VJCVT is always dp to sp +VJCVT ---- 1110 1.11 1001 .... 1011 11.0 .... \ + vd=%vd_sp vm=%vm_dp -- cgit v1.1 From e3d6f4290c788e850c64815f0b3e331600a4bcc0 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:53 +0100 Subject: target/arm: Convert VCVT fp/fixed-point conversion insns to decodetree Convert the VCVT (between floating-point and fixed-point) instructions to decodetree. 
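The immediate in these encodings is "size minus fraction bits", reusing the Vm:M field layout, and the assembled opc field turns the helper dispatch into a plain switch. For example, with sx = 0 (16-bit fixed-point) and an encoded immediate of 12, the translator computes:

    /* opc is op:U:sx as assembled by %vcvt_fix_op below */
    frac_bits = (opc & 1) ? (32 - imm) : (16 - imm);
    /* sx == 0, imm == 12  =>  frac_bits == 16 - 12 == 4:
     * a 16-bit fixed-point value with 4 fractional bits */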
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 124 +++++++++++++++++++++++++++++++++++++++++ target/arm/translate.c | 57 +------------------ target/arm/vfp.decode | 10 ++++ 3 files changed, 136 insertions(+), 55 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 161f0fd..db07fdd 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2454,3 +2454,127 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) tcg_temp_free_i32(vd); return true; } + +static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) +{ + TCGv_i32 vd, shift; + TCGv_ptr fpst; + int frac_bits; + + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm); + + vd = tcg_temp_new_i32(); + neon_load_reg32(vd, a->vd); + + fpst = get_fpstatus_ptr(false); + shift = tcg_const_i32(frac_bits); + + /* Switch on op:U:sx bits */ + switch (a->opc) { + case 0: + gen_helper_vfp_shtos(vd, vd, shift, fpst); + break; + case 1: + gen_helper_vfp_sltos(vd, vd, shift, fpst); + break; + case 2: + gen_helper_vfp_uhtos(vd, vd, shift, fpst); + break; + case 3: + gen_helper_vfp_ultos(vd, vd, shift, fpst); + break; + case 4: + gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst); + break; + case 5: + gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst); + break; + case 6: + gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst); + break; + case 7: + gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst); + break; + default: + g_assert_not_reached(); + } + + neon_store_reg32(vd, a->vd); + tcg_temp_free_i32(vd); + tcg_temp_free_i32(shift); + tcg_temp_free_ptr(fpst); + return true; +} + +static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) +{ + TCGv_i64 vd; + TCGv_i32 shift; + TCGv_ptr fpst; + int frac_bits; + + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + frac_bits = (a->opc & 1) ? 
(32 - a->imm) : (16 - a->imm); + + vd = tcg_temp_new_i64(); + neon_load_reg64(vd, a->vd); + + fpst = get_fpstatus_ptr(false); + shift = tcg_const_i32(frac_bits); + + /* Switch on op:U:sx bits */ + switch (a->opc) { + case 0: + gen_helper_vfp_shtod(vd, vd, shift, fpst); + break; + case 1: + gen_helper_vfp_sltod(vd, vd, shift, fpst); + break; + case 2: + gen_helper_vfp_uhtod(vd, vd, shift, fpst); + break; + case 3: + gen_helper_vfp_ultod(vd, vd, shift, fpst); + break; + case 4: + gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst); + break; + case 5: + gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst); + break; + case 6: + gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst); + break; + case 7: + gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst); + break; + default: + g_assert_not_reached(); + } + + neon_store_reg64(vd, a->vd); + tcg_temp_free_i64(vd); + tcg_temp_free_i32(shift); + tcg_temp_free_ptr(fpst); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index d485cd4..3ad951e 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1439,13 +1439,9 @@ static inline void gen_vfp_##name(int dp, int shift, int neon) \ tcg_temp_free_i32(tmp_shift); \ tcg_temp_free_ptr(statusptr); \ } -VFP_GEN_FIX(tosh, _round_to_zero) VFP_GEN_FIX(tosl, _round_to_zero) -VFP_GEN_FIX(touh, _round_to_zero) VFP_GEN_FIX(toul, _round_to_zero) -VFP_GEN_FIX(shto, ) VFP_GEN_FIX(slto, ) -VFP_GEN_FIX(uhto, ) VFP_GEN_FIX(ulto, ) #undef VFP_GEN_FIX @@ -3050,7 +3046,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 1; case 15: switch (rn) { - case 0 ... 19: + case 0 ... 23: + case 28 ... 31: /* Already handled by decodetree */ return 1; default: @@ -3070,21 +3067,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) rd_is_dp = false; break; - case 0x14: /* vcvt fp <-> fixed */ - case 0x15: - case 0x16: - case 0x17: - case 0x1c: - case 0x1d: - case 0x1e: - case 0x1f: - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - /* Immediate frac_bits has same format as SREG_M. */ - rm_is_dp = false; - break; - default: return 1; } @@ -3143,17 +3125,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) /* Load the initial operands. */ if (op == 15) { switch (rn) { - case 0x14: /* vcvt fp <-> fixed */ - case 0x15: - case 0x16: - case 0x17: - case 0x1c: - case 0x1d: - case 0x1e: - case 0x1f: - /* Source and destination the same. */ - gen_mov_F0_vreg(dp, rd); - break; default: /* One source operand. 
*/ gen_mov_F0_vreg(rm_is_dp, rm); @@ -3170,18 +3141,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) switch (op) { case 15: /* extension space */ switch (rn) { - case 20: /* fshto */ - gen_vfp_shto(dp, 16 - rm, 0); - break; - case 21: /* fslto */ - gen_vfp_slto(dp, 32 - rm, 0); - break; - case 22: /* fuhto */ - gen_vfp_uhto(dp, 16 - rm, 0); - break; - case 23: /* fulto */ - gen_vfp_ulto(dp, 32 - rm, 0); - break; case 24: /* ftoui */ gen_vfp_toui(dp, 0); break; @@ -3194,18 +3153,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) case 27: /* ftosiz */ gen_vfp_tosiz(dp, 0); break; - case 28: /* ftosh */ - gen_vfp_tosh(dp, 16 - rm, 0); - break; - case 29: /* ftosl */ - gen_vfp_tosl(dp, 32 - rm, 0); - break; - case 30: /* ftouh */ - gen_vfp_touh(dp, 16 - rm, 0); - break; - case 31: /* ftoul */ - gen_vfp_toul(dp, 32 - rm, 0); - break; default: /* undefined */ g_assert_not_reached(); } diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index 1a7c9b5..c3223a1 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -224,3 +224,13 @@ VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \ # VJCVT is always dp to sp VJCVT ---- 1110 1.11 1001 .... 1011 11.0 .... \ vd=%vd_sp vm=%vm_dp + +# VCVT between floating-point and fixed-point. The immediate value +# is in the same format as a Vm single-precision register number. +# We assemble bits 18 (op), 16 (u) and 7 (sx) into a single opc field +# for the convenience of the trans_VCVT_fix functions. +%vcvt_fix_op 18:1 16:1 7:1 +VCVT_fix_sp ---- 1110 1.11 1.1. .... 1010 .1.0 .... \ + vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op +VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \ + vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op -- cgit v1.1 From 3111bfc2da6ba0c8396dc97ca479942d711c6146 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:53 +0100 Subject: target/arm: Convert float-to-integer VCVT insns to decodetree Convert the float-to-integer VCVT instructions to decodetree. Since these are the last unconverted instructions, we can delete the old decoder structure entirely now. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 72 ++++++++++++ target/arm/translate.c | 241 +---------------------------------------- target/arm/vfp.decode | 6 + 3 files changed, 80 insertions(+), 239 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index db07fdd..8216dba 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -2578,3 +2578,75 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) tcg_temp_free_ptr(fpst); return true; } + +static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) +{ + TCGv_i32 vm; + TCGv_ptr fpst; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(false); + vm = tcg_temp_new_i32(); + neon_load_reg32(vm, a->vm); + + if (a->s) { + if (a->rz) { + gen_helper_vfp_tosizs(vm, vm, fpst); + } else { + gen_helper_vfp_tosis(vm, vm, fpst); + } + } else { + if (a->rz) { + gen_helper_vfp_touizs(vm, vm, fpst); + } else { + gen_helper_vfp_touis(vm, vm, fpst); + } + } + neon_store_reg32(vm, a->vd); + tcg_temp_free_i32(vm); + tcg_temp_free_ptr(fpst); + return true; +} + +static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) +{ + TCGv_i32 vd; + TCGv_i64 vm; + TCGv_ptr fpst; + + /* UNDEF accesses to D16-D31 if they don't exist. 
*/ + if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(false); + vm = tcg_temp_new_i64(); + vd = tcg_temp_new_i32(); + neon_load_reg64(vm, a->vm); + + if (a->s) { + if (a->rz) { + gen_helper_vfp_tosizd(vd, vm, fpst); + } else { + gen_helper_vfp_tosid(vd, vm, fpst); + } + } else { + if (a->rz) { + gen_helper_vfp_touizd(vd, vm, fpst); + } else { + gen_helper_vfp_touid(vd, vm, fpst); + } + } + neon_store_reg32(vd, a->vd); + tcg_temp_free_i32(vd); + tcg_temp_free_i64(vm); + tcg_temp_free_ptr(fpst); + return true; +} diff --git a/target/arm/translate.c b/target/arm/translate.c index 3ad951e..c274c8b 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1418,9 +1418,7 @@ static inline void gen_vfp_##name(int dp, int neon) \ tcg_temp_free_ptr(statusptr); \ } -VFP_GEN_FTOI(toui) VFP_GEN_FTOI(touiz) -VFP_GEN_FTOI(tosi) VFP_GEN_FTOI(tosiz) #undef VFP_GEN_FTOI @@ -1612,33 +1610,7 @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg) } #define tcg_gen_ld_f32 tcg_gen_ld_i32 -#define tcg_gen_ld_f64 tcg_gen_ld_i64 #define tcg_gen_st_f32 tcg_gen_st_i32 -#define tcg_gen_st_f64 tcg_gen_st_i64 - -static inline void gen_mov_F0_vreg(int dp, int reg) -{ - if (dp) - tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg)); - else - tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg)); -} - -static inline void gen_mov_F1_vreg(int dp, int reg) -{ - if (dp) - tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg)); - else - tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg)); -} - -static inline void gen_mov_vreg_F0(int dp, int reg) -{ - if (dp) - tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg)); - else - tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg)); -} #define ARM_CP_RW_BIT (1 << 20) @@ -2983,9 +2955,6 @@ static void gen_neon_dup_high16(TCGv_i32 var) */ static int disas_vfp_insn(DisasContext *s, uint32_t insn) { - uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask; - int dp, veclen; - if (!arm_dc_feature(s, ARM_FEATURE_VFP)) { return 1; } @@ -3005,214 +2974,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) return 0; } } - - if (extract32(insn, 28, 4) == 0xf) { - /* - * Encodings with T=1 (Thumb) or unconditional (ARM): these - * were all handled by the decodetree decoder, so any insn - * patterns which get here must be UNDEF. - */ - return 1; - } - - /* - * FIXME: this access check should not take precedence over UNDEF - * for invalid encodings; we will generate incorrect syndrome information - * for attempts to execute invalid vfp/neon encodings with FP disabled. - */ - if (!vfp_access_check(s)) { - return 0; - } - - dp = ((insn & 0xf00) == 0xb00); - switch ((insn >> 24) & 0xf) { - case 0xe: - if (insn & (1 << 4)) { - /* already handled by decodetree */ - return 1; - } else { - /* data processing */ - bool rd_is_dp = dp; - bool rm_is_dp = dp; - bool no_output = false; - - /* The opcode is in bits 23, 21, 20 and 6. */ - op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); - rn = VFP_SREG_N(insn); - - switch (op) { - case 0 ... 14: - /* Already handled by decodetree */ - return 1; - case 15: - switch (rn) { - case 0 ... 23: - case 28 ... 31: - /* Already handled by decodetree */ - return 1; - default: - break; - } - default: - break; - } - - if (op == 15) { - /* rn is opcode, encoded as per VFP_SREG_N. 
*/ - switch (rn) { - case 0x18: /* vcvtr.u32.fxx */ - case 0x19: /* vcvtz.u32.fxx */ - case 0x1a: /* vcvtr.s32.fxx */ - case 0x1b: /* vcvtz.s32.fxx */ - rd_is_dp = false; - break; - - default: - return 1; - } - } else if (dp) { - /* rn is register number */ - VFP_DREG_N(rn, insn); - } - - if (rd_is_dp) { - VFP_DREG_D(rd, insn); - } else { - rd = VFP_SREG_D(insn); - } - if (rm_is_dp) { - VFP_DREG_M(rm, insn); - } else { - rm = VFP_SREG_M(insn); - } - - veclen = s->vec_len; - if (op == 15 && rn > 3) { - veclen = 0; - } - - /* Shut up compiler warnings. */ - delta_m = 0; - delta_d = 0; - bank_mask = 0; - - if (veclen > 0) { - if (dp) - bank_mask = 0xc; - else - bank_mask = 0x18; - - /* Figure out what type of vector operation this is. */ - if ((rd & bank_mask) == 0) { - /* scalar */ - veclen = 0; - } else { - if (dp) - delta_d = (s->vec_stride >> 1) + 1; - else - delta_d = s->vec_stride + 1; - - if ((rm & bank_mask) == 0) { - /* mixed scalar/vector */ - delta_m = 0; - } else { - /* vector */ - delta_m = delta_d; - } - } - } - - /* Load the initial operands. */ - if (op == 15) { - switch (rn) { - default: - /* One source operand. */ - gen_mov_F0_vreg(rm_is_dp, rm); - break; - } - } else { - /* Two source operands. */ - gen_mov_F0_vreg(dp, rn); - gen_mov_F1_vreg(dp, rm); - } - - for (;;) { - /* Perform the calculation. */ - switch (op) { - case 15: /* extension space */ - switch (rn) { - case 24: /* ftoui */ - gen_vfp_toui(dp, 0); - break; - case 25: /* ftouiz */ - gen_vfp_touiz(dp, 0); - break; - case 26: /* ftosi */ - gen_vfp_tosi(dp, 0); - break; - case 27: /* ftosiz */ - gen_vfp_tosiz(dp, 0); - break; - default: /* undefined */ - g_assert_not_reached(); - } - break; - default: /* undefined */ - return 1; - } - - /* Write back the result, if any. */ - if (!no_output) { - gen_mov_vreg_F0(rd_is_dp, rd); - } - - /* break out of the loop if we have finished */ - if (veclen == 0) { - break; - } - - if (op == 15 && delta_m == 0) { - /* single source one-many */ - while (veclen--) { - rd = ((rd + delta_d) & (bank_mask - 1)) - | (rd & bank_mask); - gen_mov_vreg_F0(dp, rd); - } - break; - } - /* Setup the next operands. */ - veclen--; - rd = ((rd + delta_d) & (bank_mask - 1)) - | (rd & bank_mask); - - if (op == 15) { - /* One source operand. */ - rm = ((rm + delta_m) & (bank_mask - 1)) - | (rm & bank_mask); - gen_mov_F0_vreg(dp, rm); - } else { - /* Two source operands. */ - rn = ((rn + delta_d) & (bank_mask - 1)) - | (rn & bank_mask); - gen_mov_F0_vreg(dp, rn); - if (delta_m) { - rm = ((rm + delta_m) & (bank_mask - 1)) - | (rm & bank_mask); - gen_mov_F1_vreg(dp, rm); - } - } - } - } - break; - case 0xc: - case 0xd: - /* Already handled by decodetree */ - return 1; - default: - /* Should never happen. */ - return 1; - } - return 0; + /* If the decodetree decoder didn't handle this insn, it must be UNDEF */ + return 1; } static inline bool use_goto_tb(DisasContext *s, target_ulong dest) diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode index c3223a1..ea24365 100644 --- a/target/arm/vfp.decode +++ b/target/arm/vfp.decode @@ -234,3 +234,9 @@ VCVT_fix_sp ---- 1110 1.11 1.1. .... 1010 .1.0 .... \ vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \ vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op + +# VCVT float to integer (VCVT and VCVTR): Vd always single; Vd depends on size +VCVT_sp_int ---- 1110 1.11 110 s:1 .... 1010 rz:1 1.0 .... \ + vd=%vd_sp vm=%vm_sp +VCVT_dp_int ---- 1110 1.11 110 s:1 .... 1011 rz:1 1.0 .... 
\ + vd=%vd_sp vm=%vm_dp -- cgit v1.1 From 18cf951af9a27ae573a6fa17f9d0c103f7b7679b Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Jun 2019 16:39:53 +0100 Subject: target/arm: Fix short-vector increment behaviour For VFP short vectors, the VFP registers are divided into a series of banks: for single-precision these are s0-s7, s8-s15, s16-s23 and s24-s31; for double-precision they are d0-d3, d4-d7, ... d28-d31. Some banks are "scalar" meaning that use of a register within them triggers a pure-scalar or mixed vector-scalar operation rather than a full vector operation. The scalar banks are s0-s7, d0-d3 and d16-d19. When using a bank as part of a vector operation, we iterate through it, increasing the register number by the specified stride each time, and wrapping around to the beginning of the bank. Unfortunately our calculation of the "increment" part of this was incorrect: vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask) will only do the intended thing if bank_mask has exactly one set high bit. For instance for doubles (bank_mask = 0xc), if we start with vd = 6 and delta_d = 2 then vd is updated to 12 rather than the intended 4. This only causes problems in the unlikely case that the starting register is not the first in its bank: if the register number doesn't have to wrap around then the expression happens to give the right answer. Fix this bug by abstracting out the "check whether register is in a scalar bank" and "advance register within bank" operations to utility functions which use the right bit masking operations. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/translate-vfp.inc.c | 100 ++++++++++++++++++++++++----------------- 1 file changed, 60 insertions(+), 40 deletions(-) diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index 8216dba..709fc65 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -1140,6 +1140,42 @@ typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm); typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm); /* + * Return true if the specified S reg is in a scalar bank + * (ie if it is s0..s7) + */ +static inline bool vfp_sreg_is_scalar(int reg) +{ + return (reg & 0x18) == 0; +} + +/* + * Return true if the specified D reg is in a scalar bank + * (ie if it is d0..d3 or d16..d19) + */ +static inline bool vfp_dreg_is_scalar(int reg) +{ + return (reg & 0xc) == 0; +} + +/* + * Advance the S reg number forwards by delta within its bank + * (ie increment the low 3 bits but leave the rest the same) + */ +static inline int vfp_advance_sreg(int reg, int delta) +{ + return ((reg + delta) & 0x7) | (reg & ~0x7); +} + +/* + * Advance the D reg number forwards by delta within its bank + * (ie increment the low 2 bits but leave the rest the same) + */ +static inline int vfp_advance_dreg(int reg, int delta) +{ + return ((reg + delta) & 0x3) | (reg & ~0x3); +} + +/* * Perform a 3-operand VFP data processing instruction. fn is the * callback to do the actual operation; this function deals with the * code to handle looping around for VFP vector processing. @@ -1149,7 +1185,6 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, { uint32_t delta_m = 0; uint32_t delta_d = 0; - uint32_t bank_mask = 0; int veclen = s->vec_len; TCGv_i32 f0, f1, fd; TCGv_ptr fpst; @@ -1164,16 +1199,14 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, } if (veclen > 0) { - bank_mask = 0x18; - /* Figure out what type of vector operation this is. 
*/ - if ((vd & bank_mask) == 0) { + if (vfp_sreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = s->vec_stride + 1; - if ((vm & bank_mask) == 0) { + if (vfp_sreg_is_scalar(vm)) { /* mixed scalar/vector */ delta_m = 0; } else { @@ -1204,11 +1237,11 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, /* Set up the operands for the next iteration */ veclen--; - vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); - vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask); + vd = vfp_advance_sreg(vd, delta_d); + vn = vfp_advance_sreg(vn, delta_d); neon_load_reg32(f0, vn); if (delta_m) { - vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask); + vm = vfp_advance_sreg(vm, delta_m); neon_load_reg32(f1, vm); } } @@ -1226,7 +1259,6 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, { uint32_t delta_m = 0; uint32_t delta_d = 0; - uint32_t bank_mask = 0; int veclen = s->vec_len; TCGv_i64 f0, f1, fd; TCGv_ptr fpst; @@ -1246,16 +1278,14 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, } if (veclen > 0) { - bank_mask = 0xc; - /* Figure out what type of vector operation this is. */ - if ((vd & bank_mask) == 0) { + if (vfp_dreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = (s->vec_stride >> 1) + 1; - if ((vm & bank_mask) == 0) { + if (vfp_dreg_is_scalar(vm)) { /* mixed scalar/vector */ delta_m = 0; } else { @@ -1285,11 +1315,11 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, } /* Set up the operands for the next iteration */ veclen--; - vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask); - vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask); + vd = vfp_advance_dreg(vd, delta_d); + vn = vfp_advance_dreg(vn, delta_d); neon_load_reg64(f0, vn); if (delta_m) { - vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask); + vm = vfp_advance_dreg(vm, delta_m); neon_load_reg64(f1, vm); } } @@ -1306,7 +1336,6 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) { uint32_t delta_m = 0; uint32_t delta_d = 0; - uint32_t bank_mask = 0; int veclen = s->vec_len; TCGv_i32 f0, fd; @@ -1320,16 +1349,14 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) } if (veclen > 0) { - bank_mask = 0x18; - /* Figure out what type of vector operation this is. 
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1355,7 +1382,7 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_sreg(vd, delta_d);
                 neon_store_reg32(fd, vd);
             }
             break;
@@ -1363,8 +1390,8 @@
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vm = vfp_advance_sreg(vm, delta_m);
         neon_load_reg32(f0, vm);
     }
 
@@ -1378,7 +1405,6 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
 {
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, fd;
@@ -1397,16 +1423,14 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1432,7 +1456,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_dreg(vd, delta_d);
                 neon_store_reg64(fd, vd);
             }
             break;
@@ -1440,8 +1464,8 @@
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vm = vfp_advance_dreg(vm, delta_m);
         neon_load_reg64(f0, vm);
     }
 
@@ -1783,7 +1807,6 @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
 static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 {
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 fd;
     uint32_t n, i, vd;
@@ -1804,9 +1827,8 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -1835,7 +1857,7 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
     }
 
     tcg_temp_free_i32(fd);
@@ -1845,7 +1867,6 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 {
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 fd;
     uint32_t n, i, vd;
@@ -1871,9 +1892,8 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -1902,7 +1922,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
     }
 
     tcg_temp_free_i64(fd);
--
cgit v1.1
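
A note on the arithmetic in the commit message above: the following minimal
standalone sketch shows why the old masking expression goes wrong when the
starting register is not the first in its bank. It is not part of the patch;
advance_old and the printf harness are purely illustrative names, while
advance_dreg mirrors the vfp_advance_dreg() helper the patch introduces.

    #include <stdio.h>

    /* The old advance: only correct when bank_mask has a single set bit,
     * because (bank_mask - 1) then covers exactly the in-bank index bits. */
    static int advance_old(int reg, int delta, int bank_mask)
    {
        return ((reg + delta) & (bank_mask - 1)) | (reg & bank_mask);
    }

    /* The fixed D-reg advance, as in the patch: mask the 2-bit in-bank
     * index with 0x3 and preserve the bank-select bits unchanged. */
    static int advance_dreg(int reg, int delta)
    {
        return ((reg + delta) & 0x3) | (reg & ~0x3);
    }

    int main(void)
    {
        /* The commit-message case: d6, stride 2, double bank_mask 0xc. */
        printf("old: d6 + 2 -> d%d\n", advance_old(6, 2, 0xc)); /* d12, wrong */
        printf("new: d6 + 2 -> d%d\n", advance_dreg(6, 2));     /* d4, wraps */

        /* Starting at the first register of the bank, both agree,
         * which is why the bug went unnoticed. */
        printf("old: d4 + 2 -> d%d\n", advance_old(4, 2, 0xc)); /* d6 */
        printf("new: d4 + 2 -> d%d\n", advance_dreg(4, 2));     /* d6 */
        return 0;
    }

For doubles, bank_mask - 1 is 0xb (binary 1011), so bit 3 of the sum leaks
into a neighbouring bank instead of being wrapped; the new helpers avoid this
by using masks (0x3 for D regs, 0x7 for S regs) that cover only the in-bank
index bits.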