about summary refs log tree commit diff
path: root/target
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2020-05-08 08:43:53 -0700
committerPeter Maydell <peter.maydell@linaro.org>2020-05-11 11:22:06 +0100
commit5c9b8458a0b3008d24d84b67e1c9b6d5f39f4d66 (patch)
treee30fe3c37d432b3ced262d51c71783fc9b01d032 /target
parent4bcc3f0ff8e5ae2b17b5aab9aa613ff1b8025896 (diff)
downloadqemu-5c9b8458a0b3008d24d84b67e1c9b6d5f39f4d66.zip
qemu-5c9b8458a0b3008d24d84b67e1c9b6d5f39f4d66.tar.gz
qemu-5c9b8458a0b3008d24d84b67e1c9b6d5f39f4d66.tar.bz2
target/arm: Use SVEContLdSt for multi-register contiguous loads
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200508154359.7494-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--target/arm/sve_helper.c223
1 file changed, 79 insertions(+), 144 deletions(-)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 7992a56..9365e32 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -4449,27 +4449,28 @@ static inline bool test_host_page(void *host)
}
/*
- * Common helper for all contiguous one-register predicated loads.
+ * Common helper for all contiguous 1,2,3,4-register predicated loads.
*/
static inline QEMU_ALWAYS_INLINE
-void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
+void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint32_t desc, const uintptr_t retaddr,
- const int esz, const int msz,
+ const int esz, const int msz, const int N,
sve_ldst1_host_fn *host_fn,
sve_ldst1_tlb_fn *tlb_fn)
{
const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
- void *vd = &env->vfp.zregs[rd];
const intptr_t reg_max = simd_oprsz(desc);
intptr_t reg_off, reg_last, mem_off;
SVEContLdSt info;
void *host;
- int flags;
+ int flags, i;
/* Find the active elements. */
- if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, 1 << msz)) {
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, N << msz)) {
/* The entire predicate was false; no load occurs. */
- memset(vd, 0, reg_max);
+ for (i = 0; i < N; ++i) {
+ memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
+ }
return;
}
@@ -4477,7 +4478,7 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr);
/* Handle watchpoints for all active elements. */
- sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, 1 << msz,
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, N << msz,
BP_MEM_READ, retaddr);
/* TODO: MTE check. */
@@ -4493,9 +4494,8 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
* which for ARM will raise SyncExternal. Perform the load
* into scratch memory to preserve register state until the end.
*/
- ARMVectorReg scratch;
+ ARMVectorReg scratch[4] = { };
- memset(&scratch, 0, reg_max);
mem_off = info.mem_off_first[0];
reg_off = info.reg_off_first[0];
reg_last = info.reg_off_last[1];
@@ -4510,21 +4510,29 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint64_t pg = vg[reg_off >> 6];
do {
if ((pg >> (reg_off & 63)) & 1) {
- tlb_fn(env, &scratch, reg_off, addr + mem_off, retaddr);
+ for (i = 0; i < N; ++i) {
+ tlb_fn(env, &scratch[i], reg_off,
+ addr + mem_off + (i << msz), retaddr);
+ }
}
reg_off += 1 << esz;
- mem_off += 1 << msz;
+ mem_off += N << msz;
} while (reg_off & 63);
} while (reg_off <= reg_last);
- memcpy(vd, &scratch, reg_max);
+ for (i = 0; i < N; ++i) {
+ memcpy(&env->vfp.zregs[(rd + i) & 31], &scratch[i], reg_max);
+ }
return;
#endif
}
/* The entire operation is in RAM, on valid pages. */
- memset(vd, 0, reg_max);
+ for (i = 0; i < N; ++i) {
+ memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
+ }
+
mem_off = info.mem_off_first[0];
reg_off = info.reg_off_first[0];
reg_last = info.reg_off_last[0];
@@ -4534,10 +4542,13 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint64_t pg = vg[reg_off >> 6];
do {
if ((pg >> (reg_off & 63)) & 1) {
- host_fn(vd, reg_off, host + mem_off);
+ for (i = 0; i < N; ++i) {
+ host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
+ host + mem_off + (i << msz));
+ }
}
reg_off += 1 << esz;
- mem_off += 1 << msz;
+ mem_off += N << msz;
} while (reg_off <= reg_last && (reg_off & 63));
}
@@ -4547,7 +4558,11 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
*/
mem_off = info.mem_off_split;
if (unlikely(mem_off >= 0)) {
- tlb_fn(env, vd, info.reg_off_split, addr + mem_off, retaddr);
+ reg_off = info.reg_off_split;
+ for (i = 0; i < N; ++i) {
+ tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
+ addr + mem_off + (i << msz), retaddr);
+ }
}
mem_off = info.mem_off_first[1];
@@ -4560,10 +4575,13 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint64_t pg = vg[reg_off >> 6];
do {
if ((pg >> (reg_off & 63)) & 1) {
- host_fn(vd, reg_off, host + mem_off);
+ for (i = 0; i < N; ++i) {
+ host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
+ host + mem_off + (i << msz));
+ }
}
reg_off += 1 << esz;
- mem_off += 1 << msz;
+ mem_off += N << msz;
} while (reg_off & 63);
} while (reg_off <= reg_last);
}
@@ -4573,7 +4591,7 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
- sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, \
sve_##NAME##_host, sve_##NAME##_tlb); \
}
@@ -4581,159 +4599,76 @@ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
- sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
} \
void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \
{ \
- sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
}
-DO_LD1_1(ld1bb, 0)
-DO_LD1_1(ld1bhu, 1)
-DO_LD1_1(ld1bhs, 1)
-DO_LD1_1(ld1bsu, 2)
-DO_LD1_1(ld1bss, 2)
-DO_LD1_1(ld1bdu, 3)
-DO_LD1_1(ld1bds, 3)
+DO_LD1_1(ld1bb, MO_8)
+DO_LD1_1(ld1bhu, MO_16)
+DO_LD1_1(ld1bhs, MO_16)
+DO_LD1_1(ld1bsu, MO_32)
+DO_LD1_1(ld1bss, MO_32)
+DO_LD1_1(ld1bdu, MO_64)
+DO_LD1_1(ld1bds, MO_64)
-DO_LD1_2(ld1hh, 1, 1)
-DO_LD1_2(ld1hsu, 2, 1)
-DO_LD1_2(ld1hss, 2, 1)
-DO_LD1_2(ld1hdu, 3, 1)
-DO_LD1_2(ld1hds, 3, 1)
+DO_LD1_2(ld1hh, MO_16, MO_16)
+DO_LD1_2(ld1hsu, MO_32, MO_16)
+DO_LD1_2(ld1hss, MO_32, MO_16)
+DO_LD1_2(ld1hdu, MO_64, MO_16)
+DO_LD1_2(ld1hds, MO_64, MO_16)
-DO_LD1_2(ld1ss, 2, 2)
-DO_LD1_2(ld1sdu, 3, 2)
-DO_LD1_2(ld1sds, 3, 2)
+DO_LD1_2(ld1ss, MO_32, MO_32)
+DO_LD1_2(ld1sdu, MO_64, MO_32)
+DO_LD1_2(ld1sds, MO_64, MO_32)
-DO_LD1_2(ld1dd, 3, 3)
+DO_LD1_2(ld1dd, MO_64, MO_64)
#undef DO_LD1_1
#undef DO_LD1_2
-/*
- * Common helpers for all contiguous 2,3,4-register predicated loads.
- */
-static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
- uint32_t desc, int size, uintptr_t ra,
- sve_ldst1_tlb_fn *tlb_fn)
-{
- const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
- intptr_t i, oprsz = simd_oprsz(desc);
- ARMVectorReg scratch[2] = { };
-
- for (i = 0; i < oprsz; ) {
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
- do {
- if (pg & 1) {
- tlb_fn(env, &scratch[0], i, addr, ra);
- tlb_fn(env, &scratch[1], i, addr + size, ra);
- }
- i += size, pg >>= size;
- addr += 2 * size;
- } while (i & 15);
- }
-
- /* Wait until all exceptions have been raised to write back. */
- memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
- memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
-}
-
-static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
- uint32_t desc, int size, uintptr_t ra,
- sve_ldst1_tlb_fn *tlb_fn)
-{
- const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
- intptr_t i, oprsz = simd_oprsz(desc);
- ARMVectorReg scratch[3] = { };
-
- for (i = 0; i < oprsz; ) {
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
- do {
- if (pg & 1) {
- tlb_fn(env, &scratch[0], i, addr, ra);
- tlb_fn(env, &scratch[1], i, addr + size, ra);
- tlb_fn(env, &scratch[2], i, addr + 2 * size, ra);
- }
- i += size, pg >>= size;
- addr += 3 * size;
- } while (i & 15);
- }
-
- /* Wait until all exceptions have been raised to write back. */
- memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
- memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
- memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz);
-}
-
-static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
- uint32_t desc, int size, uintptr_t ra,
- sve_ldst1_tlb_fn *tlb_fn)
-{
- const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
- intptr_t i, oprsz = simd_oprsz(desc);
- ARMVectorReg scratch[4] = { };
-
- for (i = 0; i < oprsz; ) {
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
- do {
- if (pg & 1) {
- tlb_fn(env, &scratch[0], i, addr, ra);
- tlb_fn(env, &scratch[1], i, addr + size, ra);
- tlb_fn(env, &scratch[2], i, addr + 2 * size, ra);
- tlb_fn(env, &scratch[3], i, addr + 3 * size, ra);
- }
- i += size, pg >>= size;
- addr += 4 * size;
- } while (i & 15);
- }
-
- /* Wait until all exceptions have been raised to write back. */
- memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
- memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
- memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz);
- memcpy(&env->vfp.zregs[(rd + 3) & 31], &scratch[3], oprsz);
-}
-
#define DO_LDN_1(N) \
-void QEMU_FLATTEN HELPER(sve_ld##N##bb_r) \
- (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
-{ \
- sve_ld##N##_r(env, vg, addr, desc, 1, GETPC(), sve_ld1bb_tlb); \
+void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, \
+ sve_ld1bb_host, sve_ld1bb_tlb); \
}
-#define DO_LDN_2(N, SUFF, SIZE) \
-void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_le_r) \
- (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
+#define DO_LDN_2(N, SUFF, ESZ) \
+void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
{ \
- sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \
- sve_ld1##SUFF##_le_tlb); \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
+ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
} \
-void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_be_r) \
- (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
+void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
{ \
- sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \
- sve_ld1##SUFF##_be_tlb); \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
+ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
}
DO_LDN_1(2)
DO_LDN_1(3)
DO_LDN_1(4)
-DO_LDN_2(2, hh, 2)
-DO_LDN_2(3, hh, 2)
-DO_LDN_2(4, hh, 2)
+DO_LDN_2(2, hh, MO_16)
+DO_LDN_2(3, hh, MO_16)
+DO_LDN_2(4, hh, MO_16)
-DO_LDN_2(2, ss, 4)
-DO_LDN_2(3, ss, 4)
-DO_LDN_2(4, ss, 4)
+DO_LDN_2(2, ss, MO_32)
+DO_LDN_2(3, ss, MO_32)
+DO_LDN_2(4, ss, MO_32)
-DO_LDN_2(2, dd, 8)
-DO_LDN_2(3, dd, 8)
-DO_LDN_2(4, dd, 8)
+DO_LDN_2(2, dd, MO_64)
+DO_LDN_2(3, dd, MO_64)
+DO_LDN_2(4, dd, MO_64)
#undef DO_LDN_1
#undef DO_LDN_2