path: root/target/riscv/vector_helper.c
Diffstat (limited to 'target/riscv/vector_helper.c')
-rw-r--r--  target/riscv/vector_helper.c  826
1 file changed, 542 insertions(+), 284 deletions(-)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 1b4d5a8..5dc1c10 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -21,10 +21,13 @@
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
+#include "exec/tlb-flags.h"
+#include "exec/target_page.h"
+#include "exec/tswap.h"
#include "fpu/softfloat.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
@@ -75,6 +78,8 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
if (s1 <= vlmax) {
vl = s1;
+ } else if (s1 < 2 * vlmax && cpu->cfg.rvv_vl_half_avl) {
+ vl = (s1 + 1) >> 1;
} else {
vl = vlmax;
}
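
The added branch gives effect to the rvv_vl_half_avl configuration: when the requested AVL (s1) lies strictly between vlmax and 2 * vlmax, vl may be set to ceil(s1 / 2) instead of being clamped to vlmax, which the RVV specification allows in that range. A minimal standalone sketch of the mapping (hypothetical helper, assuming vlmax = 8 for the worked values):

    static uint32_t vl_from_avl_sketch(uint32_t s1, uint32_t vlmax, bool half_avl)
    {
        if (s1 <= vlmax) {
            return s1;              /* e.g. s1 = 8  -> vl = 8 */
        } else if (s1 < 2 * vlmax && half_avl) {
            return (s1 + 1) >> 1;   /* ceil(s1 / 2), e.g. s1 = 11 -> vl = 6 */
        }
        return vlmax;               /* e.g. s1 = 16 -> vl = 8 */
    }
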
@@ -103,11 +108,6 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
return scale < 0 ? vlenb >> -scale : vlenb << scale;
}
-static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
-{
- return (addr & ~env->cur_pmmask) | env->cur_pmbase;
-}
-
/*
* This function checks watchpoint before real load operation.
*
@@ -117,25 +117,42 @@ static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
* It will trigger an exception if there is no mapping in TLB
* and page table walk can't fill the TLB entry. Then the guest
* software can return here after process the exception or never return.
+ *
+ * This function can also be used when direct access to the probe_access_flags
+ * result is needed. If a pointer to a flags operand is provided, the function
+ * calls probe_access_flags instead, honouring the nonfault argument and
+ * updating *host and *flags.
*/
-static void probe_pages(CPURISCVState *env, target_ulong addr,
- target_ulong len, uintptr_t ra,
- MMUAccessType access_type)
+static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len,
+ uintptr_t ra, MMUAccessType access_type, int mmu_index,
+ void **host, int *flags, bool nonfault)
{
target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
target_ulong curlen = MIN(pagelen, len);
- int mmu_index = riscv_env_mmu_index(env, false);
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault, host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
+
if (len > curlen) {
addr += curlen;
curlen = len - curlen;
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault,
+ host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
}
}
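
As the comment above notes, probe_pages now covers two use cases: with flags == NULL it behaves as before and simply faults on a failed probe, while with a flags pointer it goes through probe_access_flags, honouring nonfault and handing the host pointer and TLB flags back to the caller. A sketch of the two call styles (local names are illustrative only):

    /* Faulting probe, old behaviour: no host pointer or flags wanted. */
    probe_pages(env, addr, size, ra, MMU_DATA_LOAD, mmu_index,
                NULL, NULL, false);

    /* Nonfaulting probe that also yields the host pointer and TLB flags. */
    void *host;
    int flags;
    probe_pages(env, addr, size, ra, MMU_DATA_STORE, mmu_index,
                &host, &flags, true);
    if (flags == 0) {
        /* plain RAM, no watchpoint/MMIO: host can be dereferenced directly */
    }
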
+
static inline void vext_set_elem_mask(void *v0, int index,
uint8_t value)
{
@@ -146,34 +163,90 @@ static inline void vext_set_elem_mask(void *v0, int index,
}
/* elements operations for load and store */
-typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
- uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void vext_ldst_elem_fn_tlb(CPURISCVState *env, abi_ptr addr,
+ uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void vext_ldst_elem_fn_host(void *vd, uint32_t idx, void *host);
+
+#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr) \
+{ \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
+} \
+ \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_host(void *vd, uint32_t idx, void *host) \
+{ \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ *cur = (ETYPE)LDSUF##_p(host); \
+}
-#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
-static void NAME(CPURISCVState *env, abi_ptr addr, \
- uint32_t idx, void *vd, uintptr_t retaddr)\
-{ \
- ETYPE *cur = ((ETYPE *)vd + H(idx)); \
- *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
-} \
-
-GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)
-
-#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
-static void NAME(CPURISCVState *env, abi_ptr addr, \
- uint32_t idx, void *vd, uintptr_t retaddr)\
-{ \
- ETYPE data = *((ETYPE *)vd + H(idx)); \
- cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+GEN_VEXT_LD_ELEM(lde_b, uint8_t, H1, ldub)
+GEN_VEXT_LD_ELEM(lde_h, uint16_t, H2, lduw)
+GEN_VEXT_LD_ELEM(lde_w, uint32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(lde_d, uint64_t, H8, ldq)
+
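Each GEN_VEXT_LD_ELEM instantiation now emits a pair of accessors: a _tlb variant that loads through the softmmu helpers and a _host variant that reads straight from a host pointer obtained by a successful probe. For example, GEN_VEXT_LD_ELEM(lde_b, uint8_t, H1, ldub) expands to roughly:

    static inline QEMU_ALWAYS_INLINE
    void lde_b_tlb(CPURISCVState *env, abi_ptr addr,
                   uint32_t idx, void *vd, uintptr_t retaddr)
    {
        uint8_t *cur = ((uint8_t *)vd + H1(idx));
        *cur = cpu_ldub_data_ra(env, addr, retaddr);
    }

    static inline QEMU_ALWAYS_INLINE
    void lde_b_host(void *vd, uint32_t idx, void *host)
    {
        uint8_t *cur = ((uint8_t *)vd + H1(idx));
        *cur = (uint8_t)ldub_p(host);
    }
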
+#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr) \
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+} \
+ \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_host(void *vd, uint32_t idx, void *host) \
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ STSUF##_p(host, data); \
+}
+
+GEN_VEXT_ST_ELEM(ste_b, uint8_t, H1, stb)
+GEN_VEXT_ST_ELEM(ste_h, uint16_t, H2, stw)
+GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl)
+GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
+
+static inline QEMU_ALWAYS_INLINE void
+vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
+ void *vd, uint32_t evl, target_ulong addr,
+ uint32_t reg_start, uintptr_t ra, uint32_t esz,
+ bool is_load)
+{
+ uint32_t i;
+ for (i = env->vstart; i < evl; env->vstart = ++i, addr += esz) {
+ ldst_tlb(env, adjust_addr(env, addr), i, vd, ra);
+ }
}
-GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb)
-GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
-GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
-GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)
+static inline QEMU_ALWAYS_INLINE void
+vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
+ void *vd, uint32_t evl, uint32_t reg_start, void *host,
+ uint32_t esz, bool is_load)
+{
+#if HOST_BIG_ENDIAN
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+#else
+ if (esz == 1) {
+ uint32_t byte_offset = reg_start * esz;
+ uint32_t size = (evl - reg_start) * esz;
+
+ if (is_load) {
+ memcpy(vd + byte_offset, host, size);
+ } else {
+ memcpy(host, vd + byte_offset, size);
+ }
+ } else {
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+ }
+#endif
+}
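
On little-endian hosts the byte-element case (esz == 1) collapses into a single memcpy, because the guest's in-register byte layout then matches guest memory directly; wider elements, and any access on a big-endian host, still go element by element through the _host accessor. Under that assumption, a unit-stride byte load that fits in one page effectively reduces to:

    /* Hypothetical values: loading elements vstart..evl-1, all in one page. */
    vext_continuous_ldst_host(env, lde_b_host, vd, evl, vstart, host, 1, true);
    /* ...which on a little-endian host is equivalent to: */
    memcpy((uint8_t *)vd + vstart, host, evl - vstart);
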
static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
uint32_t desc, uint32_t nf,
@@ -196,11 +269,10 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
* stride: access vector element from strided memory
*/
static void
-vext_ldst_stride(void *vd, void *v0, target_ulong base,
- target_ulong stride, CPURISCVState *env,
- uint32_t desc, uint32_t vm,
- vext_ldst_elem_fn *ldst_elem,
- uint32_t log2_esz, uintptr_t ra)
+vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
+ CPURISCVState *env, uint32_t desc, uint32_t vm,
+ vext_ldst_elem_fn_tlb *ldst_elem, uint32_t log2_esz,
+ uintptr_t ra)
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
@@ -208,7 +280,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
k = 0;
@@ -240,10 +312,10 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
-GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
-GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
-GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)
+GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b_tlb)
+GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h_tlb)
+GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w_tlb)
+GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d_tlb)
#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
@@ -255,39 +327,137 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
-GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
-GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
-GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
+GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b_tlb)
+GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h_tlb)
+GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w_tlb)
+GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d_tlb)
/*
* unit-stride: access elements stored contiguously in memory
*/
/* unmasked unit-stride load and store operation */
-static void
+static inline QEMU_ALWAYS_INLINE void
+vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
+ uint32_t elems, uint32_t nf, uint32_t max_elems,
+ uint32_t log2_esz, bool is_load, int mmu_index,
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uintptr_t ra)
+{
+ void *host;
+ int i, k, flags;
+ uint32_t esz = 1 << log2_esz;
+ uint32_t size = (elems * nf) << log2_esz;
+ uint32_t evl = env->vstart + elems;
+ MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags,
+ true);
+
+ if (flags == 0) {
+ if (nf == 1) {
+ vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart,
+ host, esz, is_load);
+ } else {
+ for (i = env->vstart; i < evl; ++i) {
+ k = 0;
+ while (k < nf) {
+ ldst_host(vd, i + k * max_elems, host);
+ host += esz;
+ k++;
+ }
+ }
+ }
+ env->vstart += elems;
+ } else {
+ if (nf == 1) {
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
+ ra, esz, is_load);
+ } else {
+ /* load bytes from guest memory */
+ for (i = env->vstart; i < evl; env->vstart = ++i) {
+ k = 0;
+ while (k < nf) {
+ ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+ vd, ra);
+ addr += esz;
+ k++;
+ }
+ }
+ }
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl,
- uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uint32_t evl, uintptr_t ra, bool is_load)
{
- uint32_t i, k;
+ uint32_t k;
+ target_ulong page_split, elems, addr;
uint32_t nf = vext_nf(desc);
uint32_t max_elems = vext_max_elems(desc, log2_esz);
uint32_t esz = 1 << log2_esz;
+ uint32_t msize = nf * esz;
+ int mmu_index = riscv_env_mmu_index(env, false);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, evl);
- /* load bytes from guest memory */
- for (i = env->vstart; i < evl; env->vstart = ++i) {
- k = 0;
- while (k < nf) {
- target_ulong addr = base + ((i * nf + k) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- k++;
+#if defined(CONFIG_USER_ONLY)
+ /*
+ * For data sizes <= 6 bytes we get better performance by simply calling
+ * vext_continuous_ldst_tlb
+ */
+ if (nf == 1 && (evl << log2_esz) <= 6) {
+ addr = base + (env->vstart << log2_esz);
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra,
+ esz, is_load);
+
+ env->vstart = 0;
+ vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
+ return;
+ }
+#endif
+
+ /* Calculate the page range of first page */
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= evl)) {
+ elems = evl - env->vstart;
+ }
+
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
+ }
+
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < evl)) {
+ /* Cross page element */
+ if (unlikely(page_split % msize)) {
+ for (k = 0; k < nf; k++) {
+ addr = base + ((env->vstart * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr),
+ env->vstart + k * max_elems, vd, ra);
+ }
+ env->vstart++;
}
+
+ addr = base + ((env->vstart * nf) << log2_esz);
+ /* Get number of elements of second page */
+ elems = evl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
- env->vstart = 0;
+ env->vstart = 0;
vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
}
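
The unit-stride path now splits the transfer at the first page boundary: page_split = -(addr | TARGET_PAGE_MASK) is the number of bytes left in the current page, elems = page_split / msize is how many whole nf-element segments fit there, and a segment that straddles the boundary is done element by element through the TLB accessors before the remainder is handled as a second in-page block. A worked example with assumed values:

    /*
     * Assumed: 4 KiB pages, esz = 4, nf = 2 (so msize = 8),
     * and the first element at guest address 0xffe4.
     *
     *   page_split = -(0xffe4 | TARGET_PAGE_MASK) = 0x1c   (28 bytes left)
     *   elems      = 28 / 8 = 3     (3 whole segments on the first page)
     *   28 % 8     = 4              (a segment straddles the boundary and
     *                                goes through ldst_tlb before page 2)
     */
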
@@ -296,47 +466,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
* stride, stride = NF * sizeof (ETYPE)
*/
-#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
- vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-} \
- \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_us(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), env->vl, GETPC()); \
+#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, \
+ LOAD_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \
+} \
+ \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_us(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), env->vl, GETPC(), true); \
}
-GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
-GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
-GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
-GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+GEN_VEXT_LD_US(vle8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_US(vle16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_US(vle32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_US(vle64_v, int64_t, lde_d_tlb, lde_d_host)
-#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
- vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, \
+ STORE_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- vext_ldst_us(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), env->vl, GETPC()); \
+ vext_ldst_us(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), env->vl, GETPC(), false); \
}
-GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
-GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
-GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
-GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)
+GEN_VEXT_ST_US(vse8_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_US(vse16_v, int16_t, ste_h_tlb, ste_h_host)
+GEN_VEXT_ST_US(vse32_v, int32_t, ste_w_tlb, ste_w_host)
+GEN_VEXT_ST_US(vse64_v, int64_t, ste_d_tlb, ste_d_host)
/*
* unit stride mask load and store, EEW = 1
@@ -346,8 +516,8 @@ void HELPER(vlm_v)(void *vd, void *v0, target_ulong base,
{
/* evl = ceil(vl/8) */
uint8_t evl = (env->vl + 7) >> 3;
- vext_ldst_us(vd, base, env, desc, lde_b,
- 0, evl, GETPC());
+ vext_ldst_us(vd, base, env, desc, lde_b_tlb, lde_b_host,
+ 0, evl, GETPC(), true);
}
void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
@@ -355,8 +525,8 @@ void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
{
/* evl = ceil(vl/8) */
uint8_t evl = (env->vl + 7) >> 3;
- vext_ldst_us(vd, base, env, desc, ste_b,
- 0, evl, GETPC());
+ vext_ldst_us(vd, base, env, desc, ste_b_tlb, ste_b_host,
+ 0, evl, GETPC(), false);
}
/*
@@ -381,7 +551,7 @@ static inline void
vext_ldst_index(void *vd, void *v0, target_ulong base,
void *vs2, CPURISCVState *env, uint32_t desc,
vext_get_index_addr get_index_addr,
- vext_ldst_elem_fn *ldst_elem,
+ vext_ldst_elem_fn_tlb *ldst_elem,
uint32_t log2_esz, uintptr_t ra)
{
uint32_t i, k;
@@ -391,7 +561,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
/* load bytes from guest memory */
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
@@ -422,22 +592,22 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
LOAD_FN, ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
-GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h)
-GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w)
-GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d)
-GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b)
-GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
-GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
-GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
-GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b)
-GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
-GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
-GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
-GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b)
-GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
-GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
-GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)
+GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d_tlb)
#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
@@ -448,76 +618,101 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
GETPC()); \
}
-GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b)
-GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h)
-GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w)
-GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d)
-GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b)
-GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
-GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
-GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
-GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b)
-GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
-GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
-GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
-GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b)
-GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
-GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
-GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)
+GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d_tlb)
/*
* unit-stride fault-only-first load instructions
*/
static inline void
-vext_ldff(void *vd, void *v0, target_ulong base,
- CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem,
- uint32_t log2_esz, uintptr_t ra)
+vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
+ uint32_t desc, vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, uintptr_t ra)
{
- void *host;
uint32_t i, k, vl = 0;
uint32_t nf = vext_nf(desc);
uint32_t vm = vext_vm(desc);
uint32_t max_elems = vext_max_elems(desc, log2_esz);
uint32_t esz = 1 << log2_esz;
+ uint32_t msize = nf * esz;
uint32_t vma = vext_vma(desc);
- target_ulong addr, offset, remain;
+ target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
int mmu_index = riscv_env_mmu_index(env, false);
+ int flags, probe_flags;
+ void *host;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
- /* probe every access */
- for (i = env->vstart; i < env->vl; i++) {
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
- addr = adjust_addr(env, base + i * (nf << log2_esz));
- if (i == 0) {
- probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
- } else {
- /* if it triggers an exception, no need to check watchpoint */
- remain = nf << log2_esz;
- while (remain > 0) {
- offset = -(addr | TARGET_PAGE_MASK);
- host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_index);
- if (host) {
-#ifdef CONFIG_USER_ONLY
- if (!page_check_range(addr, offset, PAGE_READ)) {
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= env->vl)) {
+ elems = env->vl - env->vstart;
+ }
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host,
+ &flags, true);
+
+ /* If we are crossing a page check also the second page. */
+ if (env->vl > elems) {
+ addr_probe = addr + (elems << log2_esz);
+ probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD,
+ mmu_index, &host, &probe_flags, true);
+ flags |= probe_flags;
+ }
+
+ if (flags & ~TLB_WATCHPOINT) {
+ /* probe every access */
+ for (i = env->vstart; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ continue;
+ }
+ addr_i = adjust_addr(env, base + i * (nf << log2_esz));
+ if (i == 0) {
+ /* Allow fault on first element. */
+ probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD,
+ mmu_index, &host, NULL, false);
+ } else {
+ remain = nf << log2_esz;
+ while (remain > 0) {
+ offset = -(addr_i | TARGET_PAGE_MASK);
+
+ /* Probe nonfault on subsequent elements. */
+ probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
+ mmu_index, &host, &flags, true);
+
+ /*
+ * Stop if invalid (unmapped) or mmio (transaction may
+ * fail). Do not stop if watchpoint, as the spec says that
+ * first-fault should continue to access the same
+ * elements regardless of any watchpoint.
+ */
+ if (flags & ~TLB_WATCHPOINT) {
vl = i;
goto ProbeSuccess;
}
-#else
- probe_pages(env, addr, offset, ra, MMU_DATA_LOAD);
-#endif
- } else {
- vl = i;
- goto ProbeSuccess;
- }
- if (remain <= offset) {
- break;
+ if (remain <= offset) {
+ break;
+ }
+ remain -= offset;
+ addr_i = adjust_addr(env, addr_i + offset);
}
- remain -= offset;
- addr = adjust_addr(env, addr + offset);
}
}
}
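
Only TLB flags other than TLB_WATCHPOINT force the slow path here, since a first-fault load must keep accessing the same elements whether or not a watchpoint is armed. In the per-element fallback, element 0 is probed with nonfault = false so that a real fault is raised, while later elements are probed nonfaulting and a failure merely truncates vl. A condensed sketch of that policy (names as in the hunk above):

    if (i == 0) {
        /* first element: allowed to fault, so probe with nonfault = false */
        probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD,
                    mmu_index, &host, NULL, false);
    } else {
        /* later elements: nonfault probe; on failure, shorten vl and stop */
        probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
                    mmu_index, &host, &flags, true);
        if (flags & ~TLB_WATCHPOINT) {
            vl = i;
        }
    }
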
@@ -526,19 +721,54 @@ ProbeSuccess:
if (vl != 0) {
env->vl = vl;
}
- for (i = env->vstart; i < env->vl; i++) {
- k = 0;
- while (k < nf) {
- if (!vm && !vext_elem_mask(v0, i)) {
- /* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
- (i + k * max_elems + 1) * esz);
- k++;
- continue;
+
+ if (env->vstart < env->vl) {
+ if (vm) {
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+ log2_esz, true, mmu_index, ldst_tlb,
+ ldst_host, ra);
+ }
+
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < env->vl)) {
+ /* Cross page element */
+ if (unlikely(page_split % msize)) {
+ for (k = 0; k < nf; k++) {
+ addr = base + ((env->vstart * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr),
+ env->vstart + k * max_elems, vd, ra);
+ }
+ env->vstart++;
+ }
+
+ addr = base + ((env->vstart * nf) << log2_esz);
+ /* Get number of elements of second page */
+ elems = env->vl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+ log2_esz, true, mmu_index, ldst_tlb,
+ ldst_host, ra);
+ }
+ } else {
+ for (i = env->vstart; i < env->vl; i++) {
+ k = 0;
+ while (k < nf) {
+ if (!vext_elem_mask(v0, i)) {
+ /* set masked-off elements to 1s */
+ vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ (i + k * max_elems + 1) * esz);
+ k++;
+ continue;
+ }
+ addr = base + ((i * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+ vd, ra);
+ k++;
+ }
}
- addr = base + ((i * nf + k) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- k++;
}
}
env->vstart = 0;
@@ -546,18 +776,18 @@ ProbeSuccess:
vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}
-#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
+#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldff(vd, v0, base, env, desc, LOAD_FN_TLB, \
+ LOAD_FN_HOST, ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b)
-GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h)
-GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w)
-GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
+GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb, lde_d_host)
#define DO_SWAP(N, M) (M)
#define DO_AND(N, M) (N & M)
@@ -572,81 +802,93 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
/*
* load and store whole register instructions
*/
-static void
+static inline QEMU_ALWAYS_INLINE void
vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uintptr_t ra, bool is_load)
{
- uint32_t i, k, off, pos;
+ target_ulong page_split, elems, addr;
uint32_t nf = vext_nf(desc);
uint32_t vlenb = riscv_cpu_cfg(env)->vlenb;
uint32_t max_elems = vlenb >> log2_esz;
+ uint32_t evl = nf * max_elems;
+ uint32_t esz = 1 << log2_esz;
+ int mmu_index = riscv_env_mmu_index(env, false);
- if (env->vstart >= ((vlenb * nf) >> log2_esz)) {
- env->vstart = 0;
- return;
+ /* Calculate the page range of first page */
+ addr = base + (env->vstart << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / esz;
+ if (unlikely(env->vstart + elems >= evl)) {
+ elems = evl - env->vstart;
}
- k = env->vstart / max_elems;
- off = env->vstart % max_elems;
-
- if (off) {
- /* load/store rest of elements of current segment pointed by vstart */
- for (pos = off; pos < max_elems; pos++, env->vstart++) {
- target_ulong addr = base + ((pos + k * max_elems) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd,
- ra);
- }
- k++;
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
- /* load/store elements for rest of segments */
- for (; k < nf; k++) {
- for (i = 0; i < max_elems; i++, env->vstart++) {
- target_ulong addr = base + ((i + k * max_elems) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < evl)) {
+ /* Cross page element */
+ if (unlikely(page_split % esz)) {
+ addr = base + (env->vstart << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), env->vstart, vd, ra);
+ env->vstart++;
}
+
+ addr = base + (env->vstart << log2_esz);
+ /* Get number of elements of second page */
+ elems = evl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
env->vstart = 0;
}
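
Whole-register accesses now reuse vext_page_ldst_us with nf = 1 and evl = nf * max_elems, so they get the same host fast path and page splitting as unit-stride loads and stores.

    /*
     * Worked example (hypothetical vlenb = 16): vl2re32_v has nf = 2 and
     * log2_esz = 2, so max_elems = 16 >> 2 = 4 and evl = 2 * 4 = 8 elements
     * of 4 bytes, i.e. 32 bytes spanning both destination registers, all
     * eligible for the host fast path when they sit in a single page.
     */
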
-#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d)
-
-#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
+#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), true); \
+}
+
+GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d_tlb, lde_d_host)
+
+#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), false); \
+}
+
+GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b_tlb, ste_b_host)
/*
* Vector Integer Arithmetic Instructions
@@ -891,7 +1133,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -925,7 +1167,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -962,7 +1204,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1002,7 +1244,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1100,7 +1342,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1149,7 +1391,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1213,7 +1455,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1280,7 +1522,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1829,7 +2071,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1855,7 +2097,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
@@ -1880,7 +2122,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
@@ -1906,7 +2148,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1953,8 +2195,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -1978,6 +2218,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vv_rm_1(vd, v0, vs1, vs2,
@@ -2080,8 +2322,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -2105,6 +2345,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vx_rm_1(vd, v0, s1, vs2,
@@ -2879,7 +3121,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -2924,7 +3166,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3512,7 +3754,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
if (vl == 0) { \
return; \
@@ -4035,7 +4277,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -4077,7 +4319,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4272,7 +4514,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4440,6 +4682,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4447,7 +4691,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2); \
} \
- *((TD *)vd + HD(0)) = s1; \
+ if (vl > 0) { \
+ *((TD *)vd + HD(0)) = s1; \
+ } \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
@@ -4526,6 +4772,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4533,7 +4781,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2, &env->fp_status); \
} \
- *((TD *)vd + HD(0)) = s1; \
+ if (vl > 0) { \
+ *((TD *)vd + HD(0)) = s1; \
+ } \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
@@ -4598,7 +4848,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
int a, b; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
a = vext_elem_mask(vs1, i); \
@@ -4688,6 +4938,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
int i;
bool first_mask_bit = false;
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -4760,6 +5012,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
uint32_t sum = 0; \
int i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
@@ -4793,7 +5047,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
uint32_t vma = vext_vma(desc); \
int i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4830,7 +5084,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong offset = s1, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MAX(env->vstart, offset); \
for (i = i_min; i < vl; i++) { \
@@ -4865,7 +5119,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong i_max, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
i_max = MAX(i_min, env->vstart); \
@@ -4879,9 +5133,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
for (i = i_max; i < vl; ++i) { \
- if (vm || vext_elem_mask(v0, i)) { \
- *((ETYPE *)vd + H(i)) = 0; \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ continue; \
} \
+ *((ETYPE *)vd + H(i)) = 0; \
} \
\
env->vstart = 0; \
@@ -4909,7 +5165,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4960,7 +5216,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5037,7 +5293,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint64_t index; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5082,7 +5338,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint64_t index = s1; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5118,6 +5374,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t num = 0, i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vext_elem_mask(vs1, i)) { \
continue; \
@@ -5127,7 +5385,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, num * esz, total_elems * esz); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5178,7 +5436,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \