author     Peter Maydell <peter.maydell@linaro.org>   2023-09-12 15:04:27 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2023-09-21 16:07:14 +0100
commit     aa03378bccb1138cb6a3d5a8c91b11feda036188 (patch)
tree       5bb391500282a2b9fe40a54315ae9c2e0c777a69
parent     31aaaddecb36c17eeeb991e2124de5132df18af9 (diff)
target/arm: New function allocation_tag_mem_probe()
For the FEAT_MOPS operations, the existing allocation_tag_mem()
function almost does what we want, but it will take a watchpoint
exception even for an ra == 0 probe request, and it requires that the
caller guarantee that the memory is accessible. For FEAT_MOPS we want
a function that will not take any kind of exception, and will return
NULL for the not-accessible case.

Rename allocation_tag_mem() to allocation_tag_mem_probe() and add an
extra 'probe' argument that lets us distinguish these cases;
allocation_tag_mem() is now a wrapper that always passes 'false'.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230912140434.1333369-6-peter.maydell@linaro.org
-rw-r--r--  target/arm/tcg/mte_helper.c | 48
1 file changed, 37 insertions(+), 11 deletions(-)
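As a sketch of the intended use (illustrative only, not part of this patch: the caller shape and the env, mmu_idx, ptr and size variables are assumptions), a FEAT_MOPS-style helper that must not take any exception would select the pure-probe mode, probe = true with ra = 0, and handle a NULL result itself:

    /*
     * Illustrative sketch, not from this patch: look up the tag byte
     * for 'ptr' without risking any exception.  With probe = true and
     * ra = 0, allocation_tag_mem_probe() returns NULL both when there
     * is no tag storage and when the page is not accessible, and it
     * never fires watchpoints.
     */
    uint8_t *mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                            MMU_DATA_LOAD, size,
                                            MMU_DATA_LOAD, true, 0);
    if (!mem) {
        /*
         * Hypothetical bail-out: the calling code must handle this
         * case in the right priority relative to MTE traps, as the
         * new doc comment requires.
         */
        return false;
    }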
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index e2494f7..303bcc7 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -50,13 +51,14 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
 }
 
 /**
- * allocation_tag_mem:
+ * allocation_tag_mem_probe:
  * @env: the cpu environment
  * @ptr_mmu_idx: the addressing regime to use for the virtual address
  * @ptr: the virtual address for which to look up tag memory
  * @ptr_access: the access to use for the virtual address
  * @ptr_size: the number of bytes in the normal memory access
  * @tag_access: the access to use for the tag memory
+ * @probe: true to merely probe, never taking an exception
  * @ra: the return address for exception handling
  *
  * Our tag memory is formatted as a sequence of little-endian nibbles.
@@ -65,15 +66,25 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
  * for the higher addr.
  *
  * Here, resolve the physical address from the virtual address, and return
- * a pointer to the corresponding tag byte.  Exit with exception if the
- * virtual address is not accessible for @ptr_access.
+ * a pointer to the corresponding tag byte.
  *
  * If there is no tag storage corresponding to @ptr, return NULL.
+ *
+ * If the page is inaccessible for @ptr_access, or has a watchpoint, there are
+ * three options:
+ * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not
+ *     accessible, and do not take watchpoint traps.  The calling code must
+ *     handle those cases in the right priority compared to MTE traps.
+ * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees
+ *     that the page is going to be accessible.  We will take watchpoint traps.
+ * (3) probe = false, ra != 0 : non-probe -- we will take both memory access
+ *     traps and watchpoint traps.
+ * (probe = true, ra != 0 is invalid and will assert.)
  */
-static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
-                                   uint64_t ptr, MMUAccessType ptr_access,
-                                   int ptr_size, MMUAccessType tag_access,
-                                   uintptr_t ra)
+static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
+                                         uint64_t ptr, MMUAccessType ptr_access,
+                                         int ptr_size, MMUAccessType tag_access,
+                                         bool probe, uintptr_t ra)
 {
 #ifdef CONFIG_USER_ONLY
     uint64_t clean_ptr = useronly_clean_ptr(ptr);
@@ -81,6 +92,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     uint8_t *tags;
     uintptr_t index;
 
+    assert(!(probe && ra));
+
     if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
         cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                               !(flags & PAGE_VALID), ra);
@@ -111,12 +124,16 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      * exception for inaccessible pages, and resolves the virtual address
      * into the softmmu tlb.
      *
-     * When RA == 0, this is for mte_probe.  The page is expected to be
-     * valid.  Indicate to probe_access_flags no-fault, then assert that
-     * we received a valid page.
+     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
+     * Indicate to probe_access_flags no-fault, then either return NULL
+     * for the pure probe, or assert that we received a valid page for the
+     * no-fault-expected probe.
      */
     flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, &full, ra);
+    if (probe && (flags & TLB_INVALID_MASK)) {
+        return NULL;
+    }
     assert(!(flags & TLB_INVALID_MASK));
 
     /* If the virtual page MemAttr != Tagged, access unchecked. */
@@ -157,7 +174,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     }
 
     /* Any debug exception has priority over a tag check exception. */
-    if (unlikely(flags & TLB_WATCHPOINT)) {
+    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
         assert(ra != 0);
         cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
@@ -199,6 +216,15 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
 #endif
 }
 
+static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
+                                   uint64_t ptr, MMUAccessType ptr_access,
+                                   int ptr_size, MMUAccessType tag_access,
+                                   uintptr_t ra)
+{
+    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
+                                    ptr_size, tag_access, false, ra);
+}
+
 uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
 {
     uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
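To summarise the new contract, the three permitted probe/ra combinations described in the doc comment above correspond to calls like the following (a sketch; env, mmu_idx, ptr, size and ra stand for whatever the caller already has in hand):

    uint8_t *p;

    /* (1) probe = true, ra = 0: pure probe.  Never traps; NULL means
     * either "no tag storage" or "page not accessible". */
    p = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 size, MMU_DATA_LOAD, true, 0);

    /* (2) probe = false, ra = 0: no fault expected.  The caller
     * guarantees the page is accessible; watchpoint traps may still
     * be taken. */
    p = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 size, MMU_DATA_LOAD, false, 0);

    /* (3) probe = false, ra != 0: non-probe.  Both memory access traps
     * and watchpoint traps are possible.  Existing callers get this
     * unchanged through the allocation_tag_mem() wrapper: */
    p = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                           size, MMU_DATA_LOAD, ra);

(probe = true with ra != 0 is rejected by the new assert(!(probe && ra)).)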