author    Peter Maydell <peter.maydell@linaro.org>    2023-09-12 15:04:32 +0100
committer Peter Maydell <peter.maydell@linaro.org>    2023-09-21 16:07:14 +0100
commit    69c51dc3723bdcb8d020f812f0d25d17b466d959 (patch)
tree      6aa264a09fcf9186c863d5e5ba2e1a4f677f56ed
parent    6087df574400659226861fa5ba47970f1fbd277b (diff)
target/arm: Implement MTE tag-checking functions for FEAT_MOPS copies
The FEAT_MOPS memory copy operations need an extra helper routine
for checking for MTE tag checking failures beyond the ones we
already added for memory set operations:

 * mte_mops_probe_rev() does the same job as mte_mops_probe(), but
   it checks tags starting at the provided address and working
   backwards, rather than forwards

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230912140434.1333369-11-peter.maydell@linaro.org
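As background for the checker added below: MTE packs two 4-bit allocation
tags into each byte of tag memory, one per 16-byte granule (the even
granule in the low nibble, the odd granule in the high nibble). checkN()
and the new checkNrev() both exploit this by replicating the expected tag
into both nibbles and XORing, so one byte load tests two granules at once.
A minimal standalone sketch of that comparison (illustrative values, not
part of the patch):

/* Illustrative only: the replicate-and-XOR tag test shared by
 * checkN() and checkNrev(). The tag values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* One tag byte covers two 16-byte granules:
     * low nibble = even granule, high nibble = odd granule. */
    uint8_t mem = 0x3a;   /* even granule tag 0xa, odd granule tag 0x3 */
    int cmp = 0xa;        /* the tag we expect */

    cmp *= 0x11;          /* replicate 0xa into both nibbles: 0xaa */
    int diff = mem ^ cmp; /* a zero nibble in diff is a match */

    printf("even granule: %s\n", (diff & 0x0f) ? "mismatch" : "match");
    printf("odd granule:  %s\n", (diff & 0xf0) ? "mismatch" : "match");
    return 0;
}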
-rw-r--r--  target/arm/internals.h       17
-rw-r--r--  target/arm/tcg/mte_helper.c  99
2 files changed, 116 insertions, 0 deletions
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 642f77d..1dd9182 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1289,6 +1289,23 @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
uint32_t desc);
/**
+ * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
+ * operation going in the reverse direction
+ * @env: CPU env
+ * @ptr: *end* address of memory region (dirty pointer)
+ * @size: length of region (guaranteed not to cross a page boundary)
+ * @desc: MTEDESC descriptor word (0 means no MTE checks)
+ * Returns: the size of the region that can be copied without hitting
+ * an MTE tag failure
+ *
+ * Note that we assume that the caller has already checked the TBI
+ * and TCMA bits with mte_checks_needed() and an MTE check is definitely
+ * required.
+ */
+uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
+ uint32_t desc);
+
+/**
* mte_check_fail: Record an MTE tag check failure
* @env: CPU env
* @desc: MTEDESC descriptor word
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 66a80ee..2dd7eb3 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -735,6 +735,55 @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
}
/**
+ * checkNrev:
+ * @mem: tag memory to test
+ * @odd: true to begin testing at the tag in the odd nibble
+ * @cmp: the tag to compare against
+ * @count: number of tags to test
+ *
+ * Return the number of successful tests.
+ * Thus a return value < @count indicates a failure.
+ *
+ * This is like checkN, but it runs backwards, checking the
+ * tags starting at @mem and then the tags preceding it.
+ * This is needed by the backwards-memory-copying operations.
+ */
+static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
+{
+ int n = 0, diff;
+
+ /* Replicate the test tag and compare. */
+ cmp *= 0x11;
+ diff = *mem-- ^ cmp;
+
+ if (!odd) {
+ goto start_even;
+ }
+
+ while (1) {
+ /* Test odd tag. */
+ if (unlikely((diff) & 0xf0)) {
+ break;
+ }
+ if (++n == count) {
+ break;
+ }
+
+ start_even:
+ /* Test even tag. */
+ if (unlikely((diff) & 0x0f)) {
+ break;
+ }
+ if (++n == count) {
+ break;
+ }
+
+ diff = *mem-- ^ cmp;
+ }
+ return n;
+}
+
+/**
* mte_probe_int() - helper for mte_probe and mte_check
* @env: CPU environment
* @desc: MTEDESC descriptor
@@ -1042,6 +1091,56 @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
}
}
+uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
+ uint32_t desc)
+{
+ int mmu_idx, tag_count;
+ uint64_t ptr_tag, tag_first, tag_last;
+ void *mem;
+ bool w = FIELD_EX32(desc, MTEDESC, WRITE);
+ uint32_t n;
+
+ mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ /* True probe; this will never fault */
+ mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
+ w ? MMU_DATA_STORE : MMU_DATA_LOAD,
+ size, MMU_DATA_LOAD, true, 0);
+ if (!mem) {
+ return size;
+ }
+
+ /*
+ * TODO: checkNrev() is not designed for checks of the size we expect
+ * for FEAT_MOPS operations, so we should implement this differently.
+ * Maybe we should do something like
+ * if (region start and size are aligned nicely) {
+ * do direct loads of 64 tag bits at a time;
+ * } else {
+ * call checkNrev()
+ * }
+ */
+ /* Round the bounds to the tag granule, and compute the number of tags. */
+ ptr_tag = allocation_tag_from_addr(ptr);
+ tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
+ tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
+ tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
+ n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
+ if (likely(n == tag_count)) {
+ return size;
+ }
+
+ /*
+ * Failure; for the first granule, it's at @ptr. Otherwise
+ * it's at the last byte of the nth granule. Calculate how
+ * many bytes we can access without hitting that failure.
+ */
+ if (n == 0) {
+ return 0;
+ } else {
+ return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
+ }
+}
+
void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
uint32_t desc)
{
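Finally, a quick standalone check (not part of the patch) of the bounds
arithmetic in mte_mops_probe_rev(), using TAG_GRANULE = 16 as on
target/arm; the ptr, size, and n values are made up for the example:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_GRANULE 16
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
    /* A 40-byte region whose *last* byte is at 0x1025. */
    uint64_t ptr = 0x1025, size = 40;
    uint64_t tag_first = ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE); /* 0xff0 */
    uint64_t tag_last  = ALIGN_DOWN(ptr, TAG_GRANULE);              /* 0x1020 */
    int tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;     /* 4 */

    /* If checkNrev() passes n = 2 granules, the copy may cover the
     * 6 bytes from tag_last up to ptr plus one full earlier granule:
     * (n - 1) * 16 + ((ptr + 1) - tag_last) = 16 + 6 = 22 bytes. */
    int n = 2;
    uint64_t ok = (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
    printf("tag_count=%d copyable=%" PRIu64 " bytes\n", tag_count, ok);
    return 0;
}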