author    Richard Henderson <richard.henderson@linaro.org>  2020-12-12 10:38:21 -0600
committer Richard Henderson <richard.henderson@linaro.org>  2021-01-07 05:09:41 -1000
commit    1da8de39a39c55560cb4bf0cea94d598fea035cd (patch)
tree      59d8b4160b3342019ef5fd8c1392af7812de0713 /util
parent    df5d2b1658b988cb2be557e9f3114115935506ef (diff)
util: Enhance flush_icache_range with separate data pointer
We are shortly going to have a split rw/rx jit buffer.  Depending on
the host, we need to flush the dcache at the rw data pointer and
flush the icache at the rx code pointer.

For now, the two passed pointers are identical, so there is no
effective change in behaviour.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
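For illustration only (not part of this patch): a minimal sketch of how a
caller with such a split mapping might use the new entry point.  The names
commit_code, code_rx, code_rw and nbytes are hypothetical.

    #include <stddef.h>
    #include <stdint.h>

    /* Interface introduced by this patch (util/cacheflush.c). */
    void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len);

    /*
     * Hypothetical JIT buffer mapped twice: writable through code_rw,
     * executable through code_rx.  After emitting instructions via the
     * rw mapping, make them visible to instruction fetch via rx.
     */
    static void commit_code(void *code_rx, void *code_rw, size_t nbytes)
    {
        flush_idcache_range((uintptr_t)code_rx, (uintptr_t)code_rw, nbytes);
    }

While the buffer is still mapped only once, code_rx == code_rw and the
helper behaves exactly like the old flush_icache_range.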
Diffstat (limited to 'util')
-rw-r--r--  util/cacheflush.c | 38
-rw-r--r--  util/cacheinfo.c  |  8
2 files changed, 27 insertions(+), 19 deletions(-)
diff --git a/util/cacheflush.c b/util/cacheflush.c
index 2881832..92805ef 100644
--- a/util/cacheflush.c
+++ b/util/cacheflush.c
@@ -21,29 +21,32 @@
#include <sys/cachectl.h>
#endif
-void flush_icache_range(uintptr_t start, uintptr_t stop)
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
- cacheflush((void *)start, stop - start, ICACHE);
+ if (rx != rw) {
+ cacheflush((void *)rw, len, DCACHE);
+ }
+ cacheflush((void *)rx, len, ICACHE);
}
#elif defined(__powerpc__)
-void flush_icache_range(uintptr_t start, uintptr_t stop)
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
- uintptr_t p, start1, stop1;
+ uintptr_t p, b, e;
size_t dsize = qemu_dcache_linesize;
size_t isize = qemu_icache_linesize;
- start1 = start & ~(dsize - 1);
- stop1 = (stop + dsize - 1) & ~(dsize - 1);
- for (p = start1; p < stop1; p += dsize) {
+ b = rw & ~(dsize - 1);
+ e = (rw + len + dsize - 1) & ~(dsize - 1);
+ for (p = b; p < e; p += dsize) {
asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
}
asm volatile ("sync" : : : "memory");
- start &= start & ~(isize - 1);
- stop1 = (stop + isize - 1) & ~(isize - 1);
- for (p = start1; p < stop1; p += isize) {
+ b = rx & ~(isize - 1);
+ e = (rx + len + isize - 1) & ~(isize - 1);
+ for (p = b; p < e; p += isize) {
asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
}
asm volatile ("sync" : : : "memory");
@@ -52,20 +55,23 @@ void flush_icache_range(uintptr_t start, uintptr_t stop)
#elif defined(__sparc__)
-void flush_icache_range(uintptr_t start, uintptr_t stop)
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
- uintptr_t p;
-
- for (p = start & -8; p < ((stop + 7) & -8); p += 8) {
+ /* No additional data flush to the RW virtual address required. */
+ uintptr_t p, end = (rx + len + 7) & -8;
+ for (p = rx & -8; p < end; p += 8) {
__asm__ __volatile__("flush\t%0" : : "r" (p));
}
}
#else
-void flush_icache_range(uintptr_t start, uintptr_t stop)
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
- __builtin___clear_cache((char *)start, (char *)stop);
+ if (rw != rx) {
+ __builtin___clear_cache((char *)rw, (char *)rw + len);
+ }
+ __builtin___clear_cache((char *)rx, (char *)rx + len);
}
#endif
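As a worked example of the cache-line rounding in the powerpc hunk above
(not from the patch; addresses invented, dcache line size assumed 64 bytes):

    /*
     * rw = 0x10027, len = 0x30, dsize = 64:
     *   b = 0x10027 & ~63               = 0x10000   (round start down)
     *   e = (0x10027 + 0x30 + 63) & ~63 = 0x10080   (round end up)
     * The dcbst loop thus touches exactly the two cache lines
     * (0x10000 and 0x10040) that the byte range [rw, rw + len) overlaps.
     */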
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
index 7804c18..b182f0b 100644
--- a/util/cacheinfo.c
+++ b/util/cacheinfo.c
@@ -166,9 +166,11 @@ static void fallback_cache_info(int *isize, int *dsize)
*isize = *dsize;
} else {
#if defined(_ARCH_PPC)
- /* For PPC, we're going to use the icache size computed for
- flush_icache_range. Which means that we must use the
- architecture minimum. */
+ /*
+ * For PPC, we're going to use the cache sizes computed for
+ * flush_idcache_range. Which means that we must use the
+ * architecture minimum.
+ */
*isize = *dsize = 16;
#else
/* Otherwise, 64 bytes is not uncommon. */