| author | Peter Maydell <peter.maydell@linaro.org> | 2017-11-20 18:08:28 +0000 |
| --- | --- | --- |
| committer | Peter Maydell <peter.maydell@linaro.org> | 2017-11-21 12:09:25 +0000 |
| commit | 34d49937e480edfa173d71e8c17972ad866b56c6 (patch) | |
| tree | d2080bc4d17a444d90a3b95a82ce0324e2898122 /accel/tcg/atomic_template.h | |
| parent | 27266271977c5a30f2f7d493e042be1897827bdd (diff) | |
| download | qemu-34d49937e480edfa173d71e8c17972ad866b56c6.zip, qemu-34d49937e480edfa173d71e8c17972ad866b56c6.tar.gz, qemu-34d49937e480edfa173d71e8c17972ad866b56c6.tar.bz2 | |
accel/tcg: Handle atomic accesses to notdirty memory correctly
To do a write to memory that is marked as notdirty, we need
to invalidate any TBs we have cached for that memory and
update the CPU physical memory dirty flags for VGA and migration.
The slow-path code in notdirty_mem_write() does all this correctly,
but the new atomic handling code in atomic_mmu_lookup() doesn't
do anything at all: it just clears the dirty bit in the TLB.
The effect of this bug is that if the first write to a notdirty
page for which we have cached TBs is made by a guest atomic access,
we fail to invalidate the TBs and will subsequently execute
incorrect code. This can be seen by trying to run 'javac' on AArch64.
Use the new notdirty_call_before() and notdirty_call_after()
functions to correctly handle the update to notdirty memory
in the atomic codepath.
Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1511201308-23580-3-git-send-email-peter.maydell@linaro.org
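
As background for the fix, the required ordering can be sketched in isolation. This is a minimal illustrative sketch, not QEMU code: the hook names notdirty_call_before()/notdirty_call_after() come from the commit message above, while the NotDirtyInfo type, the stub bodies, and the helper signature are assumptions made up for the example.

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the per-access state the real hooks share. */
typedef struct {
    bool active;   /* page was notdirty when the address was looked up */
} NotDirtyInfo;

static void notdirty_call_before(NotDirtyInfo *ndi)
{
    /* Stub: in QEMU this is where cached TBs for the page would be
     * invalidated, before the memory is modified. */
    (void)ndi;
}

static void notdirty_call_after(NotDirtyInfo *ndi)
{
    /* Stub: in QEMU this is where the dirty flags consulted by VGA and
     * migration would be updated, after the write has happened. */
    (void)ndi;
}

/* The ordering the commit message requires for a write to a notdirty
 * page; the buggy atomic fast path performed only the exchange. */
static uint32_t atomic_xchg_notdirty(uint32_t *haddr, uint32_t newval,
                                     NotDirtyInfo *ndi)
{
    notdirty_call_before(ndi);  /* 1. invalidate TBs first           */
    uint32_t ret = __atomic_exchange_n(haddr, newval, __ATOMIC_SEQ_CST);
    notdirty_call_after(ndi);   /* 2. then update the dirty tracking */
    return ret;
}
```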
Diffstat (limited to 'accel/tcg/atomic_template.h')
-rw-r--r-- | accel/tcg/atomic_template.h | 12 |
1 file changed, 12 insertions, 0 deletions
```diff
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 1c7c175..e022df4 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -61,6 +61,7 @@
 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
     ATOMIC_MMU_CLEANUP;
@@ -70,6 +71,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 #if DATA_SIZE >= 16
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
     __atomic_load(haddr, &val, __ATOMIC_RELAXED);
     ATOMIC_MMU_CLEANUP;
@@ -79,6 +81,7 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                      ABI_TYPE val EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     __atomic_store(haddr, &val, __ATOMIC_RELAXED);
     ATOMIC_MMU_CLEANUP;
@@ -87,6 +90,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                            ABI_TYPE val EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     DATA_TYPE ret = atomic_xchg__nocheck(haddr, val);
     ATOMIC_MMU_CLEANUP;
@@ -97,6 +101,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                         ABI_TYPE val EXTRA_ARGS)                    \
 {                                                                   \
+    ATOMIC_MMU_DECLS;                                               \
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
     DATA_TYPE ret = atomic_##X(haddr, val);                         \
     ATOMIC_MMU_CLEANUP;                                             \
@@ -130,6 +135,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
     ATOMIC_MMU_CLEANUP;
@@ -139,6 +145,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 #if DATA_SIZE >= 16
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
     __atomic_load(haddr, &val, __ATOMIC_RELAXED);
     ATOMIC_MMU_CLEANUP;
@@ -148,6 +155,7 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                      ABI_TYPE val EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     val = BSWAP(val);
     __atomic_store(haddr, &val, __ATOMIC_RELAXED);
@@ -157,6 +165,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                            ABI_TYPE val EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     ABI_TYPE ret = atomic_xchg__nocheck(haddr, BSWAP(val));
     ATOMIC_MMU_CLEANUP;
@@ -167,6 +176,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                         ABI_TYPE val EXTRA_ARGS)                    \
 {                                                                   \
+    ATOMIC_MMU_DECLS;                                               \
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
     DATA_TYPE ret = atomic_##X(haddr, BSWAP(val));                  \
     ATOMIC_MMU_CLEANUP;                                             \
@@ -187,6 +197,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
 ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr,
                                 ABI_TYPE val EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     DATA_TYPE ldo, ldn, ret, sto;
 
@@ -206,6 +217,7 @@ ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
                                 ABI_TYPE val EXTRA_ARGS)
 {
+    ATOMIC_MMU_DECLS;
     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
     DATA_TYPE ldo, ldn, ret, sto;
```
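
The diffstat above is limited to accel/tcg/atomic_template.h, so the definitions of the three macros being added are not visible here; they are supplied by the code that includes the template. A minimal sketch of how the softmmu variants might tie into the notdirty hooks: the macro names ATOMIC_MMU_DECLS/ATOMIC_MMU_LOOKUP/ATOMIC_MMU_CLEANUP and the hook names are from the patch itself, but the expansions shown, the NotDirtyInfo field, and the atomic_mmu_lookup() signature are assumptions, not the actual QEMU definitions.

```c
/* Sketch only, not the actual QEMU definitions. */

/* Per-helper state: records whether the looked-up page was notdirty. */
#define ATOMIC_MMU_DECLS    NotDirtyInfo ndi

/* Translate the guest address to a host pointer; for a notdirty page,
 * the lookup would run notdirty_call_before() and mark ndi active. */
#define ATOMIC_MMU_LOOKUP   atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)

/* After the atomic access: update the dirty tracking for VGA and
 * migration if the lookup flagged the page. */
#define ATOMIC_MMU_CLEANUP                      \
    do {                                        \
        if (ndi.active) {                       \
            notdirty_call_after(&ndi);          \
        }                                       \
    } while (0)
```

With expansions of this shape, each helper body in the diff reads straightforwardly: declare the state, look up the host address (running the before hook when the page is notdirty), perform the atomic operation, then run the after hook.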