author:    Richard Henderson <rth@twiddle.net>  2016-06-28 11:37:27 -0700
committer: Richard Henderson <rth@twiddle.net>  2016-10-26 08:29:01 -0700
commit:    c482cb117cc418115ca9c6d21a7a2315414c0a40
tree:      b665778366c40123e7e9831109ffc420caec9bfb
parent:    c86c6e4c80fee4d9423bedb10ba9e9c4aa68f861
tcg: Add atomic helpers
Add all of cmpxchg, op_fetch, fetch_op, and xchg.
Handle both endiannesses, and sizes up to 8 bytes.
Handle expanding non-atomically when emulating in serial.
Reviewed-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
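
For readers new to these helpers: a cmpxchg helper stores a new value only if the guest location still holds the expected value, and always returns what it found there. Below is a minimal standalone sketch of that contract, using the GCC/Clang __atomic builtins (the kind of primitive qemu/atomic.h wraps); cmpxchg_u32 is an invented name for illustration.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented for illustration: store newv into *ptr only if *ptr still
   equals cmpv, and return the value actually found at *ptr. */
static uint32_t cmpxchg_u32(uint32_t *ptr, uint32_t cmpv, uint32_t newv)
{
    __atomic_compare_exchange_n(ptr, &cmpv, newv, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return cmpv;  /* on failure the builtin writes the observed value here */
}

int main(void)
{
    uint32_t x = 1;
    printf("%u\n", cmpxchg_u32(&x, 1, 5));  /* prints 1; x becomes 5 */
    printf("%u\n", cmpxchg_u32(&x, 1, 9));  /* prints 5; x stays 5   */
    return 0;
}
```

The actual helpers route the guest address through atomic_mmu_lookup in the diff below, so alignment, TLB, and I/O rules are enforced before any such primitive runs.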
Diffstat (limited to 'cputlb.c')
-rw-r--r--  cputlb.c | 112
1 file changed, 109 insertions(+), 3 deletions(-)
@@ -23,15 +23,15 @@
 #include "exec/memory.h"
 #include "exec/address-spaces.h"
 #include "exec/cpu_ldst.h"
-#include "exec/cputlb.h"
-#include "exec/memory-internal.h"
 #include "exec/ram_addr.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
 #include "qemu/error-report.h"
 #include "exec/log.h"
+#include "exec/helper-proto.h"
+#include "qemu/atomic.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -585,6 +585,69 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
     }
 }
 
+/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
+ * operations, or io operations to proceed.  Return the host address.  */
+static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
+                               TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    size_t mmu_idx = get_mmuidx(oi);
+    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
+    target_ulong tlb_addr = tlbe->addr_write;
+    TCGMemOp mop = get_memop(oi);
+    int a_bits = get_alignment_bits(mop);
+    int s_bits = mop & MO_SIZE;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* Enforce guest required alignment.  */
+    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
+        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    /* Enforce qemu required alignment.  */
+    if (unlikely(addr & ((1 << s_bits) - 1))) {
+        /* We get here if guest alignment was not requested,
+           or was not enforced by cpu_unaligned_access above.
+           We might widen the access and emulate, but for now
+           mark an exception and exit the cpu loop.  */
+        goto stop_the_world;
+    }
+
+    /* Check TLB entry and enforce page permissions.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+        tlb_addr = tlbe->addr_write;
+    }
+
+    /* Notice an IO access, or a notdirty page.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        /* There's really nothing that can be done to
+           support this apart from stop-the-world.  */
+        goto stop_the_world;
+    }
+
+    /* Let the guest notice RMW on a write-only page.  */
+    if (unlikely(tlbe->addr_read != tlb_addr)) {
+        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
+        /* Since we don't support reads and writes to different addresses,
+           and we do have the proper page loaded for write, this shouldn't
+           ever return.  But just in case, handle via stop-the-world.  */
+        goto stop_the_world;
+    }
+
+    return (void *)((uintptr_t)addr + tlbe->addend);
+
+ stop_the_world:
+    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
+}
+
 #ifdef TARGET_WORDS_BIGENDIAN
 # define TGT_BE(X)  (X)
 # define TGT_LE(X)  BSWAP(X)
@@ -606,8 +669,51 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
 #define DATA_SIZE 8
 #include "softmmu_template.h"
 
-#undef MMUSUFFIX
+/* First set of helpers allows passing in of OI and RETADDR.  This makes
+   them callable from other helpers.  */
+
+#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
+#define ATOMIC_NAME(X) \
+    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)
+
+#define DATA_SIZE 1
+#include "atomic_template.h"
+
+#define DATA_SIZE 2
+#include "atomic_template.h"
+
+#define DATA_SIZE 4
+#include "atomic_template.h"
+
+#define DATA_SIZE 8
+#include "atomic_template.h"
+
+/* Second set of helpers are directly callable from TCG as helpers.  */
+
+#undef EXTRA_ARGS
+#undef ATOMIC_NAME
+#undef ATOMIC_MMU_LOOKUP
+#define EXTRA_ARGS         , TCGMemOpIdx oi
+#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
+
+#define DATA_SIZE 1
+#include "atomic_template.h"
+
+#define DATA_SIZE 2
+#include "atomic_template.h"
+
+#define DATA_SIZE 4
+#include "atomic_template.h"
+
+#define DATA_SIZE 8
+#include "atomic_template.h"
+
+/* Code access functions.  */
+
+#undef MMUSUFFIX
 #define MMUSUFFIX _cmmu
 #undef GETPC
 #define GETPC() ((uintptr_t)0)
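
Both alignment checks in atomic_mmu_lookup rely on the usual power-of-two mask test: an address is aligned to 2^n bytes exactly when its low n bits are zero, which is what addr & ((1 << a_bits) - 1) computes. A self-contained sketch of the test (is_aligned is an invented name):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when addr sits on a (1 << bits)-byte boundary, i.e. when its
   low 'bits' bits are all zero. */
static bool is_aligned(uint64_t addr, int bits)
{
    return (addr & ((UINT64_C(1) << bits) - 1)) == 0;
}

int main(void)
{
    printf("%d\n", is_aligned(0x1004, 2));  /* 1: 4-byte aligned */
    printf("%d\n", is_aligned(0x1006, 3));  /* 0: not 8-byte aligned */
    return 0;
}
```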
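The ATOMIC_NAME plumbing above is plain token pasting. Here is a self-contained sketch of the expansion, assuming QEMU's glue() and HELPER() are the usual paste-and-prefix macros (stand-ins are defined locally below) and that atomic_template.h sets SUFFIX to l and END to _le for the 4-byte little-endian instantiation:

```c
#include <stdio.h>

/* Local stand-ins for QEMU's glue()/HELPER(); assumed to be plain
   token-pasting and helper_-prefixing macros. */
#define xglue(a, b)  a##b
#define glue(a, b)   xglue(a, b)
#define HELPER(name) glue(helper_, name)

/* Assumed values for a 4-byte little-endian instantiation of
   atomic_template.h. */
#define SUFFIX l
#define END    _le

#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))

/* Stringify the generated identifier so it can be printed. */
#define xstr(x) #x
#define str(x)  xstr(x)

int main(void)
{
    printf("%s\n", str(ATOMIC_NAME(cmpxchg)));    /* helper_atomic_cmpxchgl_le_mmu */
    printf("%s\n", str(ATOMIC_NAME(fetch_add)));  /* helper_atomic_fetch_addl_le_mmu */
    return 0;
}
```

The _mmu-suffixed set takes oi and retaddr explicitly so that other helpers can call it; the second set drops _mmu and recovers the return address itself via GETPC().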