author    | Richard Henderson <richard.henderson@linaro.org> | 2022-11-07 19:48:14 +1100
committer | Richard Henderson <richard.henderson@linaro.org> | 2023-02-04 06:19:42 -1000
commit    | cb48f3654e290ee5d7cbf1fb31888463fa2a180c
tree      | f528facdd87c95035ca0503406a20b4b86b7c3eb /accel
parent    | 4771e71c28eb0cece2a17a2d891bbd724bdc158d
tcg: Add guest load/store primitives for TCGv_i128
These do not yet consider the atomicity of the 16-byte value;
this is a direct replacement for the current target code, which
uses a pair of 8-byte operations.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
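
The new softmmu helpers split each 16-byte access into two 8-byte halves whose placement depends on the memop's endianness: a big-endian 16-byte value keeps its high 64 bits at the lower address, a little-endian one keeps its low 64 bits there (hence int128_make128(l, h) after the two helper_*_ldq_mmu calls). A minimal, standalone sketch of that composition rule; the i128_pair type and ldq_be helper here are hypothetical stand-ins for QEMU's Int128 and helper_be_ldq_mmu, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for QEMU's Int128: an explicit lo/hi pair. */
typedef struct { uint64_t lo, hi; } i128_pair;

/* Big-endian 8-byte load from a byte buffer. */
static uint64_t ldq_be(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

/* Big-endian 16-byte load composed from two 8-byte loads:
 * the first quadword becomes the high half, as in cpu_ld16_be_mmu. */
static i128_pair ld16_be(const uint8_t *p)
{
    uint64_t h = ldq_be(p);      /* bytes 0..7  -> high half */
    uint64_t l = ldq_be(p + 8);  /* bytes 8..15 -> low half  */
    return (i128_pair){ .lo = l, .hi = h };
}

int main(void)
{
    uint8_t buf[16];
    for (int i = 0; i < 16; i++) {
        buf[i] = i + 1;          /* bytes 0x01 .. 0x10 */
    }
    i128_pair v = ld16_be(buf);
    /* Prints hi=0102030405060708 lo=090a0b0c0d0e0f10. */
    printf("hi=%016llx lo=%016llx\n",
           (unsigned long long)v.hi, (unsigned long long)v.lo);
    return 0;
}

The little-endian variant is the mirror image: the first quadword is the low half, which is why cpu_ld16_le_mmu loads l first.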
Diffstat (limited to 'accel')
accel/tcg/cputlb.c    | 112
accel/tcg/user-exec.c |  66
2 files changed, 178 insertions(+), 0 deletions(-)
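
Two implementations follow in the diff below: the softmmu path (cputlb.c) reuses the existing 8-byte MMU helpers with an unaligned 64-bit replacement MemOpIdx, while the user-only path (user-exec.c) does a single 16-byte memcpy through the host address and applies bswap128 whenever host and guest endianness disagree. Under the lo/hi-pair view of Int128, reversing all 16 bytes means the two halves trade places and each half is byte-reversed; a hedged sketch of that identity (my reimplementation for illustration, not the code from QEMU's bswap.h):

#include <stdint.h>

/* Hypothetical lo/hi pair standing in for QEMU's Int128. */
typedef struct { uint64_t lo, hi; } i128_pair;

/* Byte-reverse one 64-bit value by swapping progressively wider lanes. */
static uint64_t my_bswap64(uint64_t v)
{
    v = ((v & 0x00ff00ff00ff00ffull) << 8)  | ((v >> 8)  & 0x00ff00ff00ff00ffull);
    v = ((v & 0x0000ffff0000ffffull) << 16) | ((v >> 16) & 0x0000ffff0000ffffull);
    return (v << 32) | (v >> 32);
}

/* Byte-reverse all 16 bytes of a 128-bit value: the halves trade
 * places and each half is byte-reversed. */
static i128_pair my_bswap128(i128_pair v)
{
    return (i128_pair){ .lo = my_bswap64(v.hi), .hi = my_bswap64(v.lo) };
}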
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 04e2707..4812d83 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2192,6 +2192,64 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
     return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
 }
 
+Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+    uint64_t h, l;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    h = helper_be_ldq_mmu(env, addr, new_oi, ra);
+    l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return int128_make128(l, h);
+}
+
+Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+    uint64_t h, l;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    l = helper_le_ldq_mmu(env, addr, new_oi, ra);
+    h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return int128_make128(l, h);
+}
+
 /*
  * Store Helpers
  */
@@ -2546,6 +2604,60 @@ void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
     cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
 }
 
+void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
+    helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
+    helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
 #include "ldst_common.c.inc"
 
 /*
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index a8eb63a..ae67d84 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1031,6 +1031,42 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
     return ret;
 }
 
+Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    Int128 ret;
+
+    validate_memop(oi, MO_128 | MO_BE);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    memcpy(&ret, haddr, 16);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+
+    if (!HOST_BIG_ENDIAN) {
+        ret = bswap128(ret);
+    }
+    return ret;
+}
+
+Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    Int128 ret;
+
+    validate_memop(oi, MO_128 | MO_LE);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    memcpy(&ret, haddr, 16);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+
+    if (HOST_BIG_ENDIAN) {
+        ret = bswap128(ret);
+    }
+    return ret;
+}
+
 void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                  MemOpIdx oi, uintptr_t ra)
 {
@@ -1115,6 +1151,36 @@ void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 
+void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
+                     Int128 val, MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_128 | MO_BE);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    if (!HOST_BIG_ENDIAN) {
+        val = bswap128(val);
+    }
+    memcpy(haddr, &val, 16);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
+                     Int128 val, MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_128 | MO_LE);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    if (HOST_BIG_ENDIAN) {
+        val = bswap128(val);
+    }
+    memcpy(haddr, &val, 16);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
 {
     uint32_t ret;
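
For context, a sketch of how a target helper might call the new primitives once their declarations are exported (the helper_load_pair function and its signature are hypothetical; make_memop_idx and GETPC are existing QEMU facilities, and the MemOp flags follow the assertions in the patch):

/* Hypothetical target helper -- an illustration of the intended call
 * pattern under the usual QEMU helper environment, not code from this
 * patch. */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu_ldst.h"

Int128 helper_load_pair(CPUArchState *env, target_ulong addr, uint32_t mmu_idx)
{
    /* One aligned big-endian 16-byte load in place of two 8-byte loads. */
    MemOpIdx oi = make_memop_idx(MO_128 | MO_BE | MO_ALIGN, mmu_idx);
    return cpu_ld16_be_mmu(env, addr, oi, GETPC());
}

As the commit message notes, the two 8-byte halves are not performed atomically, so targets that must guarantee single-copy atomicity for 16-byte accesses cannot rely on these primitives yet.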