author     Richard Henderson <richard.henderson@linaro.org>  2023-05-05 21:55:01 +0100
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-11 09:53:41 +0100
commit     0b3c75ad1a21574cc55b0c095a7dc21e2d27ffc8 (patch)
tree       861006d476925fa726a93e85eb37dd247a3f3f9e /accel/tcg
parent     9877ea05de9cdce6a5da87175d8455832f8148dc (diff)
accel/tcg: Introduce tlb_read_idx
Instead of playing with offsetof in various places, use MMUAccessType
to index an array. This is easily defined instead of the previous
dummy padding array in the union.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
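The tlb_read_idx() helper itself is introduced outside the files shown here, so this diff only contains its callers. A rough sketch of the idea the commit message describes, assuming an illustrative addr_idx[] array member in the CPUTLBEntry union (the real member name and build-time layout checks may differ), looks like:

/*
 * Sketch only: index the TLB comparators by MMUAccessType, relying on
 * MMU_DATA_LOAD/MMU_DATA_STORE/MMU_INST_FETCH lining up with the
 * addr_read/addr_write/addr_code layout of the union.
 */
static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
                                        MMUAccessType access_type)
{
    /* addr_idx[] stands in for the array replacing the dummy padding. */
    const target_ulong *ptr = &entry->addr_idx[access_type];
#if TCG_OVERSIZED_GUEST
    return *ptr;
#else
    /* access_type may select .addr_write, so read it atomically. */
    return qatomic_read(ptr);
#endif
}

In the hunks below this single accessor replaces both the open-coded offsetof()-based reads and the offset parameter previously supplied through the VICTIM_TLB_HIT macro.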
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cputlb.c | 104
1 file changed, 33 insertions(+), 71 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 0b8a5f9..5051244 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1441,34 +1441,17 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
}
}
-static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
-{
-#if TCG_OVERSIZED_GUEST
- return *(target_ulong *)((uintptr_t)entry + ofs);
-#else
- /* ofs might correspond to .addr_write, so use qatomic_read */
- return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
-#endif
-}
-
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
- size_t elt_ofs, target_ulong page)
+ MMUAccessType access_type, target_ulong page)
{
size_t vidx;
assert_cpu_is_self(env_cpu(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
- target_ulong cmp;
-
- /* elt_ofs might correspond to .addr_write, so use qatomic_read */
-#if TCG_OVERSIZED_GUEST
- cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
-#else
- cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
-#endif
+ target_ulong cmp = tlb_read_idx(vtlb, access_type);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
@@ -1490,11 +1473,6 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
return false;
}
-/* Macro to call the above, with local variables from the use context. */
-#define VICTIM_TLB_HIT(TY, ADDR) \
- victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
- (ADDR) & TARGET_PAGE_MASK)
-
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
CPUTLBEntryFull *full, uintptr_t retaddr)
{
@@ -1527,29 +1505,12 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr, page_addr;
- size_t elt_ofs;
- int flags;
-
- switch (access_type) {
- case MMU_DATA_LOAD:
- elt_ofs = offsetof(CPUTLBEntry, addr_read);
- break;
- case MMU_DATA_STORE:
- elt_ofs = offsetof(CPUTLBEntry, addr_write);
- break;
- case MMU_INST_FETCH:
- elt_ofs = offsetof(CPUTLBEntry, addr_code);
- break;
- default:
- g_assert_not_reached();
- }
- tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ target_ulong tlb_addr = tlb_read_idx(entry, access_type);
+ target_ulong page_addr = addr & TARGET_PAGE_MASK;
+ int flags = TLB_FLAGS_MASK;
- flags = TLB_FLAGS_MASK;
- page_addr = addr & TARGET_PAGE_MASK;
if (!tlb_hit_page(tlb_addr, page_addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
CPUState *cs = env_cpu(env);
if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
@@ -1571,7 +1532,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
*/
flags &= ~TLB_INVALID_MASK;
}
- tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ tlb_addr = tlb_read_idx(entry, access_type);
}
flags &= tlb_addr;
@@ -1802,7 +1763,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if (prot & PAGE_WRITE) {
tlb_addr = tlb_addr_write(tlbe);
if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_write, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
+ addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
@@ -1835,7 +1797,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
} else /* if (prot & PAGE_READ) */ {
tlb_addr = tlbe->addr_read;
if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_read, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_LOAD,
+ addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size,
MMU_DATA_LOAD, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
@@ -1929,13 +1892,9 @@ load_memop(const void *haddr, MemOp op)
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
- uintptr_t retaddr, MemOp op, bool code_read,
+ uintptr_t retaddr, MemOp op, MMUAccessType access_type,
FullLoadHelper *full_load)
{
- const size_t tlb_off = code_read ?
- offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
- const MMUAccessType access_type =
- code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
const unsigned a_bits = get_alignment_bits(get_memop(oi));
const size_t size = memop_size(op);
uintptr_t mmu_idx = get_mmuidx(oi);
@@ -1955,18 +1914,18 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
- tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ tlb_addr = tlb_read_idx(entry, access_type);
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+ if (!victim_tlb_hit(env, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size,
access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
- tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ tlb_addr = tlb_read_idx(entry, access_type);
tlb_addr &= ~TLB_INVALID_MASK;
}
@@ -2052,7 +2011,8 @@ static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_UB);
- return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_UB, MMU_DATA_LOAD,
+ full_ldub_mmu);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
@@ -2065,7 +2025,7 @@ static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUW);
- return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
+ return load_helper(env, addr, oi, retaddr, MO_LEUW, MMU_DATA_LOAD,
full_le_lduw_mmu);
}
@@ -2079,7 +2039,7 @@ static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUW);
- return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
+ return load_helper(env, addr, oi, retaddr, MO_BEUW, MMU_DATA_LOAD,
full_be_lduw_mmu);
}
@@ -2093,7 +2053,7 @@ static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUL);
- return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
+ return load_helper(env, addr, oi, retaddr, MO_LEUL, MMU_DATA_LOAD,
full_le_ldul_mmu);
}
@@ -2107,7 +2067,7 @@ static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUL);
- return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
+ return load_helper(env, addr, oi, retaddr, MO_BEUL, MMU_DATA_LOAD,
full_be_ldul_mmu);
}
@@ -2121,7 +2081,7 @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUQ);
- return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
+ return load_helper(env, addr, oi, retaddr, MO_LEUQ, MMU_DATA_LOAD,
helper_le_ldq_mmu);
}
@@ -2129,7 +2089,7 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUQ);
- return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
+ return load_helper(env, addr, oi, retaddr, MO_BEUQ, MMU_DATA_LOAD,
helper_be_ldq_mmu);
}
@@ -2325,7 +2285,6 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
bool big_endian)
{
- const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
uintptr_t index, index2;
CPUTLBEntry *entry, *entry2;
target_ulong page1, page2, tlb_addr, tlb_addr2;
@@ -2347,7 +2306,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
tlb_addr2 = tlb_addr_write(entry2);
if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
- if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
+ if (!victim_tlb_hit(env, mmu_idx, index2, MMU_DATA_STORE, page2)) {
tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
mmu_idx, retaddr);
index2 = tlb_index(env, mmu_idx, page2);
@@ -2400,7 +2359,6 @@ static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
- const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
const unsigned a_bits = get_alignment_bits(get_memop(oi));
const size_t size = memop_size(op);
uintptr_t mmu_idx = get_mmuidx(oi);
@@ -2423,7 +2381,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+ if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
@@ -2729,7 +2687,8 @@ void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
+ return load_helper(env, addr, oi, retaddr, MO_8,
+ MMU_INST_FETCH, full_ldub_code);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
@@ -2741,7 +2700,8 @@ uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
+ return load_helper(env, addr, oi, retaddr, MO_TEUW,
+ MMU_INST_FETCH, full_lduw_code);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
@@ -2753,7 +2713,8 @@ uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
+ return load_helper(env, addr, oi, retaddr, MO_TEUL,
+ MMU_INST_FETCH, full_ldl_code);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
@@ -2765,7 +2726,8 @@ uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
+ return load_helper(env, addr, oi, retaddr, MO_TEUQ,
+ MMU_INST_FETCH, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)