path: root/accel
author     Peter Maydell <peter.maydell@linaro.org>  2024-10-14 11:12:34 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2024-10-14 11:12:34 +0100
commit     3860a2a8de56fad71db42f4ad120eb7eff03b51f (patch)
tree       1db830a2b3c4308a66f6fbe789e5f77f6ce15321 /accel
parent     b38d263bca64bbba36d4b175ea0f5746b4c5604d (diff)
parent     e530581ee06573fcf48c7f7a6c3f8ec6e5809243 (diff)
Merge tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu into staging
linux-user/i386: Emulate orig_ax
linux-user/vm86: Fix compilation with Clang
tcg: remove singlestep_enabled from DisasContextBase
accel/tcg: Add TCGCPUOps.tlb_fill_align
target/hppa: Handle alignment faults in hppa_get_physical_address
target/arm: Fix alignment fault priority in get_phys_addr_lpae

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmcMRU4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9dSQf+MUJq//oig+bDeUlQ
# v3uBMFVi1DBYI1Y/xVODADpn8Ltv5s9v7N+/phi+St2W65OzGNYviHvq/abeyhdo
# M40LGtOvjO6Mns+Z9NKTobtT8n4ap4JJyoFjuXFTHkMMDiQ/v7FkEJJoS3W2bemi
# zmKYF/vWe3bwI+d3+dyaUjA92gSs+Hlj8uEVBlzn3ubA19ZdvtyfKURPQynrkwlo
# dFtAOFRFBU6vrlJSBElxUfYO4jC4Cng19EOrWvIsuKAkACuhiHgah10i3WKw8Asz
# 1iRUYXe0EOlX2RYNTD+Oj5j0cViRylirgPtIhEIPBuDP7m1Jy1JO4dVARUJBBU71
# Zd4Uuw==
# =EX+a
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 13 Oct 2024 23:10:22 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu: (27 commits)
  target/arm: Fix alignment fault priority in get_phys_addr_lpae
  target/arm: Implement TCGCPUOps.tlb_fill_align
  target/arm: Move device detection earlier in get_phys_addr_lpae
  target/arm: Pass MemOp to get_phys_addr_lpae
  target/arm: Pass MemOp through get_phys_addr_twostage
  target/arm: Pass MemOp to get_phys_addr_nogpc
  target/arm: Pass MemOp to get_phys_addr_gpc
  target/arm: Pass MemOp to get_phys_addr_with_space_nogpc
  target/arm: Pass MemOp to get_phys_addr
  target/hppa: Implement TCGCPUOps.tlb_fill_align
  target/hppa: Handle alignment faults in hppa_get_physical_address
  target/hppa: Fix priority of T, D, and B page faults
  target/hppa: Perform access rights before protection id check
  target/hppa: Add MemOp argument to hppa_get_physical_address
  accel/tcg: Use the alignment test in tlb_fill_align
  accel/tcg: Add TCGCPUOps.tlb_fill_align
  include/exec/memop: Introduce memop_atomicity_bits
  include/exec/memop: Rename get_alignment_bits
  include/exec/memop: Move get_alignment_bits from tcg.h
  accel/tcg: Assert noreturn from write-only page for atomics
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c      | 160
-rw-r--r--  accel/tcg/translator.c  |   1
-rw-r--r--  accel/tcg/user-exec.c   |   4
3 files changed, 84 insertions(+), 81 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 117b516..b76a4ea 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1221,22 +1221,35 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
}
/*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
*/
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+ int mmu_idx, MemOp memop, int size,
+ bool probe, uintptr_t ra)
{
- bool ok;
+ const TCGCPUOps *ops = cpu->cc->tcg_ops;
+ CPUTLBEntryFull full;
- /*
- * This is not a probe, so only valid return is success; failure
- * should result in exception + longjmp to the cpu loop.
- */
- ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
- access_type, mmu_idx, false, retaddr);
- assert(ok);
+ if (ops->tlb_fill_align) {
+ if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+ memop, size, probe, ra)) {
+ tlb_set_page_full(cpu, mmu_idx, addr, &full);
+ return true;
+ }
+ } else {
+ /* Legacy behaviour is alignment before paging. */
+ if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+ ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+ }
+ if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+ return true;
+ }
+ }
+ assert(probe);
+ return false;
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
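The dispatcher above gives targets two paths: a target that implements the new
tlb_fill_align hook performs its own alignment handling and fills in a
CPUTLBEntryFull on success, while legacy targets keep the old
alignment-before-paging order. Below is a minimal sketch of a hook
implementation, inferred from the call site above; the "mycpu" name, the
identity mapping, and the fault handling are placeholders, not any real
target's code.

static bool mycpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
                                 vaddr addr, MMUAccessType access_type,
                                 int mmu_idx, MemOp memop, int size,
                                 bool probe, uintptr_t retaddr)
{
    /*
     * The hook owns the alignment check, so the target can order the
     * alignment fault against MMU faults however its architecture requires.
     */
    if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
        if (probe) {
            return false;
        }
        /*
         * Raise the target's alignment fault here; this path must not
         * return, e.g. ending in cpu_loop_exit_restore(cs, retaddr).
         */
    }

    /* On success, describe the translated page. */
    *out = (CPUTLBEntryFull){
        .phys_addr = addr,                 /* identity map, sketch only */
        .attrs = MEMTXATTRS_UNSPECIFIED,
        .prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        .lg_page_size = TARGET_PAGE_BITS,
    };
    return true;
}

Passing the full MemOp into the fill path is the point of the new interface:
the old tlb_fill hook could not see the access's alignment requirement, so
targets such as hppa and Arm could not order alignment faults against
translation faults correctly, as the commits in this pull request note.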
@@ -1351,22 +1364,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
- if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+ 0, fault_size, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
*pfull = NULL;
return TLB_INVALID_MASK;
}
- /* TLB resize via tlb_fill may have moved the entry. */
+ /* TLB resize via tlb_fill_align may have moved the entry. */
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
- * to force the next access through tlb_fill. We've just
- * called tlb_fill, so we know that this entry *is* valid.
+ * to force the next access through tlb_fill_align. We've just
+ * called tlb_fill_align, so we know that this entry *is* valid.
*/
flags &= ~TLB_INVALID_MASK;
}
@@ -1607,16 +1620,17 @@ typedef struct MMULookupLocals {
* mmu_lookup1: translate one page
* @cpu: generic cpu state
* @data: lookup parameters
+ * @memop: memory operation for the access, or 0
* @mmu_idx: virtual address context
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
*
* Resolve the translation for the one page at @data.addr, filling in
* the rest of @data with the results. If the translation fails,
- * tlb_fill will longjmp out. Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
@@ -1631,7 +1645,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+ tlb_fill_align(cpu, addr, access_type, mmu_idx,
+ memop, data->size, false, ra);
maybe_resized = true;
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1658,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
+ if (likely(!maybe_resized)) {
+ /* Alignment has not been checked by tlb_fill_align. */
+ int a_bits = memop_alignment_bits(memop);
+
+ /*
+ * This alignment check differs from the one above, in that this is
+ * based on the atomicity of the operation. The intended use case is
+ * the ARM memory type field of each PTE, where accesses to pages with
+ * Device memory type require alignment.
+ */
+ if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+ int at_bits = memop_atomicity_bits(memop);
+ a_bits = MAX(a_bits, at_bits);
+ }
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
+ cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+ }
+ }
+
data->full = full;
data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
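The memop_atomicity_bits() helper used above packages up the atomicity
computation that this patch deletes from mmu_lookup() further down (see the
removed MO_ATOM_MASK switch below). The following sketch reconstructs it from
that removed code; the authoritative version lives in include/exec/memop.h.

/* Reconstructed from the switch removed below; assumes QEMU's MemOp
 * definitions from include/exec/memop.h. */
static unsigned sketch_memop_atomicity_bits(MemOp memop)
{
    unsigned size = memop & MO_SIZE;

    switch (memop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
        /* Byte-by-byte access only needs byte atomicity. */
        size = MO_8;
        break;
    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        /* Only each half of the operation must be atomic. */
        size = size ? size - 1 : 0;
        break;
    default:
        break;
    }
    return size;
}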
@@ -1699,7 +1733,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
- unsigned a_bits;
bool crosspage;
int flags;
@@ -1708,12 +1741,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
- /* Handle CPU specific unaligned behaviour */
- a_bits = get_alignment_bits(l->memop);
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
-
l->page[0].addr = addr;
l->page[0].size = memop_size(l->memop);
l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1721,7 +1748,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1740,8 +1767,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
@@ -1760,31 +1787,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}
- /*
- * This alignment check differs from the one above, in that this is
- * based on the atomicity of the operation. The intended use case is
- * the ARM memory type field of each PTE, where access to pages with
- * Device memory type require alignment.
- */
- if (unlikely(flags & TLB_CHECK_ALIGNED)) {
- MemOp size = l->memop & MO_SIZE;
-
- switch (l->memop & MO_ATOM_MASK) {
- case MO_ATOM_NONE:
- size = MO_8;
- break;
- case MO_ATOM_IFALIGN_PAIR:
- case MO_ATOM_WITHIN16_PAIR:
- size = size ? size - 1 : 0;
- break;
- default:
- break;
- }
- if (addr & ((1 << size) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
- }
-
return crosspage;
}
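mmu_lookup() decides whether an access spans two pages by comparing the page
of the first byte against the page of the last byte. A standalone
demonstration of that XOR-and-mask test, assuming 4 KiB pages for the example
values (QEMU uses TARGET_PAGE_MASK):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~(uint64_t)0xfff)   /* 4 KiB pages, demo only */

int main(void)
{
    uint64_t addr = 0x1ffc;            /* hypothetical 8-byte access */
    uint64_t last = addr + 8 - 1;      /* last byte at 0x2003 */
    int crosspage = ((addr ^ last) & PAGE_MASK) != 0;

    printf("crosspage = %d\n", crosspage);   /* 1: spans two pages */
    return 0;
}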
@@ -1797,34 +1799,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
void *hostaddr;
CPUTLBEntryFull *full;
+ bool did_tlb_fill = false;
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
- /* Enforce guest required alignment. */
- if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
- /* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* Enforce qemu required alignment. */
- if (unlikely(addr & (size - 1))) {
- /* We get here if guest alignment was not requested,
- or was not enforced by cpu_unaligned_access above.
- We might widen the access and emulate, but for now
- mark an exception and exit the cpu loop. */
- goto stop_the_world;
- }
-
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
@@ -1833,8 +1819,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(cpu, addr, size,
- MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+ mop, size, false, retaddr);
+ did_tlb_fill = true;
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
}
@@ -1848,15 +1835,32 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+ 0, size, false, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
- * write, this shouldn't ever return. But just in case,
- * handle via stop-the-world.
+ * write, this shouldn't ever return.
+ */
+ g_assert_not_reached();
+ }
+
+ /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+ if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ }
+
+ /* Enforce qemu required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ /*
+ * We get here if guest alignment was not requested, or was not
+ * enforced by cpu_unaligned_access or tlb_fill_align above.
+ * We might widen the access and emulate, but for now
+ * mark an exception and exit the cpu loop.
*/
goto stop_the_world;
}
+
/* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;
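The reordering in atomic_mmu_lookup() is deliberate: both alignment checks now
run after the TLB fill, so a tlb_fill_align hook gets first say on fault
priority, and the legacy guest check is skipped when the hook has already run
(did_tlb_fill). The remaining qemu-required check relies on size being a
power of two, so addr & (size - 1) is exactly the offset of the address within
the access. A standalone illustration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int size = 8;                      /* e.g. an 8-byte compare-and-swap */
    uint64_t addrs[] = { 0x2000, 0x2004 };

    for (int i = 0; i < 2; i++) {
        /* Nonzero low bits: qemu cannot perform the op atomically. */
        const char *action = (addrs[i] & (size - 1))
                             ? "stop_the_world" : "proceed";
        printf("%#llx -> %s\n", (unsigned long long)addrs[i], action);
    }
    return 0;
}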
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 113edcf..cbad00a 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -129,7 +129,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = *max_insns;
- db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->insn_start = NULL;
db->fake_insn = false;
db->host_addr[0] = host_pc;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 11b6d45..51b2c16 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -954,7 +954,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
@@ -1236,7 +1236,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
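These user-exec.c hunks are the user-mode half of the mechanical rename in
this series ("include/exec/memop: Rename get_alignment_bits" and
"include/exec/memop: Move get_alignment_bits from tcg.h"). A caller-side
sketch, assuming QEMU's include/exec/memop.h; the semantics are unchanged,
only the helper's name and home moved:

#include <stdint.h>
#include "exec/memop.h"   /* MemOp, memop_alignment_bits() */

/* Formerly spelled get_alignment_bits(mop) and declared in tcg/tcg.h. */
static inline bool sketch_guest_aligned(uint64_t addr, MemOp mop)
{
    return (addr & ((1 << memop_alignment_bits(mop)) - 1)) == 0;
}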