commit 50b107c5d617eaf93301cef20221312e7a986701
Author:    Richard Henderson <richard.henderson@linaro.org>  2019-08-24 09:51:09 -0700
Committer: Richard Henderson <richard.henderson@linaro.org>  2019-09-03 08:30:39 -0700
Tree:      8ff2f1b256e9847308d30c90fc431d91023e45d1 /accel
Parent:    5787585d0406cfd54dda0c71ea1a603347ce6e71
cputlb: Handle watchpoints via TLB_WATCHPOINT
The raising of exceptions from check_watchpoint, buried inside the I/O
subsystem, is fundamentally broken: we do not have the helper return
address with which we can unwind guest state.

Replace PHYS_SECTION_WATCH and io_mem_watch with TLB_WATCHPOINT, and
move the call to cpu_check_watchpoint into the cputlb helpers, where we
do have the helper return address. This also allows watchpoints on RAM
to bypass the full I/O access path.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
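[Editor's note, not part of the commit message: the patch relies on the
fact that a TLB comparator address is page aligned, so its low bits are
free to carry flag bits such as TLB_WATCHPOINT, and a single mask test
routes every non-plain access to the slow path. The sketch below is a
self-contained toy, not QEMU's code; the names, flag values, and 4 KiB
page size are all illustrative.]

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Flag bits live in the low (sub-page) bits of a page-aligned
     * TLB address, so one mask test finds every special case. */
    #define TARGET_PAGE_MASK (~(uint64_t)0xfff)   /* assume 4 KiB pages */
    #define TLB_MMIO         ((uint64_t)1 << 0)
    #define TLB_WATCHPOINT   ((uint64_t)1 << 1)

    /* Stand-in for cpu_check_watchpoint(); the real one may longjmp out. */
    static void check_watchpoint(uint64_t addr)
    {
        printf("watchpoint check at 0x%" PRIx64 "\n", addr);
    }

    static const char *classify_load(uint64_t addr, uint64_t tlb_addr)
    {
        if (tlb_addr & ~TARGET_PAGE_MASK) {       /* any flag bit set? */
            if (tlb_addr & TLB_WATCHPOINT) {
                check_watchpoint(addr);
                tlb_addr &= ~TLB_WATCHPOINT;
                /* A watched RAM page still uses the direct path. */
                if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
                    return "RAM, after watchpoint check";
                }
            }
            return "I/O";
        }
        return "plain RAM";
    }

    int main(void)
    {
        uint64_t page = 0x10000;
        printf("%s\n", classify_load(page + 8, page));
        printf("%s\n", classify_load(page + 8, page | TLB_WATCHPOINT));
        printf("%s\n", classify_load(page + 8, page | TLB_MMIO));
        return 0;
    }

[A watched RAM page thus pays one extra branch on the slow path and then
falls back to direct memory access, which is why PHYS_SECTION_WATCH and
io_mem_watch can go away.]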
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c | 89
1 file changed, 79 insertions(+), 10 deletions(-)
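[Editor's note, a second piece of background before the patch itself:
the "helper return address" the message refers to is the host-code
address a TCG helper was called from, captured in the real tree by
GETPC() and consumed by cpu_loop_exit_restore(), which longjmps back to
the execution loop and uses that address to re-derive consistent guest
state. The toy below only illustrates the shape of that mechanism; the
watched address and all names are made up.]

    #include <setjmp.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Like QEMU's GETPC(): the host address the helper was called from
     * (GCC/clang builtin). */
    #define GETPC_TOY() ((uintptr_t)__builtin_return_address(0))

    static jmp_buf exec_loop;
    static uintptr_t unwind_pc;

    /* Stand-in for cpu_loop_exit_restore(): leave the helper without
     * returning, remembering where to unwind from. */
    static void raise_exception(uintptr_t retaddr)
    {
        unwind_pc = retaddr;
        longjmp(exec_loop, 1);
    }

    /* noinline so the builtin really sees the call site. */
    __attribute__((noinline))
    static uint64_t load_helper_toy(uint64_t addr)
    {
        uintptr_t retaddr = GETPC_TOY();
        if (addr == 0x2000) {        /* pretend this address is watched */
            raise_exception(retaddr);
        }
        return 42;                   /* pretend memory contents */
    }

    int main(void)
    {
        if (setjmp(exec_loop) == 0) {
            printf("load 0x1000 -> %d\n", (int)load_helper_toy(0x1000));
            printf("load 0x2000 -> %d\n", (int)load_helper_toy(0x2000));
        } else {
            printf("unwound; faulting call site was %p\n", (void *)unwind_pc);
        }
        return 0;
    }

[The old check_watchpoint sat below the memory API, several calls away
from any helper, so no such address was available; moving the check into
load_helper/store_helper is what makes the unwind safe.]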
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d0f8db3..9a9a626 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -710,6 +710,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
+    int wp_flags;
 
     assert_cpu_is_self(cpu);
 
@@ -752,6 +753,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     code_address = address;
     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                             paddr_page, xlat, prot, &address);
+    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
+                                              TARGET_PAGE_SIZE);
 
     index = tlb_index(env, mmu_idx, vaddr_page);
     te = tlb_entry(env, mmu_idx, vaddr_page);
@@ -805,6 +808,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     tn.addend = addend - vaddr_page;
     if (prot & PAGE_READ) {
         tn.addr_read = address;
+        if (wp_flags & BP_MEM_READ) {
+            tn.addr_read |= TLB_WATCHPOINT;
+        }
     } else {
         tn.addr_read = -1;
     }
@@ -831,6 +837,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         if (prot & PAGE_WRITE_INV) {
             tn.addr_write |= TLB_INVALID_MASK;
         }
+        if (wp_flags & BP_MEM_WRITE) {
+            tn.addr_write |= TLB_WATCHPOINT;
+        }
     }
 
     copy_tlb_helper_locked(te, &tn);
@@ -1264,13 +1273,33 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         tlb_addr &= ~TLB_INVALID_MASK;
     }
 
-    /* Handle an IO access. */
+    /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+
+        /* For anything that is unaligned, recurse through full_load. */
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }
-        return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
-                        mmu_idx, addr, retaddr, access_type, op);
+
+        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        /* Handle watchpoints. */
+        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+            /* On watchpoint hit, this will longjmp out. */
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
+
+            /* The backing page may or may not require I/O. */
+            tlb_addr &= ~TLB_WATCHPOINT;
+            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
+                goto do_aligned_access;
+            }
+        }
+
+        /* Handle I/O access. */
+        return io_readx(env, iotlbentry, mmu_idx, addr,
+                        retaddr, access_type, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -1297,6 +1326,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         return res & MAKE_64BIT_MASK(0, size * 8);
     }
 
+ do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     switch (op) {
     case MO_UB:
@@ -1486,13 +1516,32 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
     }
 
-    /* Handle an IO access. */
+    /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+
+        /* For anything that is unaligned, recurse through byte stores. */
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }
-        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
-                  val, addr, retaddr, op);
+
+        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        /* Handle watchpoints. */
+        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+            /* On watchpoint hit, this will longjmp out. */
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
+
+            /* The backing page may or may not require I/O. */
+            tlb_addr &= ~TLB_WATCHPOINT;
+            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
+                goto do_aligned_access;
+            }
+        }
+
+        /* Handle I/O access. */
+        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
         return;
     }
 
@@ -1517,10 +1566,29 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         index2 = tlb_index(env, mmu_idx, page2);
         entry2 = tlb_entry(env, mmu_idx, page2);
         tlb_addr2 = tlb_addr_write(entry2);
-        if (!tlb_hit_page(tlb_addr2, page2)
-            && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
-            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
-                     mmu_idx, retaddr);
+        if (!tlb_hit_page(tlb_addr2, page2)) {
+            if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
+                tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
+                         mmu_idx, retaddr);
+                index2 = tlb_index(env, mmu_idx, page2);
+                entry2 = tlb_entry(env, mmu_idx, page2);
+            }
+            tlb_addr2 = tlb_addr_write(entry2);
+        }
+
+        /*
+         * Handle watchpoints. Since this may trap, all checks
+         * must happen before any store.
+         */
+        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+            cpu_check_watchpoint(env_cpu(env), addr, size - size2,
+                                 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+                                 BP_MEM_WRITE, retaddr);
+        }
+        if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
+            cpu_check_watchpoint(env_cpu(env), page2, size2,
+                                 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+                                 BP_MEM_WRITE, retaddr);
         }
 
         /*
@@ -1542,6 +1610,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
+ do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     switch (op) {
     case MO_UB: