Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c     |  23
-rw-r--r--  accel/tcg/user-exec.c  | 114
2 files changed, 42 insertions(+), 95 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 3010dd4..631f1fe 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1742,6 +1742,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
bool crosspage;
+ vaddr last;
int flags;
l->memop = get_memop(oi);
@@ -1751,13 +1752,15 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
l->page[0].addr = addr;
l->page[0].size = memop_size(l->memop);
- l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
+ l->page[1].addr = 0;
l->page[1].size = 0;
- crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
- if (likely(!crosspage)) {
- mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+ /* Lookup and recognize exceptions from the first page. */
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+ last = addr + l->page[0].size - 1;
+ crosspage = (addr ^ last) & TARGET_PAGE_MASK;
+ if (likely(!crosspage)) {
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
@@ -1767,18 +1770,18 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
}
} else {
/* Finish compute of page crossing. */
- int size0 = l->page[1].addr - addr;
+ vaddr addr1 = last & TARGET_PAGE_MASK;
+ int size0 = addr1 - addr;
l->page[1].size = l->page[0].size - size0;
l->page[0].size = size0;
-
l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx,
- l->page[1].addr, addr);
+ addr1, addr);
/*
- * Lookup both pages, recognizing exceptions from either. If the
- * second lookup potentially resized, refresh first CPUTLBEntryFull.
+ * Lookup and recognize exceptions from the second page.
+ * If the lookup potentially resized the table, refresh the
+ * first CPUTLBEntryFull pointer.
*/
- mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
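
A note on the new cross-page test in mmu_lookup(): the patch computes last = addr + size - 1 and checks (addr ^ last) & TARGET_PAGE_MASK, which is nonzero exactly when the first and last byte of the access fall on different pages. A minimal standalone sketch of that arithmetic (the 4 KiB page size here is illustrative only; the real mask is the per-target TARGET_PAGE_MASK):

#include <stdio.h>
#include <stdint.h>

/* Illustrative 4 KiB pages; QEMU uses the per-target TARGET_PAGE_MASK. */
#define PAGE_BITS 12
#define PAGE_MASK (~(((uint64_t)1 << PAGE_BITS) - 1))

/* Nonzero when [addr, addr + size - 1] straddles a page boundary,
 * mirroring the (addr ^ last) & TARGET_PAGE_MASK test in the patch. */
static int crosses_page(uint64_t addr, unsigned size)
{
    uint64_t last = addr + size - 1;
    return ((addr ^ last) & PAGE_MASK) != 0;
}

int main(void)
{
    printf("%d\n", crosses_page(0x0ffc, 4));  /* 0: ends at 0x0fff, same page */
    printf("%d\n", crosses_page(0x0ffe, 4));  /* 1: ends at 0x1001, next page */
    return 0;
}
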
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 916f187..1800dff 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -269,48 +269,6 @@ static void pageflags_create(vaddr start, vaddr last, int flags)
interval_tree_insert(&p->itree, &pageflags_root);
}
-/* A subroutine of page_set_flags: remove everything in [start,last]. */
-static bool pageflags_unset(vaddr start, vaddr last)
-{
- bool inval_tb = false;
-
- while (true) {
- PageFlagsNode *p = pageflags_find(start, last);
- vaddr p_last;
-
- if (!p) {
- break;
- }
-
- if (p->flags & PAGE_EXEC) {
- inval_tb = true;
- }
-
- interval_tree_remove(&p->itree, &pageflags_root);
- p_last = p->itree.last;
-
- if (p->itree.start < start) {
- /* Truncate the node from the end, or split out the middle. */
- p->itree.last = start - 1;
- interval_tree_insert(&p->itree, &pageflags_root);
- if (last < p_last) {
- pageflags_create(last + 1, p_last, p->flags);
- break;
- }
- } else if (p_last <= last) {
- /* Range completely covers node -- remove it. */
- g_free_rcu(p, rcu);
- } else {
- /* Truncate the node from the start. */
- p->itree.start = last + 1;
- interval_tree_insert(&p->itree, &pageflags_root);
- break;
- }
- }
-
- return inval_tb;
-}
-
/*
* A subroutine of page_set_flags: nothing overlaps [start,last],
* but check adjacent mappings and maybe merge into a single range.
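
The removed pageflags_unset() handled three interval-splitting cases for each node overlapping [start, last]; that behaviour is now folded into pageflags_set_clear() (see the page_set_flags() change at the end of this patch, which widens the clear mask to -1 when PAGE_VALID is dropped). A toy sketch of those three per-node cases, with illustrative names and no interval tree:

#include <stdio.h>
#include <inttypes.h>

/* Toy model of the per-node cases the removed pageflags_unset() handled
 * when erasing [start, last]; the real code rebalances an interval tree. */
static void erase_from_node(uint64_t p_start, uint64_t p_last,
                            uint64_t start, uint64_t last)
{
    if (p_start < start) {
        /* Truncate the node from the end, or split out the middle. */
        printf("keep [%#" PRIx64 ", %#" PRIx64 "]", p_start, start - 1);
        if (last < p_last) {
            printf(" and [%#" PRIx64 ", %#" PRIx64 "]", last + 1, p_last);
        }
        printf("\n");
    } else if (p_last <= last) {
        /* Range completely covers node -- remove it. */
        printf("remove node entirely\n");
    } else {
        /* Truncate the node from the start. */
        printf("keep [%#" PRIx64 ", %#" PRIx64 "]\n", last + 1, p_last);
    }
}

int main(void)
{
    erase_from_node(0x1000, 0x8fff, 0x3000, 0x4fff);  /* split out the middle */
    erase_from_node(0x2000, 0x2fff, 0x1000, 0x3fff);  /* fully covered */
    erase_from_node(0x1000, 0x8fff, 0x0000, 0x2fff);  /* truncate from start */
    return 0;
}
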
@@ -356,15 +314,6 @@ static void pageflags_create_merge(vaddr start, vaddr last, int flags)
}
}
-/*
- * Allow the target to decide if PAGE_TARGET_[12] may be reset.
- * By default, they are not kept.
- */
-#ifndef PAGE_TARGET_STICKY
-#define PAGE_TARGET_STICKY 0
-#endif
-#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
-
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(vaddr start, vaddr last,
int set_flags, int clear_flags)
@@ -377,7 +326,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
restart:
p = pageflags_find(start, last);
if (!p) {
- if (set_flags) {
+ if (set_flags & PAGE_VALID) {
pageflags_create_merge(start, last, set_flags);
}
goto done;
@@ -391,11 +340,12 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
/*
* Need to flush if an overlapping executable region
- * removes exec, or adds write.
+ * removes exec, adds write, or is a new mapping.
*/
if ((p_flags & PAGE_EXEC)
&& (!(merge_flags & PAGE_EXEC)
- || (merge_flags & ~p_flags & PAGE_WRITE))) {
+ || (merge_flags & ~p_flags & PAGE_WRITE)
+ || (clear_flags & PAGE_VALID))) {
inval_tb = true;
}
@@ -404,7 +354,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
* attempting to merge with adjacent regions.
*/
if (start == p_start && last == p_last) {
- if (merge_flags) {
+ if (merge_flags & PAGE_VALID) {
p->flags = merge_flags;
} else {
interval_tree_remove(&p->itree, &pageflags_root);
@@ -424,12 +374,12 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
interval_tree_insert(&p->itree, &pageflags_root);
if (last < p_last) {
- if (merge_flags) {
+ if (merge_flags & PAGE_VALID) {
pageflags_create(start, last, merge_flags);
}
pageflags_create(last + 1, p_last, p_flags);
} else {
- if (merge_flags) {
+ if (merge_flags & PAGE_VALID) {
pageflags_create(start, p_last, merge_flags);
}
if (p_last < last) {
@@ -438,18 +388,18 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
}
}
} else {
- if (start < p_start && set_flags) {
+ if (start < p_start && (set_flags & PAGE_VALID)) {
pageflags_create(start, p_start - 1, set_flags);
}
if (last < p_last) {
interval_tree_remove(&p->itree, &pageflags_root);
p->itree.start = last + 1;
interval_tree_insert(&p->itree, &pageflags_root);
- if (merge_flags) {
+ if (merge_flags & PAGE_VALID) {
pageflags_create(start, last, merge_flags);
}
} else {
- if (merge_flags) {
+ if (merge_flags & PAGE_VALID) {
p->flags = merge_flags;
} else {
interval_tree_remove(&p->itree, &pageflags_root);
@@ -497,7 +447,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
g_free_rcu(p, rcu);
goto restart;
}
- if (set_flags) {
+ if (set_flags & PAGE_VALID) {
pageflags_create(start, last, set_flags);
}
@@ -505,42 +455,36 @@ static bool pageflags_set_clear(vaddr start, vaddr last,
return inval_tb;
}
-void page_set_flags(vaddr start, vaddr last, int flags)
+void page_set_flags(vaddr start, vaddr last, int set_flags, int clear_flags)
{
- bool reset = false;
- bool inval_tb = false;
-
- /* This function should never be called with addresses outside the
- guest address space. If this assert fires, it probably indicates
- a missing call to h2g_valid. */
+ /*
+ * This function should never be called with addresses outside the
+ * guest address space. If this assert fires, it probably indicates
+ * a missing call to h2g_valid.
+ */
assert(start <= last);
assert(last <= guest_addr_max);
- /* Only set PAGE_ANON with new mappings. */
- assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
start &= TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK;
- if (!(flags & PAGE_VALID)) {
- flags = 0;
- } else {
- reset = flags & PAGE_RESET;
- flags &= ~PAGE_RESET;
- if (flags & PAGE_WRITE) {
- flags |= PAGE_WRITE_ORG;
- }
+ if (set_flags & PAGE_WRITE) {
+ set_flags |= PAGE_WRITE_ORG;
+ }
+ if (clear_flags & PAGE_WRITE) {
+ clear_flags |= PAGE_WRITE_ORG;
}
- if (!flags || reset) {
+ if (clear_flags & PAGE_VALID) {
page_reset_target_data(start, last);
- inval_tb |= pageflags_unset(start, last);
- }
- if (flags) {
- inval_tb |= pageflags_set_clear(start, last, flags,
- ~(reset ? 0 : PAGE_STICKY));
+ clear_flags = -1;
+ } else {
+ /* Only set PAGE_ANON with new mappings. */
+ assert(!(set_flags & PAGE_ANON));
}
- if (inval_tb) {
+
+ if (pageflags_set_clear(start, last, set_flags, clear_flags)) {
tb_invalidate_phys_range(NULL, start, last);
}
}
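
For readers tracking the interface change: page_set_flags() now takes explicit set and clear masks instead of a single flags word with PAGE_RESET, and clearing PAGE_VALID discards the old mapping entirely (the patch widens clear_flags to -1 in that case). A self-contained sketch of that set/clear semantics, using made-up flag values rather than QEMU's real PAGE_* definitions:

#include <stdio.h>

/* Made-up flag values for illustration; QEMU defines the real PAGE_* bits. */
enum {
    PAGE_READ  = 1 << 0,
    PAGE_WRITE = 1 << 1,
    PAGE_EXEC  = 1 << 2,
    PAGE_VALID = 1 << 3,
};

/* Toy model of the new interface: set_flags bits are added, clear_flags
 * bits are removed, and clearing PAGE_VALID throws away the old mapping. */
static int apply_set_clear(int old_flags, int set_flags, int clear_flags)
{
    if (clear_flags & PAGE_VALID) {
        clear_flags = -1;           /* discard everything that was there */
    }
    return (old_flags & ~clear_flags) | set_flags;
}

int main(void)
{
    int f = PAGE_VALID | PAGE_READ | PAGE_EXEC;

    /* protection change: add write, drop exec, keep the rest */
    f = apply_set_clear(f, PAGE_WRITE, PAGE_EXEC);
    printf("after protection change: %#x\n", f);    /* VALID | READ | WRITE */

    /* unmap: clearing PAGE_VALID leaves only what is explicitly set */
    f = apply_set_clear(f, 0, PAGE_VALID);
    printf("after unmap: %#x\n", f);                /* 0 */
    return 0;
}
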