-rw-r--r--   cpu-common.h   |  1
-rw-r--r--   exec.c         | 38
-rw-r--r--   trace-events   |  3
-rw-r--r--   xen-mapcache.c | 33
4 files changed, 72 insertions(+), 3 deletions(-)
diff --git a/cpu-common.h b/cpu-common.h
index 6410ccc..151c32c 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -67,6 +67,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr);
/* Same but slower, to use for migration, where the order of
* RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index 8c81cff..a6df2d6 100644
--- a/exec.c
+++ b/exec.c
@@ -3110,6 +3110,27 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
return NULL;
}

+void qemu_put_ram_ptr(void *addr)
+{
+    trace_qemu_put_ram_ptr(addr);
+
+    if (xen_mapcache_enabled()) {
+        RAMBlock *block;
+
+        QLIST_FOREACH(block, &ram_list.blocks, next) {
+            if (addr == block->host) {
+                break;
+            }
+        }
+        if (block && block->host) {
+            xen_unmap_block(block->host, block->length);
+            block->host = NULL;
+        } else {
+            qemu_map_cache_unlock(addr);
+        }
+    }
+}
+
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
RAMBlock *block;
@@ -3825,6 +3846,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
cpu_physical_memory_set_dirty_flags(
addr1, (0xff & ~CODE_DIRTY_FLAG));
}
+ qemu_put_ram_ptr(ptr);
}
} else {
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3852,9 +3874,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
- memcpy(buf, ptr, l);
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
+ memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
+ qemu_put_ram_ptr(ptr);
}
}
len -= l;
@@ -3895,6 +3917,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
+ qemu_put_ram_ptr(ptr);
}
len -= l;
buf += l;
@@ -4036,6 +4059,15 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
access_len -= l;
}
}
+        if (xen_mapcache_enabled()) {
+            uint8_t *buffer1 = buffer;
+            uint8_t *end_buffer = buffer + len;
+
+            while (buffer1 < end_buffer) {
+                qemu_put_ram_ptr(buffer1);
+                buffer1 += TARGET_PAGE_SIZE;
+            }
+        }
return;
}
if (is_write) {
diff --git a/trace-events b/trace-events
index d703347..a00b63c 100644
--- a/trace-events
+++ b/trace-events
@@ -371,3 +371,6 @@ disable qemu_remap_bucket(uint64_t index) "index %#"PRIx64""
disable qemu_map_cache_return(void* ptr) "%p"
disable xen_map_block(uint64_t phys_addr, uint64_t size) "%#"PRIx64", size %#"PRIx64""
disable xen_unmap_block(void* addr, unsigned long size) "%p, size %#lx"
+
+# exec.c
+disable qemu_put_ram_ptr(void* addr) "%p"
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 2ca18ce..349cc62 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -196,6 +196,39 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
return mapcache->last_address_vaddr + address_offset;
}

+void qemu_map_cache_unlock(void *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        return;
+    }
+    if (entry->lock > 0) {
+        entry->lock--;
+    }
+}
+
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
MapCacheRev *reventry;
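
On the Xen side, the new qemu_map_cache_unlock() undoes a locked qemu_map_cache() lookup: it finds the buffer in locked_entries and decrements the matching entry's lock count. A hedged sketch of that pairing; the helper name, the #includes, and the visibility of the unlock prototype here are assumptions for illustration, not part of the patch:

/* Sketch only: a locked mapcache lookup paired with the new unlock. */
#include <stdint.h>
#include <string.h>
#include "xen-mapcache.h"   /* qemu_map_cache(), qemu_map_cache_unlock() */

static void copy_from_guest(target_phys_addr_t gpa, void *dst, size_t len)
{
    /* lock != 0 keeps the bucket mapped and records the returned vaddr in
     * mapcache->locked_entries. */
    uint8_t *p = qemu_map_cache(gpa, len, 1);

    memcpy(dst, p, len);

    /* qemu_map_cache_unlock() looks that vaddr up in locked_entries and
     * drops the entry's lock count, so the bucket can be recycled later. */
    qemu_map_cache_unlock(p);
}
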