path: root/exec.c
author    Peter Maydell <peter.maydell@linaro.org>    2017-09-23 12:55:40 +0100
committer Peter Maydell <peter.maydell@linaro.org>    2017-09-23 12:55:40 +0100
commit    460b6c8e581aa06b86f59eebd9e52edfe7adf417 (patch)
tree      9115b412fa484e5394ec05e27137849be5037daa /exec.c
parent    c348b54ab5c3e6c80fbf365b671974fd92f39113 (diff)
parent    bb86d05f4afab3ebfee2e897e295d61dbd8cc28e (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* Speed up AddressSpaceDispatch creation (Alexey)
* Fix kvm.c assert (David)
* Memory fixes and further speedup (me)
* Persistent reservation manager infrastructure (me)
* virtio-serial: add enable_backend callback (Pavel)
* chardev GMainContext fixes (Peter)

# gpg: Signature made Fri 22 Sep 2017 20:07:33 BST
# gpg: using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (32 commits)
  chardev: remove context in chr_update_read_handler
  chardev: use per-dev context for io_add_watch_poll
  chardev: add Chardev.gcontext field
  chardev: new qemu_chr_be_update_read_handlers()
  scsi: add persistent reservation manager using qemu-pr-helper
  scsi: add multipath support to qemu-pr-helper
  scsi: build qemu-pr-helper
  scsi, file-posix: add support for persistent reservation management
  memory: Share special empty FlatView
  memory: seek FlatView sharing candidates among children subregions
  memory: trace FlatView creation and destruction
  memory: Create FlatView directly
  memory: Get rid of address_space_init_shareable
  memory: Rework "info mtree" to print flat views and dispatch trees
  memory: Do not allocate FlatView in address_space_init
  memory: Share FlatView's and dispatch trees between address spaces
  memory: Move address_space_update_ioeventfds
  memory: Alloc dispatch tree where topology is generared
  memory: Store physical root MR in FlatView
  memory: Rename mem_begin/mem_commit/mem_add helpers
  ...

# Conflicts:
#   configure
Diffstat (limited to 'exec.c')
-rw-r--r--    exec.c    330
1 file changed, 200 insertions(+), 130 deletions(-)
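
The hunks below convert exec.c from a per-AddressSpace dispatch pointer to a dispatch reached through a (shareable) FlatView. As a reading aid, here is a minimal sketch of the resulting translation path; it uses only names that appear in this diff (address_space_to_flatview, flatview_translate, memory_access_is_direct), while the wrapper function itself is hypothetical:

    static bool example_probe(AddressSpace *as, hwaddr addr, bool is_write)
    {
        hwaddr xlat, len = 4;
        FlatView *fv;
        MemoryRegion *mr;
        bool direct;

        rcu_read_lock();                     /* keeps fv and its dispatch alive */
        fv = address_space_to_flatview(as);  /* RCU-protected per-AS pointer */
        mr = flatview_translate(fv, addr, &xlat, &len, is_write);
        direct = memory_access_is_direct(mr, is_write);
        rcu_read_unlock();
        return direct;
    }

The public entry points (address_space_rw/read/write and friends) keep their signatures and simply wrap the new flatview_* helpers, as shown in the hunks below.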
diff --git a/exec.c b/exec.c
index a25a4c6..7a80460 100644
--- a/exec.c
+++ b/exec.c
@@ -187,21 +187,18 @@ typedef struct PhysPageMap {
} PhysPageMap;
struct AddressSpaceDispatch {
- struct rcu_head rcu;
-
MemoryRegionSection *mru_section;
/* This is a multi-level map on the physical address space.
* The bottom level has pointers to MemoryRegionSections.
*/
PhysPageEntry phys_map;
PhysPageMap map;
- AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
MemoryRegion iomem;
- AddressSpace *as;
+ FlatView *fv;
hwaddr base;
uint16_t sub_section[];
} subpage_t;
@@ -361,7 +358,7 @@ static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
}
}
-static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
+void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
if (d->phys_map.skip) {
phys_page_compact(&d->phys_map, d->map.nodes);
@@ -471,12 +468,13 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
}
/* Called from RCU critical section */
-static MemoryRegionSection address_space_do_translate(AddressSpace *as,
- hwaddr addr,
- hwaddr *xlat,
- hwaddr *plen,
- bool is_write,
- bool is_mmio)
+static MemoryRegionSection flatview_do_translate(FlatView *fv,
+ hwaddr addr,
+ hwaddr *xlat,
+ hwaddr *plen,
+ bool is_write,
+ bool is_mmio,
+ AddressSpace **target_as)
{
IOMMUTLBEntry iotlb;
MemoryRegionSection *section;
@@ -484,8 +482,9 @@ static MemoryRegionSection address_space_do_translate(AddressSpace *as,
IOMMUMemoryRegionClass *imrc;
for (;;) {
- AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
- section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);
+ section = address_space_translate_internal(
+ flatview_to_dispatch(fv), addr, &addr,
+ plen, is_mmio);
iommu_mr = memory_region_get_iommu(section->mr);
if (!iommu_mr) {
@@ -502,7 +501,8 @@ static MemoryRegionSection address_space_do_translate(AddressSpace *as,
goto translate_fail;
}
- as = iotlb.target_as;
+ fv = address_space_to_flatview(iotlb.target_as);
+ *target_as = iotlb.target_as;
}
*xlat = addr;
@@ -524,8 +524,8 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
plen = (hwaddr)-1;
/* This can never be MMIO. */
- section = address_space_do_translate(as, addr, &xlat, &plen,
- is_write, false);
+ section = flatview_do_translate(address_space_to_flatview(as), addr,
+ &xlat, &plen, is_write, false, &as);
/* Illegal translation */
if (section.mr == &io_mem_unassigned) {
@@ -548,7 +548,7 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
plen -= 1;
return (IOMMUTLBEntry) {
- .target_as = section.address_space,
+ .target_as = as,
.iova = addr & ~plen,
.translated_addr = xlat & ~plen,
.addr_mask = plen,
@@ -561,15 +561,15 @@ iotlb_fail:
}
/* Called from RCU critical section */
-MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
- hwaddr *xlat, hwaddr *plen,
- bool is_write)
+MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
+ hwaddr *plen, bool is_write)
{
MemoryRegion *mr;
MemoryRegionSection section;
+ AddressSpace *as = NULL;
/* This can be MMIO, so setup MMIO bit. */
- section = address_space_do_translate(as, addr, xlat, plen, is_write, true);
+ section = flatview_do_translate(fv, addr, xlat, plen, is_write, true, &as);
mr = section.mr;
if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
@@ -1219,7 +1219,7 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
} else {
AddressSpaceDispatch *d;
- d = atomic_rcu_read(&section->address_space->dispatch);
+ d = flatview_to_dispatch(section->fv);
iotlb = section - d->map.sections;
iotlb += xlat;
}
@@ -1245,7 +1245,7 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
uint16_t section);
-static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
+static subpage_t *subpage_init(FlatView *fv, hwaddr base);
static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
qemu_anon_ram_alloc;
@@ -1302,8 +1302,9 @@ static void phys_sections_free(PhysPageMap *map)
g_free(map->nodes);
}
-static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
+static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
+ AddressSpaceDispatch *d = flatview_to_dispatch(fv);
subpage_t *subpage;
hwaddr base = section->offset_within_address_space
& TARGET_PAGE_MASK;
@@ -1317,8 +1318,8 @@ static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *secti
assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
if (!(existing->mr->subpage)) {
- subpage = subpage_init(d->as, base);
- subsection.address_space = d->as;
+ subpage = subpage_init(fv, base);
+ subsection.fv = fv;
subsection.mr = &subpage->iomem;
phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
phys_section_add(&d->map, &subsection));
@@ -1332,9 +1333,10 @@ static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *secti
}
-static void register_multipage(AddressSpaceDispatch *d,
+static void register_multipage(FlatView *fv,
MemoryRegionSection *section)
{
+ AddressSpaceDispatch *d = flatview_to_dispatch(fv);
hwaddr start_addr = section->offset_within_address_space;
uint16_t section_index = phys_section_add(&d->map, section);
uint64_t num_pages = int128_get64(int128_rshift(section->size,
@@ -1344,10 +1346,8 @@ static void register_multipage(AddressSpaceDispatch *d,
phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
-static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
+void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
- AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
- AddressSpaceDispatch *d = as->next_dispatch;
MemoryRegionSection now = *section, remain = *section;
Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
@@ -1356,7 +1356,7 @@ static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
- now.offset_within_address_space;
now.size = int128_min(int128_make64(left), now.size);
- register_subpage(d, &now);
+ register_subpage(fv, &now);
} else {
now.size = int128_zero();
}
@@ -1366,13 +1366,13 @@ static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
remain.offset_within_region += int128_get64(now.size);
now = remain;
if (int128_lt(remain.size, page_size)) {
- register_subpage(d, &now);
+ register_subpage(fv, &now);
} else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
now.size = page_size;
- register_subpage(d, &now);
+ register_subpage(fv, &now);
} else {
now.size = int128_and(now.size, int128_neg(page_size));
- register_multipage(d, &now);
+ register_multipage(fv, &now);
}
}
}
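
For reference, the splitting logic in flatview_add_to_dispatch() above registers any non-page-aligned head or short tail of a section through register_subpage(), and the page-aligned middle through register_multipage(). A worked example with made-up numbers, assuming 4 KiB target pages:

    /* Illustrative only: a section at offset 0x0f00 with size 0x2200
     * (covering 0x0f00..0x30ff) is dispatched as
     *
     *   0x0f00..0x0fff   unaligned head, < 1 page   -> register_subpage()
     *   0x1000..0x2fff   whole pages                -> register_multipage()
     *   0x3000..0x30ff   short tail                 -> register_subpage()
     *
     * which is what the int128_* arithmetic above computes. */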
@@ -2500,6 +2500,11 @@ static const MemoryRegionOps watch_mem_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
+static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ const uint8_t *buf, int len);
+static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
+ bool is_write);
+
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
unsigned len, MemTxAttrs attrs)
{
@@ -2511,8 +2516,7 @@ static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
subpage, len, addr);
#endif
- res = address_space_read(subpage->as, addr + subpage->base,
- attrs, buf, len);
+ res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
if (res) {
return res;
}
@@ -2561,8 +2565,7 @@ static MemTxResult subpage_write(void *opaque, hwaddr addr,
default:
abort();
}
- return address_space_write(subpage->as, addr + subpage->base,
- attrs, buf, len);
+ return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
@@ -2574,8 +2577,8 @@ static bool subpage_accepts(void *opaque, hwaddr addr,
__func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif
- return address_space_access_valid(subpage->as, addr + subpage->base,
- len, is_write);
+ return flatview_access_valid(subpage->fv, addr + subpage->base,
+ len, is_write);
}
static const MemoryRegionOps subpage_ops = {
@@ -2609,12 +2612,12 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
return 0;
}
-static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
+static subpage_t *subpage_init(FlatView *fv, hwaddr base)
{
subpage_t *mmio;
mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
- mmio->as = as;
+ mmio->fv = fv;
mmio->base = base;
memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
NULL, TARGET_PAGE_SIZE);
@@ -2628,12 +2631,11 @@ static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
return mmio;
}
-static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
- MemoryRegion *mr)
+static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
- assert(as);
+ assert(fv);
MemoryRegionSection section = {
- .address_space = as,
+ .fv = fv,
.mr = mr,
.offset_within_address_space = 0,
.offset_within_region = 0,
@@ -2670,46 +2672,31 @@ static void io_mem_init(void)
NULL, UINT64_MAX);
}
-static void mem_begin(MemoryListener *listener)
+AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
{
- AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
uint16_t n;
- n = dummy_section(&d->map, as, &io_mem_unassigned);
+ n = dummy_section(&d->map, fv, &io_mem_unassigned);
assert(n == PHYS_SECTION_UNASSIGNED);
- n = dummy_section(&d->map, as, &io_mem_notdirty);
+ n = dummy_section(&d->map, fv, &io_mem_notdirty);
assert(n == PHYS_SECTION_NOTDIRTY);
- n = dummy_section(&d->map, as, &io_mem_rom);
+ n = dummy_section(&d->map, fv, &io_mem_rom);
assert(n == PHYS_SECTION_ROM);
- n = dummy_section(&d->map, as, &io_mem_watch);
+ n = dummy_section(&d->map, fv, &io_mem_watch);
assert(n == PHYS_SECTION_WATCH);
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
- d->as = as;
- as->next_dispatch = d;
+
+ return d;
}
-static void address_space_dispatch_free(AddressSpaceDispatch *d)
+void address_space_dispatch_free(AddressSpaceDispatch *d)
{
phys_sections_free(&d->map);
g_free(d);
}
-static void mem_commit(MemoryListener *listener)
-{
- AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
- AddressSpaceDispatch *cur = as->dispatch;
- AddressSpaceDispatch *next = as->next_dispatch;
-
- phys_page_compact_all(next, next->map.nodes_nb);
-
- atomic_rcu_set(&as->dispatch, next);
- if (cur) {
- call_rcu(cur, address_space_dispatch_free, rcu);
- }
-}
-
static void tcg_commit(MemoryListener *listener)
{
CPUAddressSpace *cpuas;
@@ -2723,39 +2710,11 @@ static void tcg_commit(MemoryListener *listener)
* We reload the dispatch pointer now because cpu_reloading_memory_map()
* may have split the RCU critical section.
*/
- d = atomic_rcu_read(&cpuas->as->dispatch);
+ d = address_space_to_dispatch(cpuas->as);
atomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu);
}
-void address_space_init_dispatch(AddressSpace *as)
-{
- as->dispatch = NULL;
- as->dispatch_listener = (MemoryListener) {
- .begin = mem_begin,
- .commit = mem_commit,
- .region_add = mem_add,
- .region_nop = mem_add,
- .priority = 0,
- };
- memory_listener_register(&as->dispatch_listener, as);
-}
-
-void address_space_unregister(AddressSpace *as)
-{
- memory_listener_unregister(&as->dispatch_listener);
-}
-
-void address_space_destroy_dispatch(AddressSpace *as)
-{
- AddressSpaceDispatch *d = as->dispatch;
-
- atomic_rcu_set(&as->dispatch, NULL);
- if (d) {
- call_rcu(d, address_space_dispatch_free, rcu);
- }
-}
-
static void memory_map_init(void)
{
system_memory = g_malloc(sizeof(*system_memory));
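
With mem_begin()/mem_commit() and the per-AddressSpace dispatch listener removed above, the dispatch tree is built wherever the FlatView topology is generated (memory.c, per the commit list). A hedged sketch of that lifecycle using the helpers this diff exports; the step that attaches the new dispatch to the view is a memory.c detail that is not part of this diff and is only indicated by a comment:

    static void example_rebuild_dispatch(FlatView *fv, MemoryRegionSection *secs,
                                         int nsecs)
    {
        AddressSpaceDispatch *d = address_space_dispatch_new(fv);
        int i;

        /* assumed: memory.c installs d as fv's dispatch here, so the
         * flatview_to_dispatch(fv) calls inside the helpers below find it */

        for (i = 0; i < nsecs; i++) {
            flatview_add_to_dispatch(fv, &secs[i]);   /* replaces mem_add() */
        }
        address_space_dispatch_compact(d);  /* replaces phys_page_compact_all() */
        /* teardown happens with the view, via address_space_dispatch_free(d) */
    }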
@@ -2899,11 +2858,11 @@ static bool prepare_mmio_access(MemoryRegion *mr)
}
/* Called within RCU critical section. */
-static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs,
- const uint8_t *buf,
- int len, hwaddr addr1,
- hwaddr l, MemoryRegion *mr)
+static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf,
+ int len, hwaddr addr1,
+ hwaddr l, MemoryRegion *mr)
{
uint8_t *ptr;
uint64_t val;
@@ -2965,14 +2924,14 @@ static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
}
l = len;
- mr = address_space_translate(as, addr, &addr1, &l, true);
+ mr = flatview_translate(fv, addr, &addr1, &l, true);
}
return result;
}
-MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- const uint8_t *buf, int len)
+static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ const uint8_t *buf, int len)
{
hwaddr l;
hwaddr addr1;
@@ -2982,20 +2941,27 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
if (len > 0) {
rcu_read_lock();
l = len;
- mr = address_space_translate(as, addr, &addr1, &l, true);
- result = address_space_write_continue(as, addr, attrs, buf, len,
- addr1, l, mr);
+ mr = flatview_translate(fv, addr, &addr1, &l, true);
+ result = flatview_write_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
rcu_read_unlock();
}
return result;
}
+MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf, int len)
+{
+ return flatview_write(address_space_to_flatview(as), addr, attrs, buf, len);
+}
+
/* Called within RCU critical section. */
-MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
- int len, hwaddr addr1, hwaddr l,
- MemoryRegion *mr)
+MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ int len, hwaddr addr1, hwaddr l,
+ MemoryRegion *mr)
{
uint8_t *ptr;
uint64_t val;
@@ -3055,14 +3021,14 @@ MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
}
l = len;
- mr = address_space_translate(as, addr, &addr1, &l, false);
+ mr = flatview_translate(fv, addr, &addr1, &l, false);
}
return result;
}
-MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, int len)
+MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, int len)
{
hwaddr l;
hwaddr addr1;
@@ -3072,25 +3038,33 @@ MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
if (len > 0) {
rcu_read_lock();
l = len;
- mr = address_space_translate(as, addr, &addr1, &l, false);
- result = address_space_read_continue(as, addr, attrs, buf, len,
- addr1, l, mr);
+ mr = flatview_translate(fv, addr, &addr1, &l, false);
+ result = flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
rcu_read_unlock();
}
return result;
}
-MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, int len, bool is_write)
+static MemTxResult flatview_rw(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len, bool is_write)
{
if (is_write) {
- return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
+ return flatview_write(fv, addr, attrs, (uint8_t *)buf, len);
} else {
- return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
+ return flatview_read(fv, addr, attrs, (uint8_t *)buf, len);
}
}
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ int len, bool is_write)
+{
+ return flatview_rw(address_space_to_flatview(as),
+ addr, attrs, buf, len, is_write);
+}
+
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
int len, int is_write)
{
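
Callers are unaffected by the rewrite: the address_space_* entry points keep their signatures and resolve the FlatView internally. An illustrative, hypothetical caller, using the usual QEMU globals address_space_memory and MEMTXATTRS_UNSPECIFIED (neither is introduced by this diff):

    static MemTxResult example_poke(void)
    {
        uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

        /* Same call as before this series; it now goes through
         * address_space_to_flatview() + flatview_write() internally. */
        return address_space_write(&address_space_memory, 0x1000,
                                   MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
    }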
@@ -3248,7 +3222,8 @@ static void cpu_notify_map_clients(void)
qemu_mutex_unlock(&map_client_list_lock);
}
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
+static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
+ bool is_write)
{
MemoryRegion *mr;
hwaddr l, xlat;
@@ -3256,7 +3231,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
rcu_read_lock();
while (len > 0) {
l = len;
- mr = address_space_translate(as, addr, &xlat, &l, is_write);
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write);
if (!memory_access_is_direct(mr, is_write)) {
l = memory_access_size(mr, l, addr);
if (!memory_region_access_valid(mr, xlat, l, is_write)) {
@@ -3272,8 +3247,16 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
return true;
}
+bool address_space_access_valid(AddressSpace *as, hwaddr addr,
+ int len, bool is_write)
+{
+ return flatview_access_valid(address_space_to_flatview(as),
+ addr, len, is_write);
+}
+
static hwaddr
-address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
+flatview_extend_translation(FlatView *fv, hwaddr addr,
+ hwaddr target_len,
MemoryRegion *mr, hwaddr base, hwaddr len,
bool is_write)
{
@@ -3290,7 +3273,8 @@ address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_le
}
len = target_len;
- this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
+ this_mr = flatview_translate(fv, addr, &xlat,
+ &len, is_write);
if (this_mr != mr || xlat != base + done) {
return done;
}
@@ -3313,6 +3297,7 @@ void *address_space_map(AddressSpace *as,
hwaddr l, xlat;
MemoryRegion *mr;
void *ptr;
+ FlatView *fv = address_space_to_flatview(as);
if (len == 0) {
return NULL;
@@ -3320,7 +3305,7 @@ void *address_space_map(AddressSpace *as,
l = len;
rcu_read_lock();
- mr = address_space_translate(as, addr, &xlat, &l, is_write);
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
@@ -3336,7 +3321,7 @@ void *address_space_map(AddressSpace *as,
memory_region_ref(mr);
bounce.mr = mr;
if (!is_write) {
- address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
+ flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
bounce.buffer, l);
}
@@ -3347,7 +3332,8 @@ void *address_space_map(AddressSpace *as,
memory_region_ref(mr);
- *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
+ *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
+ l, is_write);
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
rcu_read_unlock();
@@ -3630,3 +3616,87 @@ void page_size_init(void)
}
qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
+
+#if !defined(CONFIG_USER_ONLY)
+
+static void mtree_print_phys_entries(fprintf_function mon, void *f,
+ int start, int end, int skip, int ptr)
+{
+ if (start == end - 1) {
+ mon(f, "\t%3d ", start);
+ } else {
+ mon(f, "\t%3d..%-3d ", start, end - 1);
+ }
+ mon(f, " skip=%d ", skip);
+ if (ptr == PHYS_MAP_NODE_NIL) {
+ mon(f, " ptr=NIL");
+ } else if (!skip) {
+ mon(f, " ptr=#%d", ptr);
+ } else {
+ mon(f, " ptr=[%d]", ptr);
+ }
+ mon(f, "\n");
+}
+
+#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
+ int128_sub((size), int128_one())) : 0)
+
+void mtree_print_dispatch(fprintf_function mon, void *f,
+ AddressSpaceDispatch *d, MemoryRegion *root)
+{
+ int i;
+
+ mon(f, " Dispatch\n");
+ mon(f, " Physical sections\n");
+
+ for (i = 0; i < d->map.sections_nb; ++i) {
+ MemoryRegionSection *s = d->map.sections + i;
+ const char *names[] = { " [unassigned]", " [not dirty]",
+ " [ROM]", " [watch]" };
+
+ mon(f, " #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx " %s%s%s%s%s",
+ i,
+ s->offset_within_address_space,
+ s->offset_within_address_space + MR_SIZE(s->mr->size),
+ s->mr->name ? s->mr->name : "(noname)",
+ i < ARRAY_SIZE(names) ? names[i] : "",
+ s->mr == root ? " [ROOT]" : "",
+ s == d->mru_section ? " [MRU]" : "",
+ s->mr->is_iommu ? " [iommu]" : "");
+
+ if (s->mr->alias) {
+ mon(f, " alias=%s", s->mr->alias->name ?
+ s->mr->alias->name : "noname");
+ }
+ mon(f, "\n");
+ }
+
+ mon(f, " Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
+ P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
+ for (i = 0; i < d->map.nodes_nb; ++i) {
+ int j, jprev;
+ PhysPageEntry prev;
+ Node *n = d->map.nodes + i;
+
+ mon(f, " [%d]\n", i);
+
+ for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
+ PhysPageEntry *pe = *n + j;
+
+ if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
+ continue;
+ }
+
+ mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
+
+ jprev = j;
+ prev = *pe;
+ }
+
+ if (jprev != ARRAY_SIZE(*n)) {
+ mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
+ }
+ }
+}
+
+#endif
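
The new mtree_print_dispatch() backs the reworked "info mtree" output mentioned in the commit list. A hedged sketch of invoking it directly; the fprintf/stdout pairing and the choice of address_space_memory are illustrative, real callers pass the monitor's printer:

    static void example_dump_dispatch(void)
    {
        /* Hold the RCU read lock so the dispatch tree cannot be freed
         * while it is being walked and printed. */
        rcu_read_lock();
        mtree_print_dispatch((fprintf_function)fprintf, stdout,
                             address_space_to_dispatch(&address_space_memory),
                             address_space_memory.root);
        rcu_read_unlock();
    }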