Diffstat (limited to 'target/i386/hax-mem.c')
-rw-r--r--  target/i386/hax-mem.c | 34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/target/i386/hax-mem.c b/target/i386/hax-mem.c
index 27a0d21..f46e855 100644
--- a/target/i386/hax-mem.c
+++ b/target/i386/hax-mem.c
@@ -174,6 +174,7 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
ram_addr_t size = int128_get64(section->size);
unsigned int delta;
uint64_t host_va;
+ uint32_t max_mapping_size;
/* We only care about RAM and ROM regions */
if (!memory_region_is_ram(mr)) {
@@ -206,10 +207,23 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
flags |= HAX_RAM_INFO_ROM;
}
- /* the kernel module interface uses 32-bit sizes (but we could split...) */
- g_assert(size <= UINT32_MAX);
-
- hax_update_mapping(start_pa, size, host_va, flags);
+ /*
+ * The kernel module interface uses 32-bit sizes:
+ * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_set_ram
+ *
+ * If the mapping size is longer than 32 bits, we can't process it in one
+ * call into the kernel. Instead, we split the mapping into smaller ones,
+ * and call hax_update_mapping() on each.
+ */
+ max_mapping_size = UINT32_MAX & qemu_real_host_page_mask;
+ while (size > max_mapping_size) {
+ hax_update_mapping(start_pa, max_mapping_size, host_va, flags);
+ start_pa += max_mapping_size;
+ size -= max_mapping_size;
+ host_va += max_mapping_size;
+ }
+ /* Now size <= max_mapping_size */
+ hax_update_mapping(start_pa, (uint32_t)size, host_va, flags);
}
static void hax_region_add(MemoryListener *listener,
@@ -283,12 +297,16 @@ static MemoryListener hax_memory_listener = {
static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
{
/*
- * In HAX, QEMU allocates the virtual address, and HAX kernel
- * populates the memory with physical memory. Currently we have no
- * paging, so user should make sure enough free memory in advance.
+ * We must register each RAM block with the HAXM kernel module, or
+ * hax_set_ram() will fail for any mapping into the RAM block:
+ * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_alloc_ram
+ *
+ * Old versions of the HAXM kernel module (< 6.2.0) used to preallocate all
+ * host physical pages for the RAM block as part of this registration
+ * process, hence the name hax_populate_ram().
*/
if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) {
- fprintf(stderr, "HAX failed to populate RAM");
+ fprintf(stderr, "HAX failed to populate RAM\n");
abort();
}
}
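
Note: the loop added in the first hunk is just page-aligned chunking of a 64-bit size into 32-bit pieces. A minimal standalone sketch of the same arithmetic is below; update_mapping() and the hard-coded 4 KiB page mask are stand-ins for hax_update_mapping() and qemu_real_host_page_mask, not the real QEMU/HAXM interfaces.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-in for hax_update_mapping(); here it only prints the chunk. */
static void update_mapping(uint64_t start_pa, uint32_t size,
                           uint64_t host_va, uint8_t flags)
{
    printf("map pa=0x%" PRIx64 " size=0x%" PRIx32 " va=0x%" PRIx64
           " flags=0x%x\n", start_pa, size, host_va, (unsigned)flags);
}

int main(void)
{
    /* Assumed 4 KiB host pages; QEMU derives the real mask at runtime. */
    const uint64_t host_page_mask = ~UINT64_C(0xfff);

    uint64_t start_pa = 0x100000000ULL;    /* example guest-physical base */
    uint64_t host_va  = 0x7f0000000000ULL; /* example host virtual base */
    uint64_t size     = 6ULL << 30;        /* 6 GiB, exceeds UINT32_MAX */

    /* Largest page-aligned chunk that still fits in a 32-bit size. */
    uint32_t max_mapping_size = UINT32_MAX & host_page_mask;

    while (size > max_mapping_size) {
        update_mapping(start_pa, max_mapping_size, host_va, 0);
        start_pa += max_mapping_size;
        host_va  += max_mapping_size;
        size     -= max_mapping_size;
    }
    /* Now size <= max_mapping_size, so the 32-bit cast is safe. */
    update_mapping(start_pa, (uint32_t)size, host_va, 0);
    return 0;
}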
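
The comment in the last hunk describes a registration invariant: a mapping request is rejected unless the RAM block containing it was registered first. The toy model below illustrates that ordering only; register_block() and set_mapping() are hypothetical helpers standing in for hax_populate_ram() and hax_set_ram(), not the real ioctl interface.

#include <stdint.h>
#include <stdio.h>

#define MAX_BLOCKS 16

struct block { uint64_t va; uint64_t size; };
static struct block blocks[MAX_BLOCKS];
static int nblocks;

/* Register a RAM block (cf. hax_populate_ram()). */
static int register_block(uint64_t va, uint64_t size)
{
    if (nblocks == MAX_BLOCKS) {
        return -1;
    }
    blocks[nblocks].va = va;
    blocks[nblocks].size = size;
    nblocks++;
    return 0;
}

/* Map a range (cf. hax_set_ram()); only registered memory is accepted. */
static int set_mapping(uint64_t va, uint64_t size)
{
    for (int i = 0; i < nblocks; i++) {
        if (va >= blocks[i].va &&
            va + size <= blocks[i].va + blocks[i].size) {
            return 0;               /* inside a registered block: OK */
        }
    }
    return -1;                      /* unregistered memory: rejected */
}

int main(void)
{
    static uint8_t ram[1 << 20];    /* pretend this is a QEMU RAM block */

    /* Mapping before registration fails, as the comment warns. */
    printf("before: %d\n", set_mapping((uintptr_t)ram, 4096));

    register_block((uintptr_t)ram, sizeof(ram));
    printf("after:  %d\n", set_mapping((uintptr_t)ram, 4096));
    return 0;
}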