Diffstat (limited to 'system/memory.c')
-rw-r--r-- | system/memory.c | 85
1 file changed, 32 insertions, 53 deletions
diff --git a/system/memory.c b/system/memory.c
index 4c82979..76b44b8 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -16,7 +16,7 @@
 #include "qemu/osdep.h"
 #include "qemu/log.h"
 #include "qapi/error.h"
-#include "exec/memory.h"
+#include "system/memory.h"
 #include "qapi/visitor.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
@@ -24,16 +24,16 @@
 #include "qemu/qemu-print.h"
 #include "qom/object.h"
 #include "trace.h"
-
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
 #include "system/kvm.h"
 #include "system/runstate.h"
 #include "system/tcg.h"
 #include "qemu/accel.h"
 #include "hw/boards.h"
 #include "migration/vmstate.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+
+#include "memory-internal.h"
 
 //#define DEBUG_UNASSIGNED
 
@@ -353,15 +353,6 @@ static void flatview_simplify(FlatView *view)
     }
 }
 
-static bool memory_region_big_endian(MemoryRegion *mr)
-{
-#if TARGET_BIG_ENDIAN
-    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
-#else
-    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
-#endif
-}
-
 static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
 {
     if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
@@ -563,7 +554,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
     /* FIXME: support unaligned access? */
     access_size = MAX(MIN(size, access_size_max), access_size_min);
     access_mask = MAKE_64BIT_MASK(0, access_size * 8);
-    if (memory_region_big_endian(mr)) {
+    if (devend_big_endian(mr->ops->endianness)) {
         for (i = 0; i < size; i += access_size) {
             r |= access_fn(mr, addr + i, value, access_size,
                            (size - access_size - i) * 8, access_mask, attrs);
@@ -1391,7 +1382,7 @@ static void memory_region_ram_device_write(void *opaque, hwaddr addr,
 static const MemoryRegionOps ram_device_mem_ops = {
     .read = memory_region_ram_device_read,
     .write = memory_region_ram_device_write,
-    .endianness = DEVICE_HOST_ENDIAN,
+    .endianness = HOST_BIG_ENDIAN ? DEVICE_BIG_ENDIAN : DEVICE_LITTLE_ENDIAN,
     .valid = {
         .min_access_size = 1,
         .max_access_size = 8,
@@ -1636,7 +1627,7 @@ bool memory_region_init_resizeable_ram(MemoryRegion *mr,
     return true;
 }
 
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
 bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
@@ -2115,12 +2106,16 @@ RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
     return mr->rdm;
 }
 
-void memory_region_set_ram_discard_manager(MemoryRegion *mr,
-                                           RamDiscardManager *rdm)
+int memory_region_set_ram_discard_manager(MemoryRegion *mr,
+                                          RamDiscardManager *rdm)
 {
     g_assert(memory_region_is_ram(mr));
-    g_assert(!rdm || !mr->rdm);
+    if (mr->rdm && rdm) {
+        return -EBUSY;
+    }
+
     mr->rdm = rdm;
+    return 0;
 }
 
 uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
@@ -2143,7 +2138,7 @@ bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
 
 int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
-                                         ReplayRamPopulate replay_fn,
+                                         ReplayRamDiscardState replay_fn,
                                          void *opaque)
 {
     RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
@@ -2152,15 +2147,15 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
     return rdmc->replay_populated(rdm, section, replay_fn, opaque);
 }
 
-void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
-                                          MemoryRegionSection *section,
-                                          ReplayRamDiscard replay_fn,
-                                          void *opaque)
+int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+                                         MemoryRegionSection *section,
+                                         ReplayRamDiscardState replay_fn,
+                                         void *opaque)
 {
     RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
 
     g_assert(rdmc->replay_discarded);
-    rdmc->replay_discarded(rdm, section, replay_fn, opaque);
+    return rdmc->replay_discarded(rdm, section, replay_fn, opaque);
 }
 
 void ram_discard_manager_register_listener(RamDiscardManager *rdm,
@@ -2183,18 +2178,14 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
 }
 
 /* Called with rcu_read_lock held. */
-bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
-                          ram_addr_t *ram_addr, bool *read_only,
-                          bool *mr_has_discard_manager, Error **errp)
+MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
+                                     Error **errp)
 {
     MemoryRegion *mr;
     hwaddr xlat;
     hwaddr len = iotlb->addr_mask + 1;
     bool writable = iotlb->perm & IOMMU_WO;
 
-    if (mr_has_discard_manager) {
-        *mr_has_discard_manager = false;
-    }
     /*
      * The IOMMU TLB entry we have just covers translation through
      * this IOMMU to its immediate target. We need to translate
@@ -2204,7 +2195,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                                  &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
     if (!memory_region_is_ram(mr)) {
         error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
-        return false;
+        return NULL;
     } else if (memory_region_has_ram_discard_manager(mr)) {
         RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
         MemoryRegionSection tmp = {
@@ -2212,9 +2203,6 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
             .offset_within_region = xlat,
             .size = int128_make64(len),
         };
-        if (mr_has_discard_manager) {
-            *mr_has_discard_manager = true;
-        }
         /*
          * Malicious VMs can map memory into the IOMMU, which is expected
          * to remain discarded. vfio will pin all pages, populating memory.
@@ -2225,7 +2213,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
             error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
                        " via virtio-mem): %" HWADDR_PRIx "",
                        iotlb->translated_addr);
-            return false;
+            return NULL;
         }
     }
 
@@ -2235,22 +2223,11 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
      */
    if (len & iotlb->addr_mask) {
         error_setg(errp, "iommu has granularity incompatible with target AS");
-        return false;
-    }
-
-    if (vaddr) {
-        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
-    }
-
-    if (ram_addr) {
-        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
-    }
-
-    if (read_only) {
-        *read_only = !writable || mr->readonly;
+        return NULL;
     }
 
-    return true;
+    *xlat_p = xlat;
+    return mr;
 }
 
 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
@@ -2584,7 +2561,8 @@ void memory_region_add_eventfd(MemoryRegion *mr,
     unsigned i;
 
     if (size) {
-        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
+        MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size);
+        adjust_endianness(mr, &mrfd.data, mop);
     }
     memory_region_transaction_begin();
     for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -2619,7 +2597,8 @@ void memory_region_del_eventfd(MemoryRegion *mr,
     unsigned i;
 
     if (size) {
-        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
+        MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size);
+        adjust_endianness(mr, &mrfd.data, mop);
     }
     memory_region_transaction_begin();
     for (i = 0; i < mr->ioeventfd_nb; ++i) {
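The largest interface change above replaces memory_get_xlat_addr() with memory_translate_iotlb(): instead of filling vaddr/ram_addr/read_only out-parameters, the new helper returns the MemoryRegion plus the offset, and callers derive those values themselves, exactly as the deleted lines did. A minimal caller-side sketch of that adaptation follows; the wrapper name my_get_xlat_addr and its out-parameter shape are hypothetical, and only the QEMU calls that appear in the diff are assumed real:

/*
 * Hypothetical caller-side wrapper (not part of this diff): re-derive the
 * values the removed memory_get_xlat_addr() used to return, using only
 * functions visible in the change above. Call with rcu_read_lock held.
 */
static bool my_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                             ram_addr_t *ram_addr, bool *read_only,
                             Error **errp)
{
    hwaddr xlat;
    MemoryRegion *mr = memory_translate_iotlb(iotlb, &xlat, errp);

    if (!mr) {
        return false;               /* errp was already set by the helper */
    }
    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }
    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }
    if (read_only) {
        /* Same condition the removed code computed internally. */
        *read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
    }
    return true;
}

Callers that previously relied on the mr_has_discard_manager out-parameter can instead query memory_region_has_ram_discard_manager() on the returned region. Likewise, memory_region_set_ram_discard_manager() now reports failure through its int return value, so callers should check for -EBUSY when a manager is already registered rather than relying on the old assertion.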