author     Paolo Bonzini <pbonzini@redhat.com>  2018-03-18 18:26:36 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2018-05-09 00:13:38 +0200
commit     48564041a73adbbff52834f9edbe3806fceefab7 (patch)
tree       6ea31760c88eaa9d3da36effab22e7d8343ebee2 /include/exec
parent     a411c84b561baa94b28165c52f21c33517ee8f59 (diff)
exec: reintroduce MemoryRegion caching
MemoryRegionCache was reverted to "normal" address_space_* operations
for 2.9, due to lack of support for IOMMUs.  Reinstate the
optimizations: the translation is now cached at address_space_cache_init
time, while the IOMMU lookup and the target AddressSpace translation
are not cached and still happen on each access.  Now that
MemoryRegionCache supports IOMMUs, it becomes more widely applicable too.
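For orientation, here is a minimal usage sketch of the cached accessors;
the guest physical address desc_pa and the 0x100-byte window are
hypothetical, and error handling is elided:

    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    MemTxResult res;
    uint32_t val = 0;

    /* Translate once, at cache setup time (desc_pa is made up here). */
    if (address_space_cache_init(&cache, &address_space_memory,
                                 desc_pa, 0x100, false) >= 0) {
        /* RAM-backed regions take the inline fast path added below. */
        val = address_space_ldl_le_cached(&cache, 0,
                                          MEMTXATTRS_UNSPECIFIED, &res);
        address_space_cache_destroy(&cache);
    }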
The inlined fast path is defined in memory_ldst_cached.inc.h, while the
slow path uses memory_ldst.inc.c as before.  The smaller fast path yields
a small code-size reduction in MemoryRegionCache users:

    hw/virtio/virtio.o text size before: 32373
    hw/virtio/virtio.o text size after:  31941
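For reference, with ENDIANNESS defined as _le the template added below
expands to inline helpers of roughly this shape (hand-expanded here for
illustration; see memory_ldst_cached.inc.h in the diff for the
authoritative macros):

    static inline uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache,
        hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
    {
        assert(addr < cache->len && 4 <= cache->len - addr);
        if (likely(cache->ptr)) {
            return ldl_le_p(cache->ptr + addr);   /* direct RAM access */
        } else {
            return address_space_ldl_le_cached_slow(cache, addr, attrs, result);
        }
    }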
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'include/exec')
-rw-r--r--   include/exec/cpu-all.h                   6
-rw-r--r--   include/exec/memory-internal.h           3
-rw-r--r--   include/exec/memory.h                   58
-rw-r--r--   include/exec/memory_ldst_cached.inc.h  108
4 files changed, 169 insertions(+), 6 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 173edd1..a635f53 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -175,7 +175,7 @@ extern unsigned long reserved_va;
 #define TARGET_ENDIANNESS
 #include "exec/memory_ldst.inc.h"
 
-#define SUFFIX       _cached
+#define SUFFIX       _cached_slow
 #define ARG1         cache
 #define ARG1_DECL    MemoryRegionCache *cache
 #define TARGET_ENDIANNESS
@@ -193,6 +193,10 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
 #define TARGET_ENDIANNESS
 #include "exec/memory_ldst_phys.inc.h"
 
+/* Inline fast path for direct RAM access.  */
+#define ENDIANNESS
+#include "exec/memory_ldst_cached.inc.h"
+
 #define SUFFIX       _cached
 #define ARG1         cache
 #define ARG1_DECL    MemoryRegionCache *cache
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index 6a5ee42..58399b9 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -31,6 +31,9 @@ static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
     return flatview_to_dispatch(address_space_to_flatview(as));
 }
 
+FlatView *address_space_get_flatview(AddressSpace *as);
+void flatview_unref(FlatView *view);
+
 extern const MemoryRegionOps unassigned_mem_ops;
 
 bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
diff --git a/include/exec/memory.h b/include/exec/memory.h
index ca361bc..525619a 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1688,12 +1688,16 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
 #include "exec/memory_ldst_phys.inc.h"
 
 struct MemoryRegionCache {
+    void *ptr;
     hwaddr xlat;
     hwaddr len;
-    AddressSpace *as;
+    FlatView *fv;
+    MemoryRegionSection mrs;
+    bool is_write;
 };
 
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
+
 
 /* address_space_ld*_cached: load from a cached #MemoryRegion
  * address_space_st*_cached: store into a cached #MemoryRegion
@@ -1719,11 +1723,40 @@ struct MemoryRegionCache {
  *         if NULL, this information is discarded
  */
 
-#define SUFFIX       _cached
+#define SUFFIX       _cached_slow
 #define ARG1         cache
 #define ARG1_DECL    MemoryRegionCache *cache
 #include "exec/memory_ldst.inc.h"
 
+/* Inline fast path for direct RAM access.  */
+static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
+    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len);
+    if (likely(cache->ptr)) {
+        return ldub_p(cache->ptr + addr);
+    } else {
+        return address_space_ldub_cached_slow(cache, addr, attrs, result);
+    }
+}
+
+static inline void address_space_stb_cached(MemoryRegionCache *cache,
+    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len);
+    if (likely(cache->ptr)) {
+        stb_p(cache->ptr + addr, val);
+    } else {
+        address_space_stb_cached_slow(cache, addr, val, attrs, result);
+    }
+}
+
+#define ENDIANNESS   _le
+#include "exec/memory_ldst_cached.inc.h"
+
+#define ENDIANNESS   _be
+#include "exec/memory_ldst_cached.inc.h"
+
 #define SUFFIX       _cached
 #define ARG1         cache
 #define ARG1_DECL    MemoryRegionCache *cache
@@ -1860,6 +1893,13 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                    MemoryRegion *mr);
 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
 
+/* Internal functions, part of the implementation of address_space_read_cached
+ * and address_space_write_cached.
+ */
+void address_space_read_cached_slow(MemoryRegionCache *cache,
+                                    hwaddr addr, void *buf, int len);
+void address_space_write_cached_slow(MemoryRegionCache *cache,
+                                     hwaddr addr, const void *buf, int len);
+
 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
 {
     if (is_write) {
@@ -1928,7 +1968,11 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
 {
     assert(addr < cache->len && len <= cache->len - addr);
-    address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
+    if (likely(cache->ptr)) {
+        memcpy(buf, cache->ptr + addr, len);
+    } else {
+        address_space_read_cached_slow(cache, addr, buf, len);
+    }
 }
 
 /**
@@ -1944,7 +1988,11 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                            void *buf, int len)
 {
     assert(addr < cache->len && len <= cache->len - addr);
-    address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
+    if (likely(cache->ptr)) {
+        memcpy(cache->ptr + addr, buf, len);
+    } else {
+        address_space_write_cached_slow(cache, addr, buf, len);
+    }
 }
 
 #endif
diff --git a/include/exec/memory_ldst_cached.inc.h b/include/exec/memory_ldst_cached.inc.h
new file mode 100644
index 0000000..fd4bbb4
--- /dev/null
+++ b/include/exec/memory_ldst_cached.inc.h
@@ -0,0 +1,108 @@
+/*
+ * Memory access templates for MemoryRegionCache
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define ADDRESS_SPACE_LD_CACHED(size) \
+    glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached))
+#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \
+    glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow))
+#define LD_P(size) \
+    glue(glue(ld, size), glue(ENDIANNESS, _p))
+
+static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
+    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len && 4 <= cache->len - addr);
+    if (likely(cache->ptr)) {
+        return LD_P(l)(cache->ptr + addr);
+    } else {
+        return ADDRESS_SPACE_LD_CACHED_SLOW(l)(cache, addr, attrs, result);
+    }
+}
+
+static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
+    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len && 8 <= cache->len - addr);
+    if (likely(cache->ptr)) {
+        return LD_P(q)(cache->ptr + addr);
+    } else {
+        return ADDRESS_SPACE_LD_CACHED_SLOW(q)(cache, addr, attrs, result);
+    }
+}
+
+static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
+    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len && 2 <= cache->len - addr);
+    if (likely(cache->ptr)) {
+        return LD_P(uw)(cache->ptr + addr);
+    } else {
+        return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
+    }
+}
+
+#undef ADDRESS_SPACE_LD_CACHED
+#undef ADDRESS_SPACE_LD_CACHED_SLOW
+#undef LD_P
+
+#define ADDRESS_SPACE_ST_CACHED(size) \
+    glue(glue(address_space_st, size), glue(ENDIANNESS, _cached))
+#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \
+    glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow))
+#define ST_P(size) \
+    glue(glue(st, size), glue(ENDIANNESS, _p))
+
+static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
+    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len && 4 <= cache->len - addr);
+    if (likely(cache->ptr)) {
+        ST_P(l)(cache->ptr + addr, val);
+    } else {
+        ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
+    }
+}
+
+static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len && 2 <= cache->len - addr);
+    if (likely(cache->ptr)) {
+        ST_P(w)(cache->ptr + addr, val);
+    } else {
+        ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
+    }
+}
+
+static inline void ADDRESS_SPACE_ST_CACHED(q)(MemoryRegionCache *cache,
+    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+    assert(addr < cache->len && 8 <= cache->len - addr);
+    if (likely(cache->ptr)) {
+        ST_P(q)(cache->ptr + addr, val);
+    } else {
+        ADDRESS_SPACE_ST_CACHED_SLOW(q)(cache, addr, val, attrs, result);
+    }
+}
+
+#undef ADDRESS_SPACE_ST_CACHED
+#undef ADDRESS_SPACE_ST_CACHED_SLOW
+#undef ST_P
+
+#undef ENDIANNESS
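The out-of-line _slow helpers declared above live on the exec.c side of
this patch, outside this diffstat.  A sketch of how the read path
plausibly looks there, re-translating through the cached FlatView; the
helper name address_space_translate_cached is an assumption, not shown
in this diff:

    /* Sketch only: assumes a cache-aware translation helper named
     * address_space_translate_cached() in exec.c. */
    void address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                        void *buf, int len)
    {
        hwaddr addr1, l = len;
        MemoryRegion *mr;

        /* Resolve any IOMMU in front of the cached section per access... */
        mr = address_space_translate_cached(cache, addr, &addr1, &l, false);
        /* ...then reuse the generic FlatView read loop declared in memory.h. */
        flatview_read_continue(cache->fv, addr, MEMTXATTRS_UNSPECIFIED,
                               buf, len, addr1, l, mr);
    }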