author | Edgar E. Iglesias <edgar.iglesias@amd.com> | 2025-04-25 15:16:01 +0200
committer | Edgar E. Iglesias <edgar.iglesias@amd.com> | 2025-05-06 18:39:44 +0200
commit | 88fb705600a3b612c571efc9f1a6aed923a18dcc (patch)
tree | cdd9bc802beff0693ecf78a4ed224f074fe6c643
parent | a4b20f737cda06bb8706a83e27f7fa89863ae689 (diff)
xen: mapcache: Split mapcache_grants by ro and rw
Today, we don't track write-ability in the cache. If a user
requests a readable mapping followed by a writeable mapping
on the same page, the second lookup will incorrectly hit
the readable entry.
Split mapcache_grants by ro and rw access. Grants will now
have separate ways in the cache depending on writeability.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
-rw-r--r-- | hw/xen/xen-mapcache.c | 26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index 2c8f861..e31d379 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -75,7 +75,8 @@ typedef struct MapCache {
 } MapCache;
 
 static MapCache *mapcache;
-static MapCache *mapcache_grants;
+static MapCache *mapcache_grants_ro;
+static MapCache *mapcache_grants_rw;
 static xengnttab_handle *xen_region_gnttabdev;
 
 static inline void mapcache_lock(MapCache *mc)
@@ -176,9 +177,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
      * Grant mappings must use XC_PAGE_SIZE granularity since we can't
      * map anything beyond the number of pages granted to us.
      */
-    mapcache_grants = xen_map_cache_init_single(f, opaque,
-                                                XC_PAGE_SHIFT,
-                                                max_mcache_size);
+    mapcache_grants_ro = xen_map_cache_init_single(f, opaque,
+                                                   XC_PAGE_SHIFT,
+                                                   max_mcache_size);
+    mapcache_grants_rw = xen_map_cache_init_single(f, opaque,
+                                                   XC_PAGE_SHIFT,
+                                                   max_mcache_size);
 
     setrlimit(RLIMIT_AS, &rlimit_as);
 }
@@ -456,9 +460,13 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
                        bool is_write)
 {
     bool grant = xen_mr_is_grants(mr);
-    MapCache *mc = grant ? mapcache_grants : mapcache;
+    MapCache *mc = mapcache;
     uint8_t *p;
 
+    if (grant) {
+        mc = is_write ? mapcache_grants_rw : mapcache_grants_ro;
+    }
+
     if (grant && !lock) {
         /*
          * Grants are only supported via address_space_map(). Anything
@@ -523,7 +531,10 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
 
     addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
     if (addr == RAM_ADDR_INVALID) {
-        addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
+        addr = xen_ram_addr_from_mapcache_single(mapcache_grants_ro, ptr);
+    }
+    if (addr == RAM_ADDR_INVALID) {
+        addr = xen_ram_addr_from_mapcache_single(mapcache_grants_rw, ptr);
     }
 
     return addr;
@@ -626,7 +637,8 @@ static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
 static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
 {
     xen_invalidate_map_cache_entry_single(mapcache, buffer);
-    xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
+    xen_invalidate_map_cache_entry_single(mapcache_grants_ro, buffer);
+    xen_invalidate_map_cache_entry_single(mapcache_grants_rw, buffer);
 }
 
 static void xen_invalidate_map_cache_entry_bh(void *opaque)
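
For context, here is a minimal toy sketch of the problem the commit message describes. This is not QEMU code; ToyCache, ToyEntry and toy_cache_lookup are hypothetical names. It only illustrates why a cache keyed purely by page address hands a read-only entry to a writer, and how selecting a separate cache by writability (as the patch does with mapcache_grants_ro / mapcache_grants_rw) gives the writer its own entry.

/* Toy illustration only -- not QEMU's mapcache implementation. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_CACHE_SIZE 16

typedef struct {
    unsigned long page;   /* page address used as the lookup key */
    bool valid;
    bool writable;        /* how the entry was actually mapped */
} ToyEntry;

typedef struct {
    ToyEntry entries[TOY_CACHE_SIZE];
} ToyCache;

/* Lookup keyed only by page: writability is not part of the key. */
static ToyEntry *toy_cache_lookup(ToyCache *mc, unsigned long page, bool is_write)
{
    ToyEntry *e = &mc->entries[page % TOY_CACHE_SIZE];

    if (!e->valid || e->page != page) {
        e->page = page;
        e->valid = true;
        e->writable = is_write;   /* the first requester decides the mapping type */
    }
    return e;                     /* may be read-only even if is_write is true */
}

int main(void)
{
    /* Single shared grants cache: the write lookup hits the read-only entry. */
    ToyCache grants = { 0 };
    ToyEntry *ro = toy_cache_lookup(&grants, 0x1000, false);
    ToyEntry *rw = toy_cache_lookup(&grants, 0x1000, true);
    printf("single cache: write lookup writable=%d, same entry as ro=%d\n",
           rw->writable, ro == rw);

    /* Split caches, selected by writability as in the patched xen_map_cache(). */
    ToyCache grants_ro = { 0 }, grants_rw = { 0 };
    ToyEntry *ro2 = toy_cache_lookup(&grants_ro, 0x1000, false);
    ToyEntry *rw2 = toy_cache_lookup(&grants_rw, 0x1000, true);
    printf("split caches: write lookup writable=%d, separate entries=%d\n",
           rw2->writable, ro2 != rw2);

    return 0;
}

In the single-cache case the second (write) lookup returns the entry created read-only; with per-writability caches the writer gets its own entry, mirroring the is_write selection the patch adds to xen_map_cache().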