author     Richard Henderson <richard.henderson@linaro.org>   2024-05-06 10:19:09 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2024-05-06 10:19:10 -0700
commit     873f9ca3857cfeeef45441b116c91156736d529c
tree       1a1f0c761dd0cf4204ec5b07e71f06b33438de75 /hw
parent     604dc98970d1c2944b9c529f4474cf16b324067c
parent     8372c3a0cbc5d41458ab3582164cfbcac9b434d4
Merge tag 'accel-20240506' of https://github.com/philmd/qemu into staging
Accelerator patches
- Extract page-protection definitions to page-protection.h
- Rework accel/tcg in preparation for extracting TCG fields from CPUState
- More uses of get_task_state() in user emulation
- Xen refactors in preparation for adding multiple map caches (Juergen & Edgar)
- MAINTAINERS updates (Aleksandar and Bin)
# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmY40CAACgkQ4+MsLN6t
# wN5drxAA1oIsuUzpAJmlMIxZwlzbICiuexgn/HH9DwWNlrarKo7V1l4YB8jd9WOg
# IKuj7c39kJKsDEB8BXApYwcly+l7DYdnAAI8Z7a+eN+ffKNl/0XBaLjsGf58RNwY
# fb39/cXWI9ZxKxsHMSyjpiu68gOGvZ5JJqa30Fr+eOGuug9Fn/fOe1zC6l/dMagy
# Dnym72stpD+hcsN5sVwohTBIk+7g9og1O/ctRx6Q3ZCOPz4p0+JNf8VUu43/reaR
# 294yRK++JrSMhOVFRzP+FH1G25NxiOrVCFXZsUTYU+qPDtdiKtjH1keI/sk7rwZ7
# U573lesl7ewQFf1PvMdaVf0TrQyOe6kUGr9Mn2k8+KgjYRAjTAQk8V4Ric/+xXSU
# 0rd7Cz7lyQ8jm0DoOElROv+lTDQs4dvm3BopF3Bojo4xHLHd3SFhROVPG4tvGQ3H
# 72Q5UPR2Jr2QZKiImvPceUOg0z5XxoN6KRUkSEpMFOiTRkbwnrH59z/qPijUpe6v
# 8l5IlI9GjwkL7pcRensp1VC6e9KC7F5Od1J/2RLDw3UQllMQXqVw2bxD3CEtDRJL
# QSZoS4d1jUCW4iAYdqh/8+2cOIPiCJ4ai5u7lSdjrIJkRErm32FV/pQLZauoHlT5
# eTPUgzDoRXVgI1X1slTpVXlEEvRNbhZqSkYLkXr80MLn5hTafo0=
# =3Qkg
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 06 May 2024 05:42:08 AM PDT
# gpg: using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
* tag 'accel-20240506' of https://github.com/philmd/qemu: (28 commits)
MAINTAINERS: Update my email address
MAINTAINERS: Update Aleksandar Rikalo email
system: Pass RAM MemoryRegion and is_write in xen_map_cache()
xen: mapcache: Break out xen_map_cache_init_single()
xen: mapcache: Break out xen_invalidate_map_cache_single()
xen: mapcache: Refactor xen_invalidate_map_cache_entry_unlocked
xen: mapcache: Refactor xen_replace_cache_entry_unlocked
xen: mapcache: Break out xen_ram_addr_from_mapcache_single
xen: mapcache: Refactor xen_remap_bucket for multi-instance
xen: mapcache: Refactor xen_map_cache for multi-instance
xen: mapcache: Refactor lock functions for multi-instance
xen: let xen_ram_addr_from_mapcache() return -1 in case of not found entry
system: let qemu_map_ram_ptr() use qemu_ram_ptr_length()
user: Use get_task_state() helper
user: Declare get_task_state() once in 'accel/tcg/vcpu-state.h'
user: Forward declare TaskState type definition
accel/tcg: Move @plugin_mem_cbs from CPUState to CPUNegativeOffsetState
accel/tcg: Restrict cpu_plugin_mem_cbs_enabled() to TCG
accel/tcg: Restrict qemu_plugin_vcpu_exit_hook() to TCG plugins
accel/tcg: Update CPUNegativeOffsetState::can_do_io field documentation
...
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
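
The Xen mapcache commits in this series all follow one mechanical pattern: every helper that used to dereference the global mapcache gains an explicit MapCache *mc parameter, and the existing public entry points become thin wrappers that pass the global instance, so external callers stay unchanged while a second cache instance can be introduced later. A minimal standalone sketch of that pattern (simplified types, plain pthreads instead of QEMU's QemuMutex; not the actual QEMU code):

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for QEMU's MapCache; the real structure also carries
 * bucket entries, a locked-entries list, and callbacks (see the diff below). */
typedef struct MapCache {
    pthread_mutex_t lock;     /* per-instance lock instead of a global one */
    unsigned long nr_buckets;
} MapCache;

static MapCache *mapcache;    /* the single pre-existing global instance */

/* Step 1: helpers take the instance they operate on... */
static void mapcache_lock(MapCache *mc)   { pthread_mutex_lock(&mc->lock); }
static void mapcache_unlock(MapCache *mc) { pthread_mutex_unlock(&mc->lock); }

static void invalidate_single(MapCache *mc)
{
    mapcache_lock(mc);
    printf("flushing %lu buckets\n", mc->nr_buckets);
    mapcache_unlock(mc);
}

/* Step 2: ...and the public entry point keeps its old signature by
 * delegating to the global instance, so no caller has to change today. */
void xen_invalidate_map_cache(void)
{
    invalidate_single(mapcache);
}

int main(void)
{
    MapCache mc = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_buckets = 16 };
    mapcache = &mc;
    xen_invalidate_map_cache();
    return 0;
}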
Diffstat (limited to 'hw')
-rw-r--r--  hw/core/cpu-common.c    |   4
-rw-r--r--  hw/ppc/ppc440_bamboo.c  |   1
-rw-r--r--  hw/ppc/sam460ex.c       |   1
-rw-r--r--  hw/ppc/virtex_ml507.c   |   1
-rw-r--r--  hw/xen/xen-mapcache.c   | 208
5 files changed, 123 insertions, 92 deletions
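
Before the diff itself, it may help to see the call shape the widened public API ends up with: xen_map_cache() now also takes the RAM MemoryRegion and an is_write flag, plumbed through in preparation for the multiple-map-cache (grant mapping) work rather than consumed here. An illustrative caller follows; the wrapper function itself is hypothetical and assumes QEMU's "qemu/osdep.h" and "sysemu/xen-mapcache.h":

/* Hypothetical caller showing the new xen_map_cache() prototype
 * introduced by this series. */
static uint8_t *map_guest_range_for_dma(MemoryRegion *mr, hwaddr addr,
                                        hwaddr len, bool is_write)
{
    /*
     * lock=1 pins the mapping (it must later be released with
     * xen_invalidate_map_cache_entry()); dma=true records it as a DMA
     * mapping. The mr and is_write parameters are new in this merge and
     * are only plumbed through for now, in preparation for multiple map
     * cache instances.
     */
    return xen_map_cache(mr, addr, len, 1, true, is_write);
}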
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index a72d48d..0f0a247 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -30,7 +30,9 @@
 #include "hw/boards.h"
 #include "hw/qdev-properties.h"
 #include "trace.h"
+#ifdef CONFIG_PLUGIN
 #include "qemu/plugin.h"
+#endif
 
 CPUState *cpu_by_arch_id(int64_t id)
 {
@@ -236,9 +238,11 @@ static void cpu_common_unrealizefn(DeviceState *dev)
     CPUState *cpu = CPU(dev);
 
     /* Call the plugin hook before clearing the cpu is fully unrealized */
+#ifdef CONFIG_PLUGIN
     if (tcg_enabled()) {
         qemu_plugin_vcpu_exit_hook(cpu);
     }
+#endif
 
     /* NOTE: latest generic point before the cpu is fully unrealized */
     cpu_exec_unrealizefn(cpu);
diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c
index e18f57e..73f80cf 100644
--- a/hw/ppc/ppc440_bamboo.c
+++ b/hw/ppc/ppc440_bamboo.c
@@ -15,6 +15,7 @@
 #include "qemu/units.h"
 #include "qemu/datadir.h"
 #include "qemu/error-report.h"
+#include "exec/page-protection.h"
 #include "net/net.h"
 #include "hw/pci/pci.h"
 #include "hw/boards.h"
diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c
index d42b677..8dc75fb 100644
--- a/hw/ppc/sam460ex.c
+++ b/hw/ppc/sam460ex.c
@@ -21,6 +21,7 @@
 #include "kvm_ppc.h"
 #include "sysemu/device_tree.h"
 #include "sysemu/block-backend.h"
+#include "exec/page-protection.h"
 #include "hw/loader.h"
 #include "elf.h"
 #include "exec/memory.h"
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index d02f330..c49da1f 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -25,6 +25,7 @@
 #include "qemu/osdep.h"
 #include "qemu/datadir.h"
 #include "qemu/units.h"
+#include "exec/page-protection.h"
 #include "cpu.h"
 #include "hw/sysbus.h"
 #include "hw/char/serial.h"
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index 7f59080..fa6813b 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -74,14 +74,14 @@ typedef struct MapCache {
 
 static MapCache *mapcache;
 
-static inline void mapcache_lock(void)
+static inline void mapcache_lock(MapCache *mc)
 {
-    qemu_mutex_lock(&mapcache->lock);
+    qemu_mutex_lock(&mc->lock);
 }
 
-static inline void mapcache_unlock(void)
+static inline void mapcache_unlock(MapCache *mc)
 {
-    qemu_mutex_unlock(&mapcache->lock);
+    qemu_mutex_unlock(&mc->lock);
 }
 
 static inline int test_bits(int nr, int size, const unsigned long *addr)
@@ -93,23 +93,44 @@ static inline int test_bits(int nr, int size, const unsigned long *addr)
     return 0;
 }
 
-void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
+static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
+                                           void *opaque,
+                                           unsigned long max_size)
 {
     unsigned long size;
-    struct rlimit rlimit_as;
+    MapCache *mc;
+
+    mc = g_new0(MapCache, 1);
+
+    mc->phys_offset_to_gaddr = f;
+    mc->opaque = opaque;
+    qemu_mutex_init(&mc->lock);
+
+    QTAILQ_INIT(&mc->locked_entries);
 
-    mapcache = g_new0(MapCache, 1);
+    mc->max_mcache_size = max_size;
 
-    mapcache->phys_offset_to_gaddr = f;
-    mapcache->opaque = opaque;
-    qemu_mutex_init(&mapcache->lock);
+    mc->nr_buckets =
+        (((mc->max_mcache_size >> XC_PAGE_SHIFT) +
+          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
+         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
 
-    QTAILQ_INIT(&mapcache->locked_entries);
+    size = mc->nr_buckets * sizeof(MapCacheEntry);
+    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
+    trace_xen_map_cache_init(mc->nr_buckets, size);
+    mc->entry = g_malloc0(size);
+    return mc;
+}
+
+void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
+{
+    struct rlimit rlimit_as;
+    unsigned long max_mcache_size;
 
     if (geteuid() == 0) {
         rlimit_as.rlim_cur = RLIM_INFINITY;
         rlimit_as.rlim_max = RLIM_INFINITY;
-        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
+        max_mcache_size = MCACHE_MAX_SIZE;
     } else {
         getrlimit(RLIMIT_AS, &rlimit_as);
         rlimit_as.rlim_cur = rlimit_as.rlim_max;
@@ -119,27 +140,18 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
                         " memory is not infinity");
         }
         if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
-            mapcache->max_mcache_size = rlimit_as.rlim_max -
-                NON_MCACHE_MEMORY_SIZE;
+            max_mcache_size = rlimit_as.rlim_max - NON_MCACHE_MEMORY_SIZE;
         } else {
-            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
+            max_mcache_size = MCACHE_MAX_SIZE;
         }
     }
 
+    mapcache = xen_map_cache_init_single(f, opaque, max_mcache_size);
     setrlimit(RLIMIT_AS, &rlimit_as);
-
-    mapcache->nr_buckets =
-        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
-          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
-         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
-
-    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
-    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
-    trace_xen_map_cache_init(mapcache->nr_buckets, size);
-    mapcache->entry = g_malloc0(size);
 }
 
-static void xen_remap_bucket(MapCacheEntry *entry,
+static void xen_remap_bucket(MapCache *mc,
+                             MapCacheEntry *entry,
                              void *vaddr,
                              hwaddr size,
                              hwaddr address_index,
@@ -240,8 +252,9 @@ static void xen_remap_bucket(MapCacheEntry *entry,
     g_free(err);
 }
 
-static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
-                                       uint8_t lock, bool dma)
+static uint8_t *xen_map_cache_unlocked(MapCache *mc,
+                                       hwaddr phys_addr, hwaddr size,
+                                       uint8_t lock, bool dma, bool is_write)
 {
     MapCacheEntry *entry, *pentry = NULL,
                   *free_entry = NULL, *free_pentry = NULL;
@@ -269,16 +282,16 @@ tryagain:
         test_bit_size = XC_PAGE_SIZE;
     }
 
-    if (mapcache->last_entry != NULL &&
-        mapcache->last_entry->paddr_index == address_index &&
+    if (mc->last_entry != NULL &&
+        mc->last_entry->paddr_index == address_index &&
         !lock && !size &&
         test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
-                  mapcache->last_entry->valid_mapping)) {
+                  mc->last_entry->valid_mapping)) {
         trace_xen_map_cache_return(
-            mapcache->last_entry->vaddr_base + address_offset
+            mc->last_entry->vaddr_base + address_offset
         );
-        return mapcache->last_entry->vaddr_base + address_offset;
+        return mc->last_entry->vaddr_base + address_offset;
     }
 
     /* size is always a multiple of MCACHE_BUCKET_SIZE */
@@ -291,7 +304,7 @@
         cache_size = MCACHE_BUCKET_SIZE;
     }
 
-    entry = &mapcache->entry[address_index % mapcache->nr_buckets];
+    entry = &mc->entry[address_index % mc->nr_buckets];
     while (entry && (lock || entry->lock) && entry->vaddr_base &&
             (entry->paddr_index != address_index ||
              entry->size != cache_size ||
@@ -312,24 +325,24 @@
     if (!entry) {
         entry = g_new0(MapCacheEntry, 1);
         pentry->next = entry;
-        xen_remap_bucket(entry, NULL, cache_size, address_index, dummy);
+        xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy);
    } else if (!entry->lock) {
         if (!entry->vaddr_base || entry->paddr_index != address_index ||
                 entry->size != cache_size ||
                 !test_bits(address_offset >> XC_PAGE_SHIFT,
                     test_bit_size >> XC_PAGE_SHIFT,
                     entry->valid_mapping)) {
-            xen_remap_bucket(entry, NULL, cache_size, address_index, dummy);
+            xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy);
         }
     }
 
     if(!test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping)) {
-        mapcache->last_entry = NULL;
+        mc->last_entry = NULL;
 #ifdef XEN_COMPAT_PHYSMAP
-        if (!translated && mapcache->phys_offset_to_gaddr) {
-            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size);
+        if (!translated && mc->phys_offset_to_gaddr) {
+            phys_addr = mc->phys_offset_to_gaddr(phys_addr, size);
             translated = true;
             goto tryagain;
         }
@@ -342,7 +355,7 @@
         return NULL;
     }
 
-    mapcache->last_entry = entry;
+    mc->last_entry = entry;
     if (lock) {
         MapCacheRev *reventry = g_new0(MapCacheRev, 1);
         entry->lock++;
@@ -352,30 +365,32 @@
             abort();
         }
         reventry->dma = dma;
-        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
-        reventry->paddr_index = mapcache->last_entry->paddr_index;
+        reventry->vaddr_req = mc->last_entry->vaddr_base + address_offset;
+        reventry->paddr_index = mc->last_entry->paddr_index;
         reventry->size = entry->size;
-        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
+        QTAILQ_INSERT_HEAD(&mc->locked_entries, reventry, next);
     }
 
     trace_xen_map_cache_return(
-        mapcache->last_entry->vaddr_base + address_offset
+        mc->last_entry->vaddr_base + address_offset
     );
-    return mapcache->last_entry->vaddr_base + address_offset;
+    return mc->last_entry->vaddr_base + address_offset;
 }
 
-uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
-                       uint8_t lock, bool dma)
+uint8_t *xen_map_cache(MemoryRegion *mr,
+                       hwaddr phys_addr, hwaddr size,
+                       uint8_t lock, bool dma,
+                       bool is_write)
 {
     uint8_t *p;
 
-    mapcache_lock();
-    p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
-    mapcache_unlock();
+    mapcache_lock(mapcache);
+    p = xen_map_cache_unlocked(mapcache, phys_addr, size, lock, dma, is_write);
+    mapcache_unlock(mapcache);
     return p;
 }
 
-ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
+static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
 {
     MapCacheEntry *entry = NULL;
     MapCacheRev *reventry;
@@ -384,8 +399,8 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
     ram_addr_t raddr;
     int found = 0;
 
-    mapcache_lock();
-    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+    mapcache_lock(mc);
+    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
         if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
@@ -395,30 +410,32 @@
     }
     if (!found) {
         trace_xen_ram_addr_from_mapcache_not_found(ptr);
-        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
-            trace_xen_ram_addr_from_mapcache_found(reventry->paddr_index,
-                                                   reventry->vaddr_req);
-        }
-        abort();
-        return 0;
+        mapcache_unlock(mc);
+        return RAM_ADDR_INVALID;
     }
 
-    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    entry = &mc->entry[paddr_index % mc->nr_buckets];
     while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
         entry = entry->next;
     }
     if (!entry) {
         trace_xen_ram_addr_from_mapcache_not_in_cache(ptr);
-        raddr = 0;
+        raddr = RAM_ADDR_INVALID;
     } else {
         raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
              ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
     }
-    mapcache_unlock();
+    mapcache_unlock(mc);
     return raddr;
 }
 
-static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
+ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
+{
+    return xen_ram_addr_from_mapcache_single(mapcache, ptr);
+}
+
+static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
+                                                    uint8_t *buffer)
 {
     MapCacheEntry *entry = NULL, *pentry = NULL;
     MapCacheRev *reventry;
@@ -426,7 +443,7 @@
     hwaddr size;
     int found = 0;
 
-    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
         if (reventry->vaddr_req == buffer) {
             paddr_index = reventry->paddr_index;
             size = reventry->size;
@@ -436,7 +453,7 @@
     }
     if (!found) {
         trace_xen_invalidate_map_cache_entry_unlocked_not_found(buffer);
-        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
            trace_xen_invalidate_map_cache_entry_unlocked_found(
                reventry->paddr_index,
                reventry->vaddr_req
@@ -444,15 +461,15 @@
         }
         return;
     }
-    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    QTAILQ_REMOVE(&mc->locked_entries, reventry, next);
     g_free(reventry);
 
-    if (mapcache->last_entry != NULL &&
-        mapcache->last_entry->paddr_index == paddr_index) {
-        mapcache->last_entry = NULL;
+    if (mc->last_entry != NULL &&
+        mc->last_entry->paddr_index == paddr_index) {
+        mc->last_entry = NULL;
     }
 
-    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    entry = &mc->entry[paddr_index % mc->nr_buckets];
     while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
         pentry = entry;
         entry = entry->next;
@@ -485,9 +502,9 @@ static void xen_invalidate_map_cache_entry_bh(void *opaque)
 {
     XenMapCacheData *data = opaque;
 
-    mapcache_lock();
-    xen_invalidate_map_cache_entry_unlocked(data->buffer);
-    mapcache_unlock();
+    mapcache_lock(mapcache);
+    xen_invalidate_map_cache_entry_unlocked(mapcache, data->buffer);
+    mapcache_unlock(mapcache);
 
     aio_co_wake(data->co);
 }
@@ -503,23 +520,20 @@ void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer)
                                 xen_invalidate_map_cache_entry_bh, &data);
         qemu_coroutine_yield();
     } else {
-        mapcache_lock();
-        xen_invalidate_map_cache_entry_unlocked(buffer);
-        mapcache_unlock();
+        mapcache_lock(mapcache);
+        xen_invalidate_map_cache_entry_unlocked(mapcache, buffer);
+        mapcache_unlock(mapcache);
     }
 }
 
-void xen_invalidate_map_cache(void)
+static void xen_invalidate_map_cache_single(MapCache *mc)
 {
     unsigned long i;
     MapCacheRev *reventry;
 
-    /* Flush pending AIO before destroying the mapcache */
-    bdrv_drain_all();
-
-    mapcache_lock();
+    mapcache_lock(mc);
 
-    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
         if (!reventry->dma) {
             continue;
         }
@@ -527,8 +541,8 @@
                                        reventry->vaddr_req);
     }
 
-    for (i = 0; i < mapcache->nr_buckets; i++) {
-        MapCacheEntry *entry = &mapcache->entry[i];
+    for (i = 0; i < mc->nr_buckets; i++) {
+        MapCacheEntry *entry = &mc->entry[i];
 
         if (entry->vaddr_base == NULL) {
             continue;
@@ -549,12 +563,21 @@
         entry->valid_mapping = NULL;
     }
 
-    mapcache->last_entry = NULL;
+    mc->last_entry = NULL;
+
+    mapcache_unlock(mc);
+}
+
+void xen_invalidate_map_cache(void)
+{
+    /* Flush pending AIO before destroying the mapcache */
+    bdrv_drain_all();
 
-    mapcache_unlock();
+    xen_invalidate_map_cache_single(mapcache);
 }
 
-static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr,
+static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
+                                                 hwaddr old_phys_addr,
                                                  hwaddr new_phys_addr,
                                                  hwaddr size)
 {
@@ -576,7 +599,7 @@
         cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
     }
 
-    entry = &mapcache->entry[address_index % mapcache->nr_buckets];
+    entry = &mc->entry[address_index % mc->nr_buckets];
     while (entry && !(entry->paddr_index == address_index &&
                       entry->size == cache_size)) {
         entry = entry->next;
@@ -591,7 +614,7 @@
 
     trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
 
-    xen_remap_bucket(entry, entry->vaddr_base,
+    xen_remap_bucket(mc, entry, entry->vaddr_base,
                      cache_size, address_index, false);
     if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
@@ -611,8 +634,9 @@ uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
 {
     uint8_t *p;
 
-    mapcache_lock();
-    p = xen_replace_cache_entry_unlocked(old_phys_addr, new_phys_addr, size);
-    mapcache_unlock();
+    mapcache_lock(mapcache);
+    p = xen_replace_cache_entry_unlocked(mapcache, old_phys_addr,
+                                         new_phys_addr, size);
+    mapcache_unlock(mapcache);
     return p;
 }
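
One behavioral change worth calling out from the diff above: xen_ram_addr_from_mapcache() no longer abort()s when a pointer is not found, it now returns RAM_ADDR_INVALID (-1), so error handling moves to the caller. A sketch of what a caller could look like under the new contract (the surrounding function is hypothetical; only the RAM_ADDR_INVALID check reflects this series):

/* Hypothetical caller; assumes QEMU's "qemu/osdep.h", "qemu/error-report.h"
 * and "sysemu/xen-mapcache.h". */
static int lookup_guest_ram_addr(void *ptr, ram_addr_t *out)
{
    ram_addr_t raddr = xen_ram_addr_from_mapcache(ptr);

    if (raddr == RAM_ADDR_INVALID) {
        /* Pre-series code would have abort()ed inside the lookup. */
        error_report("%p not found in the Xen map cache", ptr);
        return -1;
    }
    *out = raddr;
    return 0;
}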