author    David Hildenbrand <david@redhat.com>    2023-09-26 20:57:29 +0200
committer David Hildenbrand <david@redhat.com>    2023-10-12 14:15:22 +0200
commit    766aa0a654d887ad8fed2f116c84a89e20102c14 (patch)
tree      291cac1b0357ffb9fc0bd73223ff669c83899289 /hw/mem
parent    f9716f4b0d6eaee5d0b1ccf428a102e0c148fa30 (diff)
memory-device,vhost: Support memory devices that dynamically consume memslots
We want to support memory devices that have a dynamically managed memory
region container as device memory region. This device memory region maps
multiple RAM memory subregions (e.g., aliases to the same RAM memory
region), whereby these subregions can be (un)mapped on demand.

Each RAM subregion will consume a memslot in KVM and vhost, resulting in
such a new device consuming memslots dynamically, and initially usually 0.
We already track the number of used vs. required memslots for all
memslots. From that, we can derive the number of reserved memslots that
must not be used otherwise.

The target use case is virtio-mem and the hyper-v balloon, which will
dynamically map aliases to RAM memory regions into their device memory
region container.

Properly document what's supported and what's not, and extend the vhost
memslot check accordingly.

Message-ID: <20230926185738.277351-10-david@redhat.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
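The accounting the message describes is simple arithmetic: a device declares
how many memslots it will require overall, maps only some of them initially,
and the difference must be held back from KVM and vhost. A minimal standalone
sketch of that derivation, with a made-up struct and numbers (not the QEMU
code in the diff below):

```c
#include <stdio.h>

/* Hypothetical, simplified view of the per-machine device-memory state. */
struct device_memory_state {
    unsigned int used_memslots;     /* memslots currently mapped */
    unsigned int required_memslots; /* memslots devices may map eventually */
};

/* Reserved = required but not yet used; these must not be handed out. */
static unsigned int reserved_memslots(const struct device_memory_state *s)
{
    if (s->used_memslots > s->required_memslots) {
        return 0; /* unexpected; mirrors the defensive check in the patch */
    }
    return s->required_memslots - s->used_memslots;
}

int main(void)
{
    /* e.g., a device that declared 8 memslots but has mapped only 2 so far */
    struct device_memory_state s = { .used_memslots = 2,
                                     .required_memslots = 8 };
    printf("reserved memslots: %u\n", reserved_memslots(&s)); /* prints 6 */
    return 0;
}
```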
Diffstat (limited to 'hw/mem')
-rw-r--r-- hw/mem/memory-device.c | 29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
index d37cfbd..1b14ba5 100644
--- a/hw/mem/memory-device.c
+++ b/hw/mem/memory-device.c
@@ -62,19 +62,44 @@ static unsigned int memory_device_get_memslots(MemoryDeviceState *md)
     return 1;
 }
 
+/*
+ * Memslots that are reserved by memory devices (required but still reported
+ * as free from KVM / vhost).
+ */
+static unsigned int get_reserved_memslots(MachineState *ms)
+{
+    if (ms->device_memory->used_memslots >
+        ms->device_memory->required_memslots) {
+        /* This is unexpected, and we warned already in the memory notifier. */
+        return 0;
+    }
+    return ms->device_memory->required_memslots -
+           ms->device_memory->used_memslots;
+}
+
+unsigned int memory_devices_get_reserved_memslots(void)
+{
+    if (!current_machine->device_memory) {
+        return 0;
+    }
+    return get_reserved_memslots(current_machine);
+}
+
 static void memory_device_check_addable(MachineState *ms, MemoryDeviceState *md,
                                         MemoryRegion *mr, Error **errp)
 {
     const uint64_t used_region_size = ms->device_memory->used_region_size;
     const uint64_t size = memory_region_size(mr);
     const unsigned int required_memslots = memory_device_get_memslots(md);
+    const unsigned int reserved_memslots = get_reserved_memslots(ms);
 
     /* we will need memory slots for kvm and vhost */
-    if (kvm_enabled() && kvm_get_free_memslots() < required_memslots) {
+    if (kvm_enabled() &&
+        kvm_get_free_memslots() < required_memslots + reserved_memslots) {
         error_setg(errp, "hypervisor has not enough free memory slots left");
         return;
     }
-    if (vhost_get_free_memslots() < required_memslots) {
+    if (vhost_get_free_memslots() < required_memslots + reserved_memslots) {
         error_setg(errp, "a used vhost backend has not enough free memory slots left");
         return;
     }
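The diffstat above is limited to hw/mem, so this commit's consumer-side
changes (e.g., in vhost) are not shown here. Purely as an illustrative
sketch, with a hypothetical helper name and counters rather than QEMU's
actual vhost code, a backend-side admission check would fold the reserved
count in roughly like this:

```c
/*
 * Hypothetical sketch of a consumer-side check, NOT the actual vhost
 * change from this commit. Only memory_devices_get_reserved_memslots()
 * is real (added in the diff above).
 */
static bool backend_can_add_memslots(unsigned int limit, unsigned int used,
                                     unsigned int requested)
{
    /* Memslots reserved by memory devices are still reported as free by
     * the backend, so treat them as already consumed. */
    const unsigned int reserved = memory_devices_get_reserved_memslots();

    return used + requested + reserved <= limit;
}
```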