From a494fdb715832000ee9047a549a35aacfea8175e Mon Sep 17 00:00:00 2001
From: Gavin Shan
Date: Tue, 9 May 2023 10:27:37 +1000
Subject: numa: Validate cluster and NUMA node boundary if required
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On some architectures, such as ARM64, multiple CPUs in one cluster can be
associated with different NUMA nodes. This is an irregular configuration,
since it should not happen on bare-metal machines, and it causes Linux
guests to misbehave, as the following warning messages indicate.

  -smp 6,maxcpus=6,sockets=2,clusters=1,cores=3,threads=1 \
  -numa node,nodeid=0,cpus=0-1,memdev=ram0                \
  -numa node,nodeid=1,cpus=2-3,memdev=ram1                \
  -numa node,nodeid=2,cpus=4-5,memdev=ram2                \

  ------------[ cut here ]------------
  WARNING: CPU: 0 PID: 1 at kernel/sched/topology.c:2271 build_sched_domains+0x284/0x910
  Modules linked in:
  CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.14.0-268.el9.aarch64 #1
  pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
  pc : build_sched_domains+0x284/0x910
  lr : build_sched_domains+0x184/0x910
  sp : ffff80000804bd50
  x29: ffff80000804bd50 x28: 0000000000000002 x27: 0000000000000000
  x26: ffff800009cf9a80 x25: 0000000000000000 x24: ffff800009cbf840
  x23: ffff000080325000 x22: ffff0000005df800 x21: ffff80000a4ce508
  x20: 0000000000000000 x19: ffff000080324440 x18: 0000000000000014
  x17: 00000000388925c0 x16: 000000005386a066 x15: 000000009c10cc2e
  x14: 00000000000001c0 x13: 0000000000000001 x12: ffff00007fffb1a0
  x11: ffff00007fffb180 x10: ffff80000a4ce508 x9 : 0000000000000041
  x8 : ffff80000a4ce500 x7 : ffff80000a4cf920 x6 : 0000000000000001
  x5 : 0000000000000001 x4 : 0000000000000007 x3 : 0000000000000002
  x2 : 0000000000001000 x1 : ffff80000a4cf928 x0 : 0000000000000001
  Call trace:
   build_sched_domains+0x284/0x910
   sched_init_domains+0xac/0xe0
   sched_init_smp+0x48/0xc8
   kernel_init_freeable+0x140/0x1ac
   kernel_init+0x28/0x140
   ret_from_fork+0x10/0x20

Improve the situation by warning when multiple CPUs in one cluster are
associated with different NUMA nodes. One NUMA node is still allowed to
be associated with multiple clusters.

Signed-off-by: Gavin Shan
Acked-by: Philippe Mathieu-Daudé
Acked-by: Igor Mammedov
Message-Id: <20230509002739.18388-2-gshan@redhat.com>
Signed-off-by: Paolo Bonzini
---
 include/hw/boards.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/hw/boards.h b/include/hw/boards.h
index a385010..6b267c2 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -274,6 +274,7 @@ struct MachineClass {
     bool nvdimm_supported;
     bool numa_mem_supported;
     bool auto_enable_numa;
+    bool cpu_cluster_has_numa_boundary;
     SMPCompatProps smp_props;
 
     const char *default_ram_id;
--
cgit v1.1
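The hunk above only adds the MachineClass flag; the validation pass that
consumes it lives under hw/core/ and falls outside this include/-limited
diff. Below is a minimal sketch of what such a check can look like. It is
an assumption-laden illustration, not a quote from the merged series: the
function name, the guard conditions, and the warning text are made up
here; only cpu_cluster_has_numa_boundary comes from the hunk above.

/*
 * Sketch only (QEMU context assumed: qemu/osdep.h, hw/boards.h and
 * qemu/error-report.h).  Warn when two possible CPUs share a socket
 * and cluster but sit on different NUMA nodes.  Meant to run during
 * machine initialization, and only for machine classes that have set
 * cpu_cluster_has_numa_boundary.
 */
static void validate_cpu_cluster_to_numa_boundary(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *list = mc->possible_cpu_arch_ids(ms);
    int i, j;

    if (!ms->numa_state || ms->numa_state->num_nodes <= 1 ||
        list->len <= 1) {
        return;     /* nothing to cross-check */
    }

    for (i = 0; i < list->len; i++) {
        for (j = i + 1; j < list->len; j++) {
            const CpuInstanceProperties *a = &list->cpus[i].props;
            const CpuInstanceProperties *b = &list->cpus[j].props;

            if (a->has_socket_id && b->has_socket_id &&
                a->has_cluster_id && b->has_cluster_id &&
                a->has_node_id && b->has_node_id &&
                a->socket_id == b->socket_id &&
                a->cluster_id == b->cluster_id &&
                a->node_id != b->node_id) {
                warn_report("CPU-%d and CPU-%d in the same cluster are "
                            "associated with node-%" PRId64 " and "
                            "node-%" PRId64 "; OSes like Linux may "
                            "misbehave", i, j, a->node_id, b->node_id);
            }
        }
    }
}

A board would opt in by setting mc->cpu_cluster_has_numa_boundary = true
in its class-init function; machines that tolerate clusters spanning
nodes leave the flag clear and never see the warning.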
From 3b6f485275ae95a81eec589d2773b86ca9ddec4d Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Sun, 18 Jun 2023 23:24:40 +0200
Subject: kvm: reuse per-vcpu stats fd to avoid vcpu interruption

A regression has been detected in latency testing of KVM guests.
More specifically, it was observed that the cyclictest numbers inside
an isolated vcpu (running on an isolated pcpu) are:

Where a maximum of 50us is acceptable.

The implementation of KVM_GET_STATS_FD uses run_on_cpu to query
per-vcpu statistics, which interrupts the vcpu (and is unnecessary).

To fix this, open the per-vcpu stats fd on vcpu initialization, and
read from that fd from QEMU's main thread.

Signed-off-by: Marcelo Tosatti
Signed-off-by: Paolo Bonzini
---
 include/hw/core/cpu.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 4871ad8..3b765be 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -402,6 +402,7 @@ struct CPUState {
     struct kvm_dirty_gfn *kvm_dirty_gfns;
     uint32_t kvm_fetch_index;
     uint64_t dirty_pages;
+    int kvm_vcpu_stats_fd;
 
     /* Use by accel-block: CPU is executing an ioctl() */
     QemuLockCnt in_ioctl_lock;
--
cgit v1.1
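As with the previous patch, the hunk only adds the cached field; the
accel/kvm changes that use it are outside include/. The sketch below
shows the shape of the fix under stated assumptions: kvm_vcpu_ioctl()
and the KVM_GET_STATS_FD ioctl are real QEMU/KVM interfaces, while the
two helper names and the pread()-based header read are illustrative
guesses at the surrounding code, not the merged implementation.

/* Sketch only.  CPUState and kvm_vcpu_ioctl() come from QEMU
 * ("hw/core/cpu.h", "sysemu/kvm.h"); the system headers below supply
 * pread() and the KVM binary stats ABI. */
#include <unistd.h>         /* pread() */
#include <linux/kvm.h>      /* KVM_GET_STATS_FD, struct kvm_stats_header */

/* At vcpu creation, in the vcpu thread, fetch the stats fd once and
 * cache it.  On kernels without KVM_GET_STATS_FD the ioctl wrapper
 * returns a negative errno, leaving the field invalid. */
static void kvm_vcpu_stats_init(CPUState *cpu)
{
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
}

/* Later, from QEMU's main thread: the binary stats header (and, at the
 * offsets it describes, the descriptor and data blocks) is read with
 * plain pread() on the cached fd.  No run_on_cpu(), so the vcpu is
 * never interrupted just to answer a stats query. */
static bool kvm_vcpu_stats_read_header(CPUState *cpu,
                                       struct kvm_stats_header *hdr)
{
    if (cpu->kvm_vcpu_stats_fd < 0) {
        return false;       /* stats fd unavailable */
    }
    return pread(cpu->kvm_vcpu_stats_fd, hdr, sizeof(*hdr), 0)
           == sizeof(*hdr);
}

The reason caching helps: KVM's API documentation asks that vcpu ioctls
be issued from the thread that runs the vcpu, which is why the old path
bounced through run_on_cpu(). Reads on an already-open stats fd carry no
such thread affinity, so the main thread can poll statistics without
ever kicking the guest out of its run loop.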