Diffstat (limited to 'target/s390x')
-rw-r--r--   target/s390x/cpu-sysemu.c        |  13
-rw-r--r--   target/s390x/cpu.c               |  16
-rw-r--r--   target/s390x/cpu.h               |  82
-rw-r--r--   target/s390x/cpu_models.c        |   1
-rw-r--r--   target/s390x/kvm/kvm.c           | 166
-rw-r--r--   target/s390x/kvm/kvm_s390x.h     |   1
-rw-r--r--   target/s390x/kvm/meson.build     |   3
-rw-r--r--   target/s390x/kvm/stsi-topology.c | 334
8 files changed, 527 insertions, 89 deletions
diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
index 8112561..1cd30c1 100644
--- a/target/s390x/cpu-sysemu.c
+++ b/target/s390x/cpu-sysemu.c
@@ -307,3 +307,16 @@ void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg)
         kvm_s390_set_diag318(cs, arg.host_ulong);
     }
 }
+
+void s390_cpu_topology_set_changed(bool changed)
+{
+    int ret;
+
+    if (kvm_enabled()) {
+        ret = kvm_s390_topology_set_mtcr(changed);
+        if (ret) {
+            error_report("Failed to set Modified Topology Change Report: %s",
+                         strerror(-ret));
+        }
+    }
+}
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 6093ab0..6acfa1c 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -31,12 +31,14 @@
 #include "qapi/qapi-types-machine.h"
 #include "sysemu/hw_accel.h"
 #include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
 #include "fpu/softfloat-helpers.h"
 #include "disas/capstone.h"
 #include "sysemu/tcg.h"
 #ifndef CONFIG_USER_ONLY
 #include "sysemu/reset.h"
 #endif
+#include "hw/s390x/cpu-topology.h"
 
 #define CR0_RESET       0xE0UL
 #define CR14_RESET      0xC2000000UL;
@@ -145,6 +147,14 @@ static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value)
     S390CPU *s390_cpu = S390_CPU(cpu);
 
     value->u.s390x.cpu_state = s390_cpu->env.cpu_state;
+#if !defined(CONFIG_USER_ONLY)
+    if (s390_has_topology()) {
+        value->u.s390x.has_dedicated = true;
+        value->u.s390x.dedicated = s390_cpu->env.dedicated;
+        value->u.s390x.has_entitlement = true;
+        value->u.s390x.entitlement = s390_cpu->env.entitlement;
+    }
+#endif
 }
 
 /* S390CPUClass::reset() */
@@ -290,6 +300,12 @@ static const gchar *s390_gdb_arch_name(CPUState *cs)
 static Property s390x_cpu_properties[] = {
 #if !defined(CONFIG_USER_ONLY)
     DEFINE_PROP_UINT32("core-id", S390CPU, env.core_id, 0),
+    DEFINE_PROP_INT32("socket-id", S390CPU, env.socket_id, -1),
+    DEFINE_PROP_INT32("book-id", S390CPU, env.book_id, -1),
+    DEFINE_PROP_INT32("drawer-id", S390CPU, env.drawer_id, -1),
+    DEFINE_PROP_BOOL("dedicated", S390CPU, env.dedicated, false),
+    DEFINE_PROP_CPUS390ENTITLEMENT("entitlement", S390CPU, env.entitlement,
+                                   S390_CPU_ENTITLEMENT_AUTO),
 #endif
     DEFINE_PROP_END_OF_LIST()
 };
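For context on the cpu.c hunk above: "socket-id", "book-id", "drawer-id", "dedicated" and "entitlement" are ordinary QOM device properties, so machine code (or the user via -device) can assign them before the vCPU is realized. A minimal sketch, assuming board code already holds the S390CPU pointer; example_place_vcpu and the chosen values are purely illustrative, the property names come from the diff and the setters are the generic QOM helpers:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qom/object.h"
    #include "cpu.h"

    /* Illustrative only: pin one vCPU into drawer 0, book 0, socket 1. */
    static void example_place_vcpu(S390CPU *cpu)
    {
        object_property_set_int(OBJECT(cpu), "drawer-id", 0, &error_abort);
        object_property_set_int(OBJECT(cpu), "book-id", 0, &error_abort);
        object_property_set_int(OBJECT(cpu), "socket-id", 1, &error_abort);
        object_property_set_bool(OBJECT(cpu), "dedicated", true, &error_abort);
        object_property_set_str(OBJECT(cpu), "entitlement", "high", &error_abort);
    }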
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 7bea707..40c5ced 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -30,6 +30,7 @@
 #include "exec/cpu-defs.h"
 #include "qemu/cpu-float.h"
 #include "tcg/tcg_s390x.h"
+#include "qapi/qapi-types-machine-common.h"
 
 #define ELF_MACHINE_UNAME "S390X"
 
@@ -132,6 +133,11 @@ struct CPUArchState {
 
 #if !defined(CONFIG_USER_ONLY)
     uint32_t core_id;       /* PoP "CPU address", same as cpu_index */
+    int32_t socket_id;
+    int32_t book_id;
+    int32_t drawer_id;
+    bool dedicated;
+    CpuS390Entitlement entitlement; /* Used only for vertical polarization */
     uint64_t cpuid;
 #endif
 
@@ -564,6 +570,29 @@ typedef struct SysIB_322 {
 } SysIB_322;
 QEMU_BUILD_BUG_ON(sizeof(SysIB_322) != 4096);
 
+/*
+ * The Topology Magnitude (MAG) fields indicate the maximum number of
+ * topology list entries (TLE) at the corresponding nesting level.
+ */
+#define S390_TOPOLOGY_MAG  6
+#define S390_TOPOLOGY_MAG6 0
+#define S390_TOPOLOGY_MAG5 1
+#define S390_TOPOLOGY_MAG4 2
+#define S390_TOPOLOGY_MAG3 3
+#define S390_TOPOLOGY_MAG2 4
+#define S390_TOPOLOGY_MAG1 5
+/* Configuration topology */
+typedef struct SysIB_151x {
+    uint8_t  reserved0[2];
+    uint16_t length;
+    uint8_t  mag[S390_TOPOLOGY_MAG];
+    uint8_t  reserved1;
+    uint8_t  mnest;
+    uint32_t reserved2;
+    char tle[];
+} SysIB_151x;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_151x) != 16);
+
 typedef union SysIB {
     SysIB_111 sysib_111;
     SysIB_121 sysib_121;
@@ -571,9 +600,62 @@ typedef union SysIB {
     SysIB_221 sysib_221;
     SysIB_222 sysib_222;
     SysIB_322 sysib_322;
+    SysIB_151x sysib_151x;
 } SysIB;
 QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
 
+/*
+ * The CPU Topology List provided by STSI with fc=15 is a list of two
+ * different Topology List Entry (TLE) types that together specify the
+ * topology hierarchy:
+ *
+ * - Container Topology List Entry
+ *   Defines a container holding other Topology List Entries of any
+ *   type, either nested containers or CPUs.
+ * - CPU Topology List Entry
+ *   Specifies the position, type, entitlement and polarization of the
+ *   CPUs contained in the last container TLE.
+ *
+ * There can theoretically be up to five levels of containers; QEMU
+ * uses only three: the drawer, book and socket levels.
+ *
+ * A container with a nesting level (NL) greater than 1 can only
+ * contain another container of nesting level NL-1.
+ *
+ * A container of nesting level 1 (socket) contains as many CPU TLEs
+ * as needed to describe the position and qualities of all CPUs inside
+ * the container.
+ * The qualities of a CPU are polarization, entitlement and type.
+ *
+ * The CPU TLE defines the position of the CPUs of identical qualities
+ * using a 64-bit mask whose first bit has its offset defined by the
+ * CPU address origin field of the CPU TLE, as in:
+ * CPU address = origin * 64 + bit position within the mask
+ */
+/* Container type Topology List Entry */
+typedef struct SYSIBContainerListEntry {
+    uint8_t nl;
+    uint8_t reserved[6];
+    uint8_t id;
+} SYSIBContainerListEntry;
+QEMU_BUILD_BUG_ON(sizeof(SYSIBContainerListEntry) != 8);
+
+/* CPU type Topology List Entry */
+typedef struct SysIBCPUListEntry {
+    uint8_t nl;
+    uint8_t reserved0[3];
+#define SYSIB_TLE_POLARITY_MASK 0x03
+#define SYSIB_TLE_DEDICATED     0x04
+    uint8_t flags;
+    uint8_t type;
+    uint16_t origin;
+    uint64_t mask;
+} SysIBCPUListEntry;
+QEMU_BUILD_BUG_ON(sizeof(SysIBCPUListEntry) != 16);
+
+void insert_stsi_15_1_x(S390CPU *cpu, int sel2, uint64_t addr, uint8_t ar, uintptr_t ra);
+void s390_cpu_topology_set_changed(bool changed);
+
 /* MMU defines */
 #define ASCE_ORIGIN           (~0xfffULL) /* segment table origin             */
 #define ASCE_SUBSPACE         0x200       /* subspace group control           */
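To make the origin/mask encoding described in the cpu.h comment above concrete, here is a small standalone arithmetic sketch; core_id 70 is an arbitrary example value, and the computation mirrors the "CPU address = origin * 64 + bit position" formula and the set_bit(63 - core_id % 64, ...) used later in this series:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t core_id = 70;
        uint16_t origin_field = (core_id / 64) * 64;    /* 64: CPU address of mask bit 0 */
        uint64_t mask = 1ULL << (63 - (core_id % 64));  /* bit 6 counted from the left */

        /* The guest recovers the CPU address as origin + bit position = 64 + 6 = 70. */
        printf("origin=%" PRIu16 " mask=0x%016" PRIx64 "\n", origin_field, mask);
        return 0;
    }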
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 98f14c0..4dead48 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -255,6 +255,7 @@ bool s390_has_feat(S390Feat feat)
     case S390_FEAT_SIE_CMMA:
     case S390_FEAT_SIE_PFMFI:
     case S390_FEAT_SIE_IBS:
+    case S390_FEAT_CONFIGURATION_TOPOLOGY:
         return false;
         break;
     default:
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index bc5c56a..0f0e784 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -86,6 +86,7 @@
 #define PRIV_B9_EQBS                    0x9c
 #define PRIV_B9_CLP                     0xa0
+#define PRIV_B9_PTF                     0xa2
 #define PRIV_B9_PCISTG                  0xd0
 #define PRIV_B9_PCILG                   0xd2
 #define PRIV_B9_RPCIT                   0xd3
@@ -138,7 +139,6 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_LAST_INFO
 };
 
-static int cap_sync_regs;
 static int cap_async_pf;
 static int cap_mem_op;
 static int cap_mem_op_extension;
@@ -337,21 +337,28 @@ int kvm_arch_get_default_type(MachineState *ms)
 
 int kvm_arch_init(MachineState *ms, KVMState *s)
 {
+    int required_caps[] = {
+        KVM_CAP_DEVICE_CTRL,
+        KVM_CAP_SYNC_REGS,
+    };
+
+    for (int i = 0; i < ARRAY_SIZE(required_caps); i++) {
+        if (!kvm_check_extension(s, required_caps[i])) {
+            error_report("KVM is missing capability #%d - "
+                         "please use kernel 3.15 or newer", required_caps[i]);
+            return -1;
+        }
+    }
+
     object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                          false, NULL);
 
-    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
-        error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
-                     "please use kernel 3.15 or newer");
-        return -1;
-    }
     if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
         error_report("KVM is missing capability KVM_CAP_S390_COW - "
                      "unsupported environment");
         return -1;
     }
 
-    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
     cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
     cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
     cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
@@ -365,6 +372,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
     kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
+    kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0);
     if (ri_allowed()) {
         if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
             cap_ri = 1;
@@ -458,37 +466,28 @@ void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
 
 static int can_sync_regs(CPUState *cs, int regs)
 {
-    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
+    return (cs->kvm_run->kvm_valid_regs & regs) == regs;
 }
 
+#define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \
+                                KVM_SYNC_CRS | KVM_SYNC_PREFIX)
+
 int kvm_arch_put_registers(CPUState *cs, int level)
 {
     S390CPU *cpu = S390_CPU(cs);
     CPUS390XState *env = &cpu->env;
-    struct kvm_sregs sregs;
-    struct kvm_regs regs;
     struct kvm_fpu fpu = {};
     int r;
     int i;
 
+    g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
+
     /* always save the PSW and the GPRS*/
     cs->kvm_run->psw_addr = env->psw.addr;
     cs->kvm_run->psw_mask = env->psw.mask;
 
-    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
-        for (i = 0; i < 16; i++) {
-            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
-            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
-        }
-    } else {
-        for (i = 0; i < 16; i++) {
-            regs.gprs[i] = env->regs[i];
-        }
-        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
-        if (r < 0) {
-            return r;
-        }
-    }
+    memcpy(cs->kvm_run->s.regs.gprs, env->regs, sizeof(cs->kvm_run->s.regs.gprs));
+    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 
     if (can_sync_regs(cs, KVM_SYNC_VRS)) {
         for (i = 0; i < 32; i++) {
@@ -521,6 +520,15 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return 0;
     }
 
+    /*
+     * Access registers, control registers and the prefix - these are
+     * always available via kvm_sync_regs in the kernels that we support
+     */
+    memcpy(cs->kvm_run->s.regs.acrs, env->aregs, sizeof(cs->kvm_run->s.regs.acrs));
+    memcpy(cs->kvm_run->s.regs.crs, env->cregs, sizeof(cs->kvm_run->s.regs.crs));
+    cs->kvm_run->s.regs.prefix = env->psa;
+    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_PREFIX;
+
     if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
         cs->kvm_run->s.regs.cputm = env->cputm;
         cs->kvm_run->s.regs.ckc = env->ckc;
@@ -567,25 +575,6 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         }
     }
 
-    /* access registers and control registers*/
-    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
-        for (i = 0; i < 16; i++) {
-            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
-            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
-        }
-        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
-        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
-    } else {
-        for (i = 0; i < 16; i++) {
-            sregs.acrs[i] = env->aregs[i];
-            sregs.crs[i] = env->cregs[i];
-        }
-        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
-        if (r < 0) {
-            return r;
-        }
-    }
-
     if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
         memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
@@ -607,13 +596,6 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
     }
 
-    /* Finally the prefix */
-    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
-        cs->kvm_run->s.regs.prefix = env->psa;
-        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
-    } else {
-        /* prefix is only supported via sync regs */
-    }
     return 0;
 }
 
@@ -621,8 +603,6 @@ int kvm_arch_get_registers(CPUState *cs)
 {
     S390CPU *cpu = S390_CPU(cs);
     CPUS390XState *env = &cpu->env;
-    struct kvm_sregs sregs;
-    struct kvm_regs regs;
     struct kvm_fpu fpu;
     int i, r;
 
@@ -630,37 +610,14 @@ int kvm_arch_get_registers(CPUState *cs)
     env->psw.addr = cs->kvm_run->psw_addr;
     env->psw.mask = cs->kvm_run->psw_mask;
 
-    /* the GPRS */
-    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
-        for (i = 0; i < 16; i++) {
-            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
-        }
-    } else {
-        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
-        if (r < 0) {
-            return r;
-        }
-        for (i = 0; i < 16; i++) {
-            env->regs[i] = regs.gprs[i];
-        }
-    }
+    /* the GPRS, ACRS and CRS */
+    g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
+    memcpy(env->regs, cs->kvm_run->s.regs.gprs, sizeof(env->regs));
+    memcpy(env->aregs, cs->kvm_run->s.regs.acrs, sizeof(env->aregs));
+    memcpy(env->cregs, cs->kvm_run->s.regs.crs, sizeof(env->cregs));
 
-    /* The ACRS and CRS */
-    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
-        for (i = 0; i < 16; i++) {
-            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
-            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
-        }
-    } else {
-        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
-        if (r < 0) {
-            return r;
-        }
-        for (i = 0; i < 16; i++) {
-            env->aregs[i] = sregs.acrs[i];
-            env->cregs[i] = sregs.crs[i];
-        }
-    }
+    /* The prefix */
+    env->psa = cs->kvm_run->s.regs.prefix;
 
     /* Floating point and vector registers */
     if (can_sync_regs(cs, KVM_SYNC_VRS)) {
@@ -685,11 +642,6 @@ int kvm_arch_get_registers(CPUState *cs)
         env->fpc = fpu.fpc;
     }
 
-    /* The prefix */
-    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
-        env->psa = cs->kvm_run->s.regs.prefix;
-    }
-
     if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
         env->cputm = cs->kvm_run->s.regs.cputm;
         env->ckc = cs->kvm_run->s.regs.ckc;
@@ -1457,6 +1409,13 @@ static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
     }
 }
 
+static void kvm_handle_ptf(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipb >> 20) & 0x0f;
+
+    s390_handle_ptf(cpu, r1, RA_IGNORED);
+}
+
 static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
 {
     int r = 0;
@@ -1474,6 +1433,9 @@ static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
     case PRIV_B9_RPCIT:
         r = kvm_rpcit_service_call(cpu, run);
         break;
+    case PRIV_B9_PTF:
+        kvm_handle_ptf(cpu, run);
+        break;
     case PRIV_B9_EQBS:
         /* just inject exception */
         r = -1;
@@ -1911,9 +1873,12 @@ static int handle_stsi(S390CPU *cpu)
         if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
             return 0;
         }
-        /* Only sysib 3.2.2 needs post-handling for now. */
         insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
         return 0;
+    case 15:
+        insert_stsi_15_1_x(cpu, run->s390_stsi.sel2, run->s390_stsi.addr,
+                           run->s390_stsi.ar, RA_IGNORED);
+        return 0;
     default:
         return 0;
     }
@@ -2495,6 +2460,14 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
         set_bit(S390_FEAT_UNPACK, model->features);
     }
 
+    /*
+     * If we have kernel support for CPU Topology indicate the
+     * configuration-topology facility.
+     */
+    if (kvm_check_extension(kvm_state, KVM_CAP_S390_CPU_TOPOLOGY)) {
+        set_bit(S390_FEAT_CONFIGURATION_TOPOLOGY, model->features);
+    }
+
     /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
     set_bit(S390_FEAT_ZPCI, model->features);
     set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
@@ -2661,6 +2634,23 @@ int kvm_s390_get_zpci_op(void)
     return cap_zpci_op;
 }
 
+int kvm_s390_topology_set_mtcr(uint64_t attr)
+{
+    struct kvm_device_attr attribute = {
+        .group = KVM_S390_VM_CPU_TOPOLOGY,
+        .attr  = attr,
+    };
+
+    if (!s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY)) {
+        return 0;
+    }
+    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_TOPOLOGY, attr)) {
+        return -ENOTSUP;
+    }
+
+    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
+}
+
 void kvm_arch_accel_class_init(ObjectClass *oc)
 {
 }
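The MTCR attribute added above is reached through s390_cpu_topology_set_changed() from cpu-sysemu.c. A hedged sketch of a caller, assuming some machine-level handler modifies a vCPU's topology at runtime; example_update_entitlement is hypothetical, while env.entitlement and s390_cpu_topology_set_changed() are the symbols introduced by this series:

    #include "qemu/osdep.h"
    #include "cpu.h"

    /* Hypothetical caller: change a vCPU's entitlement and report it to the guest. */
    static void example_update_entitlement(S390CPU *cpu, CpuS390Entitlement new_entitlement)
    {
        if (cpu->env.entitlement != new_entitlement) {
            cpu->env.entitlement = new_entitlement;
            /* Ask KVM to raise MTCR so the guest notices the change via PTF/STSI 15.1.x. */
            s390_cpu_topology_set_changed(true);
        }
    }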
diff --git a/target/s390x/kvm/kvm_s390x.h b/target/s390x/kvm/kvm_s390x.h
index f978556..649dae5 100644
--- a/target/s390x/kvm/kvm_s390x.h
+++ b/target/s390x/kvm/kvm_s390x.h
@@ -47,5 +47,6 @@ void kvm_s390_crypto_reset(void);
 void kvm_s390_restart_interrupt(S390CPU *cpu);
 void kvm_s390_stop_interrupt(S390CPU *cpu);
 void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info);
+int kvm_s390_topology_set_mtcr(uint64_t attr);
 
 #endif /* KVM_S390X_H */
diff --git a/target/s390x/kvm/meson.build b/target/s390x/kvm/meson.build
index d6aca59..588a9aa 100644
--- a/target/s390x/kvm/meson.build
+++ b/target/s390x/kvm/meson.build
@@ -1,7 +1,8 @@
 s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
   'pv.c',
-  'kvm.c'
+  'kvm.c',
+  'stsi-topology.c'
 ), if_false: files(
   'stubs.c'
 ))
diff --git a/target/s390x/kvm/stsi-topology.c b/target/s390x/kvm/stsi-topology.c
new file mode 100644
index 0000000..efd2aa7
--- /dev/null
+++ b/target/s390x/kvm/stsi-topology.c
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU S390x CPU Topology
+ *
+ * Copyright IBM Corp. 2022, 2023
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/cpu-topology.h"
+
+QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_LOW != 1);
+QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_MEDIUM != 2);
+QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_HIGH != 3);
+
+/**
+ * fill_container:
+ * @p: The address of the container TLE to fill
+ * @level: The level of nesting for this container
+ * @id: The container receives a unique ID inside its own container
+ *
+ * Returns the next free TLE entry.
+ */
+static char *fill_container(char *p, int level, int id)
+{
+    SYSIBContainerListEntry *tle = (SYSIBContainerListEntry *)p;
+
+    tle->nl = level;
+    tle->id = id;
+    return p + sizeof(*tle);
+}
+
+/**
+ * fill_tle_cpu:
+ * @p: The address of the CPU TLE to fill
+ * @entry: a pointer to the S390TopologyEntry defining this
+ *         CPU container.
+ *
+ * Returns the next free TLE entry.
+ */
+static char *fill_tle_cpu(char *p, S390TopologyEntry *entry)
+{
+    SysIBCPUListEntry *tle = (SysIBCPUListEntry *)p;
+    S390TopologyId topology_id = entry->id;
+
+    tle->nl = 0;
+    tle->flags = 0;
+    if (topology_id.vertical) {
+        tle->flags |= topology_id.entitlement;
+    }
+    if (topology_id.dedicated) {
+        tle->flags |= SYSIB_TLE_DEDICATED;
+    }
+    tle->type = topology_id.type;
+    tle->origin = cpu_to_be16(topology_id.origin * 64);
+    tle->mask = cpu_to_be64(entry->mask);
+    return p + sizeof(*tle);
+}
+
+/*
+ * Macro to check that the size of data after increment
+ * will not get bigger than the size of the SysIB.
+ */
+#define SYSIB_GUARD(data, x) do {       \
+        data += x;                      \
+        if (data > sizeof(SysIB)) {     \
+            return 0;                   \
+        }                               \
+    } while (0)
+
+/**
+ * stsi_topology_fill_sysib:
+ * @p: A pointer to the position of the first TLE
+ * @level: The nested level wanted by the guest
+ *
+ * Fill the SYSIB with the topology information as described in
+ * the PoP, nesting containers as appropriate, with the maximum
+ * nesting limited by @level.
+ *
+ * Return value:
+ * On success: the size of the SysIB_15x after being filled with TLE.
+ * On error: 0 in the case we would overrun the end of the SysIB.
+ */
+static int stsi_topology_fill_sysib(S390TopologyList *topology_list,
+                                    char *p, int level)
+{
+    S390TopologyEntry *entry;
+    int last_drawer = -1;
+    int last_book = -1;
+    int last_socket = -1;
+    int drawer_id = 0;
+    int book_id = 0;
+    int socket_id = 0;
+    int n = sizeof(SysIB_151x);
+
+    QTAILQ_FOREACH(entry, topology_list, next) {
+        bool drawer_change = last_drawer != entry->id.drawer;
+        bool book_change = drawer_change || last_book != entry->id.book;
+        bool socket_change = book_change || last_socket != entry->id.socket;
+
+        if (level > 3 && drawer_change) {
+            SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
+            p = fill_container(p, 3, drawer_id++);
+            book_id = 0;
+        }
+        if (level > 2 && book_change) {
+            SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
+            p = fill_container(p, 2, book_id++);
+            socket_id = 0;
+        }
+        if (socket_change) {
+            SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
+            p = fill_container(p, 1, socket_id++);
+        }
+
+        SYSIB_GUARD(n, sizeof(SysIBCPUListEntry));
+        p = fill_tle_cpu(p, entry);
+        last_drawer = entry->id.drawer;
+        last_book = entry->id.book;
+        last_socket = entry->id.socket;
+    }
+
+    return n;
+}
+
+/**
+ * setup_stsi:
+ * @topology_list: ordered list of groups of CPUs with same properties
+ * @sysib: pointer to a SysIB to be filled with SysIB_151x data
+ * @level: Nested level specified by the guest
+ *
+ * Setup the SYSIB for STSI 15.1, the header as well as the description
+ * of the topology.
+ */
+static int setup_stsi(S390TopologyList *topology_list, SysIB_151x *sysib,
+                      int level)
+{
+    sysib->mnest = level;
+    switch (level) {
+    case 4:
+        sysib->mag[S390_TOPOLOGY_MAG4] = current_machine->smp.drawers;
+        sysib->mag[S390_TOPOLOGY_MAG3] = current_machine->smp.books;
+        sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.sockets;
+        sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
+        break;
+    case 3:
+        sysib->mag[S390_TOPOLOGY_MAG3] = current_machine->smp.drawers *
+                                         current_machine->smp.books;
+        sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.sockets;
+        sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
+        break;
+    case 2:
+        sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.drawers *
+                                         current_machine->smp.books *
+                                         current_machine->smp.sockets;
+        sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
+        break;
+    }
+
+    return stsi_topology_fill_sysib(topology_list, sysib->tle, level);
+}
+
+/**
+ * s390_topology_add_cpu_to_entry:
+ * @entry: Topology entry to setup
+ * @cpu: the S390CPU to add
+ *
+ * Set the core bit inside the topology mask.
+ */
+static void s390_topology_add_cpu_to_entry(S390TopologyEntry *entry,
+                                           S390CPU *cpu)
+{
+    set_bit(63 - (cpu->env.core_id % 64), &entry->mask);
+}
+
+/**
+ * s390_topology_from_cpu:
+ * @cpu: S390CPU to calculate the topology id
+ *
+ * Initialize the topology id from the CPU environment.
+ */
+static S390TopologyId s390_topology_from_cpu(S390CPU *cpu)
+{
+    S390TopologyId topology_id = {
+        .drawer = cpu->env.drawer_id,
+        .book = cpu->env.book_id,
+        .socket = cpu->env.socket_id,
+        .type = S390_TOPOLOGY_CPU_IFL,
+        .vertical = s390_topology.polarization == S390_CPU_POLARIZATION_VERTICAL,
+        .entitlement = cpu->env.entitlement,
+        .dedicated = cpu->env.dedicated,
+        .origin = cpu->env.core_id / 64,
+    };
+
+    return topology_id;
+}
+
+/**
+ * s390_topology_id_cmp:
+ * @l: first S390TopologyId
+ * @r: second S390TopologyId
+ *
+ * Compare two topology ids according to the sorting order specified by the PoP.
+ *
+ * Returns a negative number if the first id is less than, 0 if it is equal to
+ * and positive if it is larger than the second id.
+ */
+static int s390_topology_id_cmp(const S390TopologyId *l,
+                                const S390TopologyId *r)
+{
+    /*
+     * lexical order, compare less significant values only if more significant
+     * ones are equal
+     */
+    return l->sentinel - r->sentinel ?:
+           l->drawer - r->drawer ?:
+           l->book - r->book ?:
+           l->socket - r->socket ?:
+           l->type - r->type ?:
+           /* logic is inverted for the next three */
+           r->vertical - l->vertical ?:
+           r->entitlement - l->entitlement ?:
+           r->dedicated - l->dedicated ?:
+           l->origin - r->origin;
+}
+
+static bool s390_topology_id_eq(const S390TopologyId *l,
+                                const S390TopologyId *r)
+{
+    return !s390_topology_id_cmp(l, r);
+}
+
+static bool s390_topology_id_lt(const S390TopologyId *l,
+                                const S390TopologyId *r)
+{
+    return s390_topology_id_cmp(l, r) < 0;
+}
+
+/**
+ * s390_topology_fill_list_sorted:
+ * @topology_list: list to fill
+ *
+ * Create S390TopologyEntrys as appropriate from all CPUs and fill the
+ * topology_list with the entries according to the order specified by the PoP.
+ */
+static void s390_topology_fill_list_sorted(S390TopologyList *topology_list)
+{
+    CPUState *cs;
+    S390TopologyEntry sentinel = { .id.sentinel = 1 };
+
+    QTAILQ_INIT(topology_list);
+
+    QTAILQ_INSERT_HEAD(topology_list, &sentinel, next);
+
+    CPU_FOREACH(cs) {
+        S390TopologyId id = s390_topology_from_cpu(S390_CPU(cs));
+        S390TopologyEntry *entry = NULL, *tmp;
+
+        QTAILQ_FOREACH(tmp, topology_list, next) {
+            if (s390_topology_id_eq(&id, &tmp->id)) {
+                entry = tmp;
+                break;
+            } else if (s390_topology_id_lt(&id, &tmp->id)) {
+                entry = g_malloc0(sizeof(*entry));
+                entry->id = id;
+                QTAILQ_INSERT_BEFORE(tmp, entry, next);
+                break;
+            }
+        }
+        assert(entry);
+        s390_topology_add_cpu_to_entry(entry, S390_CPU(cs));
+    }
+
+    QTAILQ_REMOVE(topology_list, &sentinel, next);
+}
+
+/**
+ * s390_topology_empty_list:
+ *
+ * Clear all entries in the S390Topology list.
+ */
+static void s390_topology_empty_list(S390TopologyList *topology_list)
+{
+    S390TopologyEntry *entry = NULL;
+    S390TopologyEntry *tmp = NULL;
+
+    QTAILQ_FOREACH_SAFE(entry, topology_list, next, tmp) {
+        QTAILQ_REMOVE(topology_list, entry, next);
+        g_free(entry);
+    }
+}
+
+/**
+ * insert_stsi_15_1_x:
+ * @cpu: the CPU doing the call for which we set CC
+ * @sel2: the selector 2, containing the nested level
+ * @addr: Guest logical address of the guest SysIB
+ * @ar: the access register number
+ * @ra: the return address
+ *
+ * Emulate STSI 15.1.x, that is, perform all necessary checks and
+ * fill the SYSIB.
+ * In case the topology description is too long to fit into the SYSIB,
+ * set CC=3 and abort without writing the SYSIB.
+ */
+void insert_stsi_15_1_x(S390CPU *cpu, int sel2, uint64_t addr, uint8_t ar, uintptr_t ra)
+{
+    S390TopologyList topology_list;
+    SysIB sysib = {0};
+    int length;
+
+    if (!s390_has_topology() || sel2 < 2 || sel2 > SCLP_READ_SCP_INFO_MNEST) {
+        setcc(cpu, 3);
+        return;
+    }
+
+    s390_topology_fill_list_sorted(&topology_list);
+    length = setup_stsi(&topology_list, &sysib.sysib_151x, sel2);
+    s390_topology_empty_list(&topology_list);
+
+    if (!length) {
+        setcc(cpu, 3);
+        return;
+    }
+
+    sysib.sysib_151x.length = cpu_to_be16(length);
+    if (!s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, length)) {
+        setcc(cpu, 0);
+    } else {
+        s390_cpu_virt_mem_handle_exc(cpu, ra);
+    }
+}
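For illustration, working through fill_container() and fill_tle_cpu() by hand for a small guest started with -smp 4,sockets=2,cores=2 (assuming the machine assigns core-ids 0-1 to socket 0 and 2-3 to socket 1, horizontal polarization and no dedicated CPUs), a guest STSI 15.1.2 request would be answered with a 64-byte SysIB along these lines:

    SysIB_151x header: length = 64, mnest = 2, mag[] = { 0, 0, 0, 0, 2, 2 }
    container TLE: nl = 1, id = 0
    CPU TLE:       nl = 0, flags = 0, type = IFL, origin = 0, mask = 0xc000000000000000  (cores 0-1)
    container TLE: nl = 1, id = 1
    CPU TLE:       nl = 0, flags = 0, type = IFL, origin = 0, mask = 0x3000000000000000  (cores 2-3)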