Diffstat (limited to 'target')
 -rw-r--r--  target/s390x/Makefile.objs  |    1
 -rw-r--r--  target/s390x/cpu.c          |   35
 -rw-r--r--  target/s390x/cpu.h          |   43
 -rw-r--r--  target/s390x/cpu_models.c   |   13
 -rw-r--r--  target/s390x/diag.c         |    2
 -rw-r--r--  target/s390x/excp_helper.c  |  119
 -rw-r--r--  target/s390x/helper.c       |  152
 -rw-r--r--  target/s390x/helper.h       |    2
 -rw-r--r--  target/s390x/insn-data.def  |    2
 -rw-r--r--  target/s390x/internal.h     |   23
 -rw-r--r--  target/s390x/interrupt.c    |  172
 -rw-r--r--  target/s390x/ioinst.c       |  136
 -rw-r--r--  target/s390x/kvm-stub.c     |   13
 -rw-r--r--  target/s390x/kvm.c          |  491
 -rw-r--r--  target/s390x/kvm_s390x.h    |    3
 -rw-r--r--  target/s390x/mem_helper.c   |    8
 -rw-r--r--  target/s390x/misc_helper.c  |  109
 -rw-r--r--  target/s390x/mmu_helper.c   |   96
 -rw-r--r--  target/s390x/sigp.c         |  508
 -rw-r--r--  target/s390x/trace-events   |    4
 -rw-r--r--  target/s390x/translate.c    |   12
 21 files changed, 1100 insertions(+), 844 deletions(-)
diff --git a/target/s390x/Makefile.objs b/target/s390x/Makefile.objs
index c88ac81..31932de 100644
--- a/target/s390x/Makefile.objs
+++ b/target/s390x/Makefile.objs
@@ -2,6 +2,7 @@ obj-y += cpu.o cpu_models.o cpu_features.o gdbstub.o interrupt.o helper.o
obj-$(CONFIG_TCG) += translate.o cc_helper.o excp_helper.o fpu_helper.o
obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o crypto_helper.o
obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o arch_dump.o mmu_helper.o diag.o
+obj-$(CONFIG_SOFTMMU) += sigp.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 3fdf9ba..95f4283 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -56,10 +56,18 @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value)
static bool s390_cpu_has_work(CPUState *cs)
{
S390CPU *cpu = S390_CPU(cs);
- CPUS390XState *env = &cpu->env;
- return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->psw.mask & PSW_MASK_EXT);
+ /* STOPPED cpus can never wake up */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_LOAD &&
+ s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
+ return false;
+ }
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+
+ return s390_cpu_has_int(cpu);
}
#if !defined(CONFIG_USER_ONLY)
@@ -107,7 +115,6 @@ static void s390_cpu_initial_reset(CPUState *s)
env->gbea = 1;
env->pfault_token = -1UL;
- env->ext_index = -1;
for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
env->io_index[i] = -1;
}
@@ -145,7 +152,6 @@ static void s390_cpu_full_reset(CPUState *s)
env->gbea = 1;
env->pfault_token = -1UL;
- env->ext_index = -1;
for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
env->io_index[i] = -1;
}
@@ -331,8 +337,15 @@ unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
break;
case CPU_STATE_OPERATING:
case CPU_STATE_LOAD:
- /* unhalt the cpu for common infrastructure */
- s390_cpu_unhalt(cpu);
+ /*
+ * Starting a CPU with a PSW WAIT bit set:
+ * KVM: handles this internally and triggers another WAIT exit.
+ * TCG: will actually try to continue to run. Don't unhalt, will
+ * be done when the CPU actually has work (an interrupt).
+ */
+ if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) {
+ s390_cpu_unhalt(cpu);
+ }
break;
default:
error_report("Requested CPU state is not a valid S390 CPU state: %u",
@@ -394,14 +407,6 @@ void s390_cmma_reset(void)
}
}
-int s390_cpu_restart(S390CPU *cpu)
-{
- if (kvm_enabled()) {
- return kvm_s390_cpu_restart(cpu);
- }
- return -ENOSYS;
-}
-
int s390_get_memslot_count(void)
{
if (kvm_enabled()) {
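
The reworked s390_cpu_has_work() above gates wakeups on three conditions: the CPU must be in the LOAD or OPERATING state, a hard interrupt request must be flagged, and at least one pending interrupt must actually be deliverable. A minimal standalone sketch of that gating (illustrative names only, not part of the patch):

#include <stdbool.h>

enum cpu_state { STATE_STOPPED, STATE_LOAD, STATE_OPERATING };

/* Mirrors the three checks in the new s390_cpu_has_work(). */
static bool cpu_has_work_sketch(enum cpu_state state, bool hard_irq_flagged,
                                bool deliverable_int_pending)
{
    if (state != STATE_LOAD && state != STATE_OPERATING) {
        return false;                 /* stopped CPUs can never wake up */
    }
    if (!hard_irq_flagged) {
        return false;                 /* no CPU_INTERRUPT_HARD set */
    }
    return deliverable_int_pending;   /* s390_cpu_has_int() in the real code */
}
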
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 7e864c8..4db8b54 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -53,7 +53,6 @@
#define MMU_USER_IDX 0
-#define MAX_EXT_QUEUE 16
#define MAX_IO_QUEUE 16
#define MAX_MCHK_QUEUE 16
@@ -67,12 +66,6 @@ typedef struct PSW {
uint64_t addr;
} PSW;
-typedef struct ExtQueue {
- uint32_t code;
- uint32_t param;
- uint32_t param64;
-} ExtQueue;
-
typedef struct IOIntQueue {
uint16_t id;
uint16_t nr;
@@ -128,12 +121,13 @@ struct CPUS390XState {
uint64_t cregs[16]; /* control registers */
- ExtQueue ext_queue[MAX_EXT_QUEUE];
IOIntQueue io_queue[MAX_IO_QUEUE][8];
MchkQueue mchk_queue[MAX_MCHK_QUEUE];
int pending_int;
- int ext_index;
+ uint32_t service_param;
+ uint16_t external_call_addr;
+ DECLARE_BITMAP(emergency_signals, S390_MAX_CPUS);
int io_index[8];
int mchk_index;
@@ -351,6 +345,11 @@ extern const struct VMStateDescription vmstate_s390_cpu;
#define CR0_LOWPROT 0x0000000010000000ULL
#define CR0_SECONDARY 0x0000000004000000ULL
#define CR0_EDAT 0x0000000000800000ULL
+#define CR0_EMERGENCY_SIGNAL_SC 0x0000000000004000ULL
+#define CR0_EXTERNAL_CALL_SC 0x0000000000002000ULL
+#define CR0_CKC_SC 0x0000000000000800ULL
+#define CR0_CPU_TIMER_SC 0x0000000000000400ULL
+#define CR0_SERVICE_SC 0x0000000000000200ULL
/* MMU */
#define MMU_PRIMARY_IDX 0
@@ -401,14 +400,20 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
#define EXCP_EXT 1 /* external interrupt */
#define EXCP_SVC 2 /* supervisor call (syscall) */
#define EXCP_PGM 3 /* program interruption */
+#define EXCP_RESTART 4 /* restart interrupt */
+#define EXCP_STOP 5 /* stop interrupt */
#define EXCP_IO 7 /* I/O interrupt */
#define EXCP_MCHK 8 /* machine check */
-#define INTERRUPT_EXT (1 << 0)
-#define INTERRUPT_TOD (1 << 1)
-#define INTERRUPT_CPUTIMER (1 << 2)
-#define INTERRUPT_IO (1 << 3)
-#define INTERRUPT_MCHK (1 << 4)
+#define INTERRUPT_IO (1 << 0)
+#define INTERRUPT_MCHK (1 << 1)
+#define INTERRUPT_EXT_SERVICE (1 << 2)
+#define INTERRUPT_EXT_CPU_TIMER (1 << 3)
+#define INTERRUPT_EXT_CLOCK_COMPARATOR (1 << 4)
+#define INTERRUPT_EXTERNAL_CALL (1 << 5)
+#define INTERRUPT_EMERGENCY_SIGNAL (1 << 6)
+#define INTERRUPT_RESTART (1 << 7)
+#define INTERRUPT_STOP (1 << 8)
/* Program Status Word. */
#define S390_PSWM_REGNUM 0
@@ -589,6 +594,8 @@ struct sysib_322 {
#define SIGP_SET_PREFIX 0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH 0x12
+#define SIGP_COND_EMERGENCY 0x13
+#define SIGP_SENSE_RUNNING 0x15
#define SIGP_STORE_ADTL_STATUS 0x17
/* SIGP condition codes */
@@ -599,6 +606,7 @@ struct sysib_322 {
/* SIGP status bits */
#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_NOT_RUNNING 0x00000400UL
#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
@@ -675,7 +683,6 @@ bool s390_get_squash_mcss(void);
int s390_get_memslot_count(void);
int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit);
void s390_cmma_reset(void);
-int s390_cpu_restart(S390CPU *cpu);
void s390_enable_css_support(S390CPU *cpu);
int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id,
int vq, bool assign);
@@ -695,7 +702,6 @@ void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
/* helper.c */
#define cpu_init(cpu_model) cpu_generic_init(TYPE_S390_CPU, cpu_model)
-S390CPU *s390x_new_cpu(const char *typename, uint32_t core_id, Error **errp);
#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU
#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX)
@@ -729,6 +735,11 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
+/* sigp.c */
+int s390_cpu_restart(S390CPU *cpu);
+void s390_init_sigp(void);
+
+
/* outside of target/s390x/ */
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
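
The per-CPU ExtQueue is replaced above by dedicated state for each external interrupt class; emergency signals in particular are tracked in a bitmap indexed by the source CPU address, so one signal per source can be pending at a time. A rough standalone sketch of that bookkeeping, assuming at most 64 CPUs so a single word suffices (QEMU uses its bitmap helpers and S390_MAX_CPUS instead):

#include <stdbool.h>

static unsigned long emergency_signals;   /* one bit per source CPU, <= 64 here */

/* ~ cpu_inject_emergency_signal(): record the signalling CPU and mark pending */
static void signal_emergency(unsigned src_cpu)
{
    emergency_signals |= 1UL << src_cpu;
}

/* ~ the emergency-signal branch of do_ext_interrupt(): deliver one signal,
 * reporting the lowest-numbered source CPU, and clear its bit */
static bool deliver_one_emergency(unsigned *src_cpu)
{
    if (!emergency_signals) {
        return false;
    }
    *src_cpu = (unsigned)__builtin_ctzl(emergency_signals);   /* ~ find_first_bit() */
    emergency_signals &= ~(1UL << *src_cpu);                   /* ~ clear_bit() */
    return true;
}
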
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 07ef8a3..9554f19 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -392,7 +392,7 @@ static void create_cpu_model_list(ObjectClass *klass, void *opaque)
/* strip off the -s390-cpu */
g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
- info = g_malloc0(sizeof(*info));
+ info = g_new0(CpuDefinitionInfo, 1);
info->name = name;
info->has_migration_safe = true;
info->migration_safe = scc->is_migration_safe;
@@ -412,7 +412,7 @@ static void create_cpu_model_list(ObjectClass *klass, void *opaque)
object_unref(obj);
}
- entry = g_malloc0(sizeof(*entry));
+ entry = g_new0(CpuDefinitionInfoList, 1);
entry->value = info;
entry->next = *cpu_list;
*cpu_list = entry;
@@ -574,7 +574,7 @@ CpuModelExpansionInfo *arch_query_cpu_model_expansion(CpuModelExpansionType type
}
/* convert it back to a static representation */
- expansion_info = g_malloc0(sizeof(*expansion_info));
+ expansion_info = g_new0(CpuModelExpansionInfo, 1);
expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
cpu_info_from_model(expansion_info->model, &s390_model, delta_changes);
return expansion_info;
@@ -585,7 +585,7 @@ static void list_add_feat(const char *name, void *opaque)
strList **last = (strList **) opaque;
strList *entry;
- entry = g_malloc0(sizeof(*entry));
+ entry = g_new0(strList, 1);
entry->value = g_strdup(name);
entry->next = *last;
*last = entry;
@@ -609,7 +609,7 @@ CpuModelCompareInfo *arch_query_cpu_model_comparison(CpuModelInfo *infoa,
if (*errp) {
return NULL;
}
- compare_info = g_malloc0(sizeof(*compare_info));
+ compare_info = g_new0(CpuModelCompareInfo, 1);
/* check the cpu generation and ga level */
if (modela.def->gen == modelb.def->gen) {
@@ -713,7 +713,7 @@ CpuModelBaselineInfo *arch_query_cpu_model_baseline(CpuModelInfo *infoa,
bitmap_and(model.features, model.features, model.def->full_feat,
S390_FEAT_MAX);
- baseline_info = g_malloc0(sizeof(*baseline_info));
+ baseline_info = g_new0(CpuModelBaselineInfo, 1);
baseline_info->model = g_malloc0(sizeof(*baseline_info->model));
cpu_info_from_model(baseline_info->model, &model, true);
return baseline_info;
@@ -823,6 +823,7 @@ static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
S390_FEAT_DAT_ENH,
S390_FEAT_IDTE_SEGMENT,
S390_FEAT_STFLE,
+ S390_FEAT_SENSE_RUNNING_STATUS,
S390_FEAT_EXTENDED_IMMEDIATE,
S390_FEAT_EXTENDED_TRANSLATION_2,
S390_FEAT_MSA,
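
The allocation changes in this file swap g_malloc0(sizeof(*x)) for g_new0(Type, 1). Both return the same zero-initialized block; g_new0() additionally carries the element type, so the compiler can warn when the result is assigned to an incompatible pointer. A trivial illustration with a hypothetical struct:

#include <glib.h>

typedef struct {
    char *name;
    int value;
} Entry;               /* hypothetical type, for illustration only */

int main(void)
{
    Entry *a = g_malloc0(sizeof(*a));   /* old style: untyped allocation */
    Entry *b = g_new0(Entry, 1);        /* new style: typed, same zeroed memory */

    g_free(a);
    g_free(b);
    return 0;
}
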
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
index 82a6239..dbbb9e8 100644
--- a/target/s390x/diag.c
+++ b/target/s390x/diag.c
@@ -144,7 +144,7 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
return;
}
- iplb = g_malloc0(sizeof(IplParameterBlock));
+ iplb = g_new0(IplParameterBlock, 1);
cpu_physical_memory_read(addr, iplb, sizeof(iplb->len));
if (!iplb_valid_len(iplb)) {
env->regs[r1 + 1] = DIAG_308_RC_INVALID;
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index 3e4349d..e04b670 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -95,7 +95,6 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, orig_vaddr, rw, mmu_idx);
- orig_vaddr &= TARGET_PAGE_MASK;
vaddr = orig_vaddr;
if (mmu_idx < MMU_REAL_IDX) {
@@ -127,7 +126,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
__func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
- tlb_set_page(cs, orig_vaddr, raddr, prot,
+ tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
mmu_idx, TARGET_PAGE_SIZE);
return 0;
@@ -240,36 +239,62 @@ static void do_ext_interrupt(CPUS390XState *env)
{
S390CPU *cpu = s390_env_get_cpu(env);
uint64_t mask, addr;
+ uint16_t cpu_addr;
LowCore *lowcore;
- ExtQueue *q;
if (!(env->psw.mask & PSW_MASK_EXT)) {
cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
}
- if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
- cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
- }
-
- q = &env->ext_queue[env->ext_index];
lowcore = cpu_map_lowcore(env);
- lowcore->ext_int_code = cpu_to_be16(q->code);
- lowcore->ext_params = cpu_to_be32(q->param);
- lowcore->ext_params2 = cpu_to_be64(q->param64);
- lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
- lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
- lowcore->cpu_addr = cpu_to_be16(env->core_id | VIRTIO_SUBCODE_64);
+ if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
+ (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
+ cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
+ g_assert(cpu_addr < S390_MAX_CPUS);
+ lowcore->cpu_addr = cpu_to_be16(cpu_addr);
+ clear_bit(cpu_addr, env->emergency_signals);
+ if (bitmap_empty(env->emergency_signals, max_cpus)) {
+ env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
+ }
+ } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
+ (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
+ lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
+ env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
+ } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
+ (env->cregs[0] & CR0_CKC_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
+ lowcore->cpu_addr = 0;
+ env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
+ } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
+ (env->cregs[0] & CR0_CPU_TIMER_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
+ lowcore->cpu_addr = 0;
+ env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
+ } else if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
+ (env->cregs[0] & CR0_SERVICE_SC)) {
+ /*
+ * FIXME: floating IRQs should be considered by all CPUs and
+ * should not get cleared by CPU reset.
+ */
+ lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
+ lowcore->ext_params = cpu_to_be32(env->service_param);
+ lowcore->cpu_addr = 0;
+ env->service_param = 0;
+ env->pending_int &= ~INTERRUPT_EXT_SERVICE;
+ } else {
+ g_assert_not_reached();
+ }
+
mask = be64_to_cpu(lowcore->external_new_psw.mask);
addr = be64_to_cpu(lowcore->external_new_psw.addr);
+ lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
cpu_unmap_lowcore(lowcore);
- env->ext_index--;
- if (env->ext_index == -1) {
- env->pending_int &= ~INTERRUPT_EXT;
- }
-
DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
env->psw.mask, env->psw.addr);
@@ -412,38 +437,25 @@ void s390_cpu_do_interrupt(CPUState *cs)
qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
__func__, cs->exception_index, env->psw.addr);
- s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
/* handle machine checks */
- if ((env->psw.mask & PSW_MASK_MCHECK) &&
- (cs->exception_index == -1)) {
- if (env->pending_int & INTERRUPT_MCHK) {
- cs->exception_index = EXCP_MCHK;
- }
+ if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
+ cs->exception_index = EXCP_MCHK;
}
/* handle external interrupts */
- if ((env->psw.mask & PSW_MASK_EXT) &&
- cs->exception_index == -1) {
- if (env->pending_int & INTERRUPT_EXT) {
- /* code is already in env */
- cs->exception_index = EXCP_EXT;
- } else if (env->pending_int & INTERRUPT_TOD) {
- cpu_inject_ext(cpu, 0x1004, 0, 0);
- cs->exception_index = EXCP_EXT;
- env->pending_int &= ~INTERRUPT_EXT;
- env->pending_int &= ~INTERRUPT_TOD;
- } else if (env->pending_int & INTERRUPT_CPUTIMER) {
- cpu_inject_ext(cpu, 0x1005, 0, 0);
- cs->exception_index = EXCP_EXT;
- env->pending_int &= ~INTERRUPT_EXT;
- env->pending_int &= ~INTERRUPT_TOD;
- }
+ if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
+ cs->exception_index = EXCP_EXT;
}
/* handle I/O interrupts */
- if ((env->psw.mask & PSW_MASK_IO) &&
- (cs->exception_index == -1)) {
- if (env->pending_int & INTERRUPT_IO) {
- cs->exception_index = EXCP_IO;
- }
+ if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
+ cs->exception_index = EXCP_IO;
+ }
+ /* RESTART interrupt */
+ if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
+ cs->exception_index = EXCP_RESTART;
+ }
+ /* STOP interrupt has least priority */
+ if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
+ cs->exception_index = EXCP_STOP;
}
switch (cs->exception_index) {
@@ -462,9 +474,22 @@ void s390_cpu_do_interrupt(CPUState *cs)
case EXCP_MCHK:
do_mchk_interrupt(env);
break;
+ case EXCP_RESTART:
+ do_restart_interrupt(env);
+ break;
+ case EXCP_STOP:
+ do_stop_interrupt(env);
+ break;
+ }
+
+ /* WAIT PSW during interrupt injection or STOP interrupt */
+ if (cs->exception_index == EXCP_HLT) {
+ /* don't trigger a cpu_loop_exit(), use an interrupt instead */
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
}
cs->exception_index = -1;
+ /* we might still have pending interrupts, but not deliverable */
if (!env->pending_int) {
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
}
@@ -481,7 +506,7 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
the parent EXECUTE insn. */
return false;
}
- if (env->psw.mask & PSW_MASK_EXT) {
+ if (s390_cpu_has_int(cpu)) {
s390_cpu_do_interrupt(cs);
return true;
}
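
The rewritten do_ext_interrupt() above delivers exactly one external interrupt per invocation, picking the highest-priority pending class whose subclass mask is enabled in control register 0: emergency signal, then external call, clock comparator, CPU timer, and finally the service signal. A standalone sketch of that selection, using the bit values from cpu.h (illustrative table, not the patch code):

#include <stddef.h>
#include <stdint.h>

struct ext_class {
    const char *name;
    int pending_bit;       /* bit in env->pending_int */
    uint64_t cr0_sc_bit;   /* subclass-mask bit in CR0 */
};

/* Highest priority first, mirroring the if/else chain in do_ext_interrupt(). */
static const struct ext_class classes[] = {
    { "emergency signal", 1 << 6, 0x4000 },   /* INTERRUPT_EMERGENCY_SIGNAL */
    { "external call",    1 << 5, 0x2000 },   /* INTERRUPT_EXTERNAL_CALL */
    { "clock comparator", 1 << 4, 0x0800 },   /* INTERRUPT_EXT_CLOCK_COMPARATOR */
    { "cpu timer",        1 << 3, 0x0400 },   /* INTERRUPT_EXT_CPU_TIMER */
    { "service signal",   1 << 2, 0x0200 },   /* INTERRUPT_EXT_SERVICE */
};

static const char *pick_ext_int(int pending_int, uint64_t cr0)
{
    for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
        if ((pending_int & classes[i].pending_bit) && (cr0 & classes[i].cr0_sc_bit)) {
            return classes[i].name;
        }
    }
    return NULL;   /* nothing deliverable; the real code asserts instead */
}
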
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 97adbcc..f78983d 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -26,6 +26,7 @@
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "hw/s390x/ioinst.h"
+#include "sysemu/hw_accel.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif
@@ -51,43 +52,15 @@
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
- S390CPU *cpu = opaque;
- CPUS390XState *env = &cpu->env;
-
- env->pending_int |= INTERRUPT_TOD;
- cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+ cpu_inject_clock_comparator((S390CPU *) opaque);
}
void s390x_cpu_timer(void *opaque)
{
- S390CPU *cpu = opaque;
- CPUS390XState *env = &cpu->env;
-
- env->pending_int |= INTERRUPT_CPUTIMER;
- cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+ cpu_inject_cpu_timer((S390CPU *) opaque);
}
#endif
-S390CPU *s390x_new_cpu(const char *typename, uint32_t core_id, Error **errp)
-{
- S390CPU *cpu = S390_CPU(object_new(typename));
- Error *err = NULL;
-
- object_property_set_int(OBJECT(cpu), core_id, "core-id", &err);
- if (err != NULL) {
- goto out;
- }
- object_property_set_bool(OBJECT(cpu), true, "realized", &err);
-
-out:
- if (err) {
- error_propagate(errp, err);
- object_unref(OBJECT(cpu));
- cpu = NULL;
- }
- return cpu;
-}
-
#ifndef CONFIG_USER_ONLY
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
@@ -121,6 +94,25 @@ hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
return phys_addr;
}
+static inline bool is_special_wait_psw(uint64_t psw_addr)
+{
+ /* signal quiesce */
+ return psw_addr == 0xfffUL;
+}
+
+void s390_handle_wait(S390CPU *cpu)
+{
+ if (s390_cpu_halt(cpu) == 0) {
+#ifndef CONFIG_USER_ONLY
+ if (is_special_wait_psw(cpu->env.psw.addr)) {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ } else {
+ qemu_system_guest_panicked(NULL);
+ }
+#endif
+ }
+}
+
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
uint64_t old_mask = env->psw.mask;
@@ -135,13 +127,9 @@ void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
}
- if (mask & PSW_MASK_WAIT) {
- S390CPU *cpu = s390_env_get_cpu(env);
- if (s390_cpu_halt(cpu) == 0) {
-#ifndef CONFIG_USER_ONLY
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
-#endif
- }
+ /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
+ if (tcg_enabled() && (mask & PSW_MASK_WAIT)) {
+ s390_handle_wait(s390_env_get_cpu(env));
}
}
@@ -194,6 +182,7 @@ void do_restart_interrupt(CPUS390XState *env)
addr = be64_to_cpu(lowcore->restart_new_psw.addr);
cpu_unmap_lowcore(lowcore);
+ env->pending_int &= ~INTERRUPT_RESTART;
load_psw(env, mask, addr);
}
@@ -237,6 +226,95 @@ void s390_cpu_recompute_watchpoints(CPUState *cs)
}
}
+struct sigp_save_area {
+ uint64_t fprs[16]; /* 0x0000 */
+ uint64_t grs[16]; /* 0x0080 */
+ PSW psw; /* 0x0100 */
+ uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
+ uint32_t prefix; /* 0x0118 */
+ uint32_t fpc; /* 0x011c */
+ uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
+ uint32_t todpr; /* 0x0124 */
+ uint64_t cputm; /* 0x0128 */
+ uint64_t ckc; /* 0x0130 */
+ uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
+ uint32_t ars[16]; /* 0x0140 */
+ uint64_t crs[16]; /* 0x0384 */
+};
+QEMU_BUILD_BUG_ON(sizeof(struct sigp_save_area) != 512);
+
+int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
+{
+ static const uint8_t ar_id = 1;
+ struct sigp_save_area *sa;
+ hwaddr len = sizeof(*sa);
+ int i;
+
+ sa = cpu_physical_memory_map(addr, &len, 1);
+ if (!sa) {
+ return -EFAULT;
+ }
+ if (len != sizeof(*sa)) {
+ cpu_physical_memory_unmap(sa, len, 1, 0);
+ return -EFAULT;
+ }
+
+ if (store_arch) {
+ cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
+ }
+ sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
+ sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
+ sa->prefix = cpu_to_be32(cpu->env.psa);
+ sa->fpc = cpu_to_be32(cpu->env.fpc);
+ sa->todpr = cpu_to_be32(cpu->env.todpr);
+ sa->cputm = cpu_to_be64(cpu->env.cputm);
+ sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
+ for (i = 0; i < 16; ++i) {
+ sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
+ }
+
+ cpu_physical_memory_unmap(sa, len, 1, len);
+
+ return 0;
+}
+
+#define ADTL_GS_OFFSET 1024 /* offset of GS data in adtl save area */
+#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
+int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
+{
+ hwaddr save = len;
+ void *mem;
+
+ mem = cpu_physical_memory_map(addr, &save, 1);
+ if (!mem) {
+ return -EFAULT;
+ }
+ if (save != len) {
+ cpu_physical_memory_unmap(mem, len, 1, 0);
+ return -EFAULT;
+ }
+
+ /* FIXME: as soon as TCG supports these features, convert cpu->be */
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ memcpy(mem, &cpu->env.vregs, 512);
+ }
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
+ memcpy(mem + ADTL_GS_OFFSET, &cpu->env.gscb, 32);
+ }
+
+ cpu_physical_memory_unmap(mem, len, 1, len);
+
+ return 0;
+}
#endif /* CONFIG_USER_ONLY */
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
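
The 512-byte store-status save area filled by the new s390_store_status() has a fixed, architected layout, enforced in the patch with QEMU_BUILD_BUG_ON(). A standalone sketch of the same layout with the size and a few key offsets checked via C11 static assertions (offsets computed from the field sizes, not taken from the patch comments):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { uint64_t mask, addr; } PSW;

struct sigp_save_area {
    uint64_t fprs[16];                      /* 0x0000 */
    uint64_t grs[16];                       /* 0x0080 */
    PSW      psw;                           /* 0x0100 */
    uint8_t  pad_0x0110[0x0118 - 0x0110];
    uint32_t prefix;                        /* 0x0118 */
    uint32_t fpc;                           /* 0x011c */
    uint8_t  pad_0x0120[0x0124 - 0x0120];
    uint32_t todpr;                         /* 0x0124 */
    uint64_t cputm;                         /* 0x0128 */
    uint64_t ckc;                           /* 0x0130 */
    uint8_t  pad_0x0138[0x0140 - 0x0138];
    uint32_t ars[16];                       /* 0x0140 */
    uint64_t crs[16];                       /* 0x0180 */
};

/* Same invariant as the QEMU_BUILD_BUG_ON() in the patch, plus the offsets
 * the copy loops rely on. */
static_assert(sizeof(struct sigp_save_area) == 512, "save area is 512 bytes");
static_assert(offsetof(struct sigp_save_area, grs) == 0x080, "grs");
static_assert(offsetof(struct sigp_save_area, psw) == 0x100, "psw");
static_assert(offsetof(struct sigp_save_area, ars) == 0x140, "ars");
static_assert(offsetof(struct sigp_save_area, crs) == 0x180, "crs");
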
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 52c2963..81c5727 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -138,7 +138,7 @@ DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64)
DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64)
DEF_HELPER_4(mvcs, i32, env, i64, i64, i64)
DEF_HELPER_4(mvcp, i32, env, i64, i64, i64)
-DEF_HELPER_4(sigp, i32, env, i64, i32, i64)
+DEF_HELPER_4(sigp, i32, env, i64, i32, i32)
DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_4(idte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index d09f2ed..16e27c8 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -1010,7 +1010,7 @@
/* SET SYSTEM MASK */
C(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0)
/* SIGNAL PROCESSOR */
- C(0xae00, SIGP, RS_a, Z, r3_o, a2, 0, 0, sigp, 0)
+ C(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0)
/* STORE CLOCK */
C(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0)
C(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0)
diff --git a/target/s390x/internal.h b/target/s390x/internal.h
index 14bf3ea..3aff54a 100644
--- a/target/s390x/internal.h
+++ b/target/s390x/internal.h
@@ -352,6 +352,10 @@ void s390_cpu_recompute_watchpoints(CPUState *cs);
void s390x_tod_timer(void *opaque);
void s390x_cpu_timer(void *opaque);
void do_restart_interrupt(CPUS390XState *env);
+void s390_handle_wait(S390CPU *cpu);
+#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
+int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch);
+int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len);
#ifndef CONFIG_USER_ONLY
LowCore *cpu_map_lowcore(CPUS390XState *env);
void cpu_unmap_lowcore(LowCore *lowcore);
@@ -360,8 +364,18 @@ void cpu_unmap_lowcore(LowCore *lowcore);
/* interrupt.c */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
-void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
- uint64_t param64);
+void cpu_inject_clock_comparator(S390CPU *cpu);
+void cpu_inject_cpu_timer(S390CPU *cpu);
+void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr);
+int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr);
+bool s390_cpu_has_io_int(S390CPU *cpu);
+bool s390_cpu_has_ext_int(S390CPU *cpu);
+bool s390_cpu_has_mcck_int(S390CPU *cpu);
+bool s390_cpu_has_int(S390CPU *cpu);
+bool s390_cpu_has_restart_int(S390CPU *cpu);
+bool s390_cpu_has_stop_int(S390CPU *cpu);
+void cpu_inject_restart(S390CPU *cpu);
+void cpu_inject_stop(S390CPU *cpu);
/* ioinst.c */
@@ -403,4 +417,9 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
/* translate.c */
void s390x_translate_init(void);
+
+/* sigp.c */
+int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3);
+void do_stop_interrupt(CPUS390XState *env);
+
#endif /* S390X_INTERNAL_H */
diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c
index 058e219..ce6177c 100644
--- a/target/s390x/interrupt.c
+++ b/target/s390x/interrupt.c
@@ -54,24 +54,82 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
}
#if !defined(CONFIG_USER_ONLY)
-void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
- uint64_t param64)
+static void cpu_inject_service(S390CPU *cpu, uint32_t param)
{
CPUS390XState *env = &cpu->env;
- if (env->ext_index == MAX_EXT_QUEUE - 1) {
- /* ugh - can't queue anymore. Let's drop. */
+ /* multiplexing is good enough for sclp - kvm does it internally as well*/
+ env->service_param |= param;
+
+ env->pending_int |= INTERRUPT_EXT_SERVICE;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void cpu_inject_clock_comparator(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ env->pending_int |= INTERRUPT_EXT_CLOCK_COMPARATOR;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void cpu_inject_cpu_timer(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ env->pending_int |= INTERRUPT_EXT_CPU_TIMER;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr)
+{
+ CPUS390XState *env = &cpu->env;
+
+ g_assert(src_cpu_addr < S390_MAX_CPUS);
+ set_bit(src_cpu_addr, env->emergency_signals);
+
+ env->pending_int |= INTERRUPT_EMERGENCY_SIGNAL;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr)
+{
+ CPUS390XState *env = &cpu->env;
+
+ g_assert(src_cpu_addr < S390_MAX_CPUS);
+ if (env->pending_int & INTERRUPT_EXTERNAL_CALL) {
+ return -EBUSY;
+ }
+ env->external_call_addr = src_cpu_addr;
+
+ env->pending_int |= INTERRUPT_EXTERNAL_CALL;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+ return 0;
+}
+
+void cpu_inject_restart(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (kvm_enabled()) {
+ kvm_s390_restart_interrupt(cpu);
return;
}
- env->ext_index++;
- assert(env->ext_index < MAX_EXT_QUEUE);
+ env->pending_int |= INTERRUPT_RESTART;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
- env->ext_queue[env->ext_index].code = code;
- env->ext_queue[env->ext_index].param = param;
- env->ext_queue[env->ext_index].param64 = param64;
+void cpu_inject_stop(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
- env->pending_int |= INTERRUPT_EXT;
+ if (kvm_enabled()) {
+ kvm_s390_stop_interrupt(cpu);
+ return;
+ }
+
+ env->pending_int |= INTERRUPT_STOP;
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
@@ -129,7 +187,7 @@ void s390_sclp_extint(uint32_t parm)
} else {
S390CPU *dummy_cpu = s390_cpu_addr2state(0);
- cpu_inject_ext(dummy_cpu, EXT_SERVICE, parm, 0);
+ cpu_inject_service(dummy_cpu, parm);
}
}
@@ -158,4 +216,96 @@ void s390_crw_mchk(void)
}
}
+bool s390_cpu_has_mcck_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (!(env->psw.mask & PSW_MASK_MCHECK)) {
+ return false;
+ }
+
+ return env->pending_int & INTERRUPT_MCHK;
+}
+
+bool s390_cpu_has_ext_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (!(env->psw.mask & PSW_MASK_EXT)) {
+ return false;
+ }
+
+ if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
+ (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
+ (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
+ (env->cregs[0] & CR0_CKC_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
+ (env->cregs[0] & CR0_CPU_TIMER_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
+ (env->cregs[0] & CR0_SERVICE_SC)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool s390_cpu_has_io_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (!(env->psw.mask & PSW_MASK_IO)) {
+ return false;
+ }
+
+ return env->pending_int & INTERRUPT_IO;
+}
+
+bool s390_cpu_has_restart_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ return env->pending_int & INTERRUPT_RESTART;
+}
+
+bool s390_cpu_has_stop_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ return env->pending_int & INTERRUPT_STOP;
+}
#endif
+
+bool s390_cpu_has_int(S390CPU *cpu)
+{
+#ifndef CONFIG_USER_ONLY
+ if (!tcg_enabled()) {
+ return false;
+ }
+ return s390_cpu_has_mcck_int(cpu) ||
+ s390_cpu_has_ext_int(cpu) ||
+ s390_cpu_has_io_int(cpu) ||
+ s390_cpu_has_restart_int(cpu) ||
+ s390_cpu_has_stop_int(cpu);
+#else
+ return false;
+#endif
+}
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
index 47490f8..23962fb 100644
--- a/target/s390x/ioinst.c
+++ b/target/s390x/ioinst.c
@@ -42,8 +42,6 @@ void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
{
int cssid, ssid, schid, m;
SubchDev *sch;
- int ret = -ENODEV;
- int cc;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
@@ -51,32 +49,17 @@ void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
}
trace_ioinst_sch_id("xsch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
- if (sch && css_subch_visible(sch)) {
- ret = css_do_xsch(sch);
- }
- switch (ret) {
- case -ENODEV:
- cc = 3;
- break;
- case -EBUSY:
- cc = 2;
- break;
- case 0:
- cc = 0;
- break;
- default:
- cc = 1;
- break;
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
}
- setcc(cpu, cc);
+ setcc(cpu, css_do_xsch(sch));
}
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
{
int cssid, ssid, schid, m;
SubchDev *sch;
- int ret = -ENODEV;
- int cc;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
@@ -84,23 +67,17 @@ void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
}
trace_ioinst_sch_id("csch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
- if (sch && css_subch_visible(sch)) {
- ret = css_do_csch(sch);
- }
- if (ret == -ENODEV) {
- cc = 3;
- } else {
- cc = 0;
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
}
- setcc(cpu, cc);
+ setcc(cpu, css_do_csch(sch));
}
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
{
int cssid, ssid, schid, m;
SubchDev *sch;
- int ret = -ENODEV;
- int cc;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
@@ -108,24 +85,11 @@ void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
}
trace_ioinst_sch_id("hsch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
- if (sch && css_subch_visible(sch)) {
- ret = css_do_hsch(sch);
- }
- switch (ret) {
- case -ENODEV:
- cc = 3;
- break;
- case -EBUSY:
- cc = 2;
- break;
- case 0:
- cc = 0;
- break;
- default:
- cc = 1;
- break;
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
}
- setcc(cpu, cc);
+ setcc(cpu, css_do_hsch(sch));
}
static int ioinst_schib_valid(SCHIB *schib)
@@ -147,8 +111,6 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
SubchDev *sch;
SCHIB schib;
uint64_t addr;
- int ret = -ENODEV;
- int cc;
CPUS390XState *env = &cpu->env;
uint8_t ar;
@@ -167,24 +129,11 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
}
trace_ioinst_sch_id("msch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
- if (sch && css_subch_visible(sch)) {
- ret = css_do_msch(sch, &schib);
- }
- switch (ret) {
- case -ENODEV:
- cc = 3;
- break;
- case -EBUSY:
- cc = 2;
- break;
- case 0:
- cc = 0;
- break;
- default:
- cc = 1;
- break;
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
}
- setcc(cpu, cc);
+ setcc(cpu, css_do_msch(sch, &schib));
}
static void copy_orb_from_guest(ORB *dest, const ORB *src)
@@ -218,8 +167,6 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
SubchDev *sch;
ORB orig_orb, orb;
uint64_t addr;
- int ret = -ENODEV;
- int cc;
CPUS390XState *env = &cpu->env;
uint8_t ar;
@@ -239,33 +186,11 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
}
trace_ioinst_sch_id("ssch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
- if (sch && css_subch_visible(sch)) {
- ret = css_do_ssch(sch, &orb);
- }
- switch (ret) {
- case -ENODEV:
- cc = 3;
- break;
- case -EBUSY:
- cc = 2;
- break;
- case -EFAULT:
- /*
- * TODO:
- * I'm wondering whether there is something better
- * to do for us here (like setting some device or
- * subchannel status).
- */
- program_interrupt(env, PGM_ADDRESSING, 4);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
return;
- case 0:
- cc = 0;
- break;
- default:
- cc = 1;
- break;
}
- setcc(cpu, cc);
+ setcc(cpu, css_do_ssch(sch, &orb));
}
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
@@ -784,8 +709,6 @@ void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
{
int cssid, ssid, schid, m;
SubchDev *sch;
- int ret = -ENODEV;
- int cc;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 4);
@@ -793,24 +716,11 @@ void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
}
trace_ioinst_sch_id("rsch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
- if (sch && css_subch_visible(sch)) {
- ret = css_do_rsch(sch);
- }
- switch (ret) {
- case -ENODEV:
- cc = 3;
- break;
- case -EINVAL:
- cc = 2;
- break;
- case 0:
- cc = 0;
- break;
- default:
- cc = 1;
- break;
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
}
- setcc(cpu, cc);
+ setcc(cpu, css_do_rsch(sch));
}
#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
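
The I/O instruction handlers above used to translate -errno results from css_do_xsch()/csch()/hsch()/msch()/ssch()/rsch() into condition codes with per-handler switch statements; the patch folds the "subchannel missing or not visible" case into an early cc 3 and then passes the css_do_*() return value straight to setcc(), relying on those helpers returning condition codes. For reference, the mapping the removed switches implemented, collapsed into one hedged sketch:

#include <errno.h>

/* Illustrative only: the errno-to-cc mapping previously open-coded in each
 * handler (rsch used -EINVAL instead of -EBUSY for cc 2). */
static int errno_to_cc(int ret)
{
    switch (ret) {
    case 0:
        return 0;       /* function initiated / performed */
    case -EBUSY:
        return 2;       /* busy */
    case -ENODEV:
        return 3;       /* not operational */
    default:
        return 1;       /* status pending */
    }
}
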
diff --git a/target/s390x/kvm-stub.c b/target/s390x/kvm-stub.c
index 43f02c2..6bae3e9 100644
--- a/target/s390x/kvm-stub.c
+++ b/target/s390x/kvm-stub.c
@@ -93,11 +93,6 @@ int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
return -ENOSYS;
}
-int kvm_s390_cpu_restart(S390CPU *cpu)
-{
- return -ENOSYS;
-}
-
void kvm_s390_cmma_reset(void)
{
}
@@ -119,3 +114,11 @@ int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
void kvm_s390_crypto_reset(void)
{
}
+
+void kvm_s390_stop_interrupt(S390CPU *cpu)
+{
+}
+
+void kvm_s390_restart_interrupt(S390CPU *cpu)
+{
+}
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index d3700fc..88f27d7 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -135,8 +135,6 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
-static QemuMutex qemu_sigp_mutex;
-
static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
@@ -322,8 +320,6 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
*/
/* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */
- qemu_mutex_init(&qemu_sigp_mutex);
-
return 0;
}
@@ -1508,456 +1504,22 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
return r;
}
-typedef struct SigpInfo {
- uint64_t param;
- int cc;
- uint64_t *status_reg;
-} SigpInfo;
-
-static void set_sigp_status(SigpInfo *si, uint64_t status)
-{
- *si->status_reg &= 0xffffffff00000000ULL;
- *si->status_reg |= status;
- si->cc = SIGP_CC_STATUS_STORED;
-}
-
-static void sigp_start(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
-
- if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
- return;
- }
-
- s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
- struct kvm_s390_irq irq = {
- .type = KVM_S390_SIGP_STOP,
- };
-
- if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
- return;
- }
-
- /* disabled wait - sleeping in user space */
- if (cs->halted) {
- s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
- } else {
- /* execute the stop function */
- cpu->env.sigp_order = SIGP_STOP;
- kvm_s390_vcpu_interrupt(cpu, &irq);
- }
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-#define ADTL_GS_OFFSET 1024 /* offset of GS data in adtl save area */
-#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
-static int do_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
-{
- hwaddr save = len;
- void *mem;
-
- mem = cpu_physical_memory_map(addr, &save, 1);
- if (!mem) {
- return -EFAULT;
- }
- if (save != len) {
- cpu_physical_memory_unmap(mem, len, 1, 0);
- return -EFAULT;
- }
-
- if (s390_has_feat(S390_FEAT_VECTOR)) {
- memcpy(mem, &cpu->env.vregs, 512);
- }
- if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
- memcpy(mem + ADTL_GS_OFFSET, &cpu->env.gscb, 32);
- }
-
- cpu_physical_memory_unmap(mem, len, 1, len);
-
- return 0;
-}
-
-struct sigp_save_area {
- uint64_t fprs[16]; /* 0x0000 */
- uint64_t grs[16]; /* 0x0080 */
- PSW psw; /* 0x0100 */
- uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
- uint32_t prefix; /* 0x0118 */
- uint32_t fpc; /* 0x011c */
- uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
- uint32_t todpr; /* 0x0124 */
- uint64_t cputm; /* 0x0128 */
- uint64_t ckc; /* 0x0130 */
- uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
- uint32_t ars[16]; /* 0x0140 */
- uint64_t crs[16]; /* 0x0384 */
-};
-QEMU_BUILD_BUG_ON(sizeof(struct sigp_save_area) != 512);
-
-#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
-static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
-{
- static const uint8_t ar_id = 1;
- struct sigp_save_area *sa;
- hwaddr len = sizeof(*sa);
- int i;
-
- sa = cpu_physical_memory_map(addr, &len, 1);
- if (!sa) {
- return -EFAULT;
- }
- if (len != sizeof(*sa)) {
- cpu_physical_memory_unmap(sa, len, 1, 0);
- return -EFAULT;
- }
-
- if (store_arch) {
- cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
- }
- for (i = 0; i < 16; ++i) {
- sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
- }
- for (i = 0; i < 16; ++i) {
- sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
- }
- sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
- sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
- sa->prefix = cpu_to_be32(cpu->env.psa);
- sa->fpc = cpu_to_be32(cpu->env.fpc);
- sa->todpr = cpu_to_be32(cpu->env.todpr);
- sa->cputm = cpu_to_be64(cpu->env.cputm);
- sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
- for (i = 0; i < 16; ++i) {
- sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
- }
- for (i = 0; i < 16; ++i) {
- sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
- }
-
- cpu_physical_memory_unmap(sa, len, 1, len);
-
- return 0;
-}
-
-static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
- struct kvm_s390_irq irq = {
- .type = KVM_S390_SIGP_STOP,
- };
-
- /* disabled wait - sleeping in user space */
- if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
- s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
- }
-
- switch (s390_cpu_get_state(cpu)) {
- case CPU_STATE_OPERATING:
- cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
- kvm_s390_vcpu_interrupt(cpu, &irq);
- /* store will be performed when handling the stop intercept */
- break;
- case CPU_STATE_STOPPED:
- /* already stopped, just store the status */
- cpu_synchronize_state(cs);
- kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
- break;
- }
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
- uint32_t address = si->param & 0x7ffffe00u;
-
- /* cpu has to be stopped */
- if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
- set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
- return;
- }
-
- cpu_synchronize_state(cs);
-
- if (kvm_s390_store_status(cpu, address, false)) {
- set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
- return;
- }
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-#define ADTL_SAVE_LC_MASK 0xfUL
-static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
- uint8_t lc = si->param & ADTL_SAVE_LC_MASK;
- hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK;
- hwaddr len = 1UL << (lc ? lc : 10);
-
- if (!s390_has_feat(S390_FEAT_VECTOR) &&
- !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
- set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
- return;
- }
-
- /* cpu has to be stopped */
- if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
- set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
- return;
- }
-
- /* address must be aligned to length */
- if (addr & (len - 1)) {
- set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
- return;
- }
-
- /* no GS: only lc == 0 is valid */
- if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
- lc != 0) {
- set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
- return;
- }
-
- /* GS: 0, 10, 11, 12 are valid */
- if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
- lc != 0 &&
- lc != 10 &&
- lc != 11 &&
- lc != 12) {
- set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
- return;
- }
-
- cpu_synchronize_state(cs);
-
- if (do_store_adtl_status(cpu, addr, len)) {
- set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
- return;
- }
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
- struct kvm_s390_irq irq = {
- .type = KVM_S390_RESTART,
- };
-
- switch (s390_cpu_get_state(cpu)) {
- case CPU_STATE_STOPPED:
- /* the restart irq has to be delivered prior to any other pending irq */
- cpu_synchronize_state(cs);
- do_restart_interrupt(&cpu->env);
- s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
- break;
- case CPU_STATE_OPERATING:
- kvm_s390_vcpu_interrupt(cpu, &irq);
- break;
- }
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-int kvm_s390_cpu_restart(S390CPU *cpu)
-{
- SigpInfo si = {};
-
- run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
- DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
- return 0;
-}
-
-static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
- SigpInfo *si = arg.host_ptr;
-
- cpu_synchronize_state(cs);
- scc->initial_cpu_reset(cs);
- cpu_synchronize_post_reset(cs);
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
- SigpInfo *si = arg.host_ptr;
-
- cpu_synchronize_state(cs);
- scc->cpu_reset(cs);
- cpu_synchronize_post_reset(cs);
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPU *cpu = S390_CPU(cs);
- SigpInfo *si = arg.host_ptr;
- uint32_t addr = si->param & 0x7fffe000u;
-
- cpu_synchronize_state(cs);
-
- if (!address_space_access_valid(&address_space_memory, addr,
- sizeof(struct LowCore), false)) {
- set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
- return;
- }
-
- /* cpu has to be stopped */
- if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
- set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
- return;
- }
-
- cpu->env.psa = addr;
- cpu_synchronize_post_init(cs);
- si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
- uint64_t param, uint64_t *status_reg)
-{
- SigpInfo si = {
- .param = param,
- .status_reg = status_reg,
- };
-
- /* cpu available? */
- if (dst_cpu == NULL) {
- return SIGP_CC_NOT_OPERATIONAL;
- }
-
- /* only resets can break pending orders */
- if (dst_cpu->env.sigp_order != 0 &&
- order != SIGP_CPU_RESET &&
- order != SIGP_INITIAL_CPU_RESET) {
- return SIGP_CC_BUSY;
- }
-
- switch (order) {
- case SIGP_START:
- run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_STOP:
- run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_RESTART:
- run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_STOP_STORE_STATUS:
- run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_STORE_STATUS_ADDR:
- run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_STORE_ADTL_STATUS:
- run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_SET_PREFIX:
- run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_INITIAL_CPU_RESET:
- run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
- break;
- case SIGP_CPU_RESET:
- run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
- break;
- default:
- DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
- set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
- }
-
- return si.cc;
-}
-
-static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
- uint64_t *status_reg)
-{
- CPUState *cur_cs;
- S390CPU *cur_cpu;
- bool all_stopped = true;
-
- CPU_FOREACH(cur_cs) {
- cur_cpu = S390_CPU(cur_cs);
-
- if (cur_cpu == cpu) {
- continue;
- }
- if (s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
- all_stopped = false;
- }
- }
-
- *status_reg &= 0xffffffff00000000ULL;
-
- /* Reject set arch order, with czam we're always in z/Arch mode. */
- *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER :
- SIGP_STAT_INCORRECT_STATE);
- return SIGP_CC_STATUS_STORED;
-}
-
-static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
CPUS390XState *env = &cpu->env;
const uint8_t r1 = ipa1 >> 4;
const uint8_t r3 = ipa1 & 0x0f;
int ret;
uint8_t order;
- uint64_t *status_reg;
- uint64_t param;
- S390CPU *dst_cpu = NULL;
cpu_synchronize_state(CPU(cpu));
/* get order code */
- order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
- & SIGP_ORDER_MASK;
- status_reg = &env->regs[r1];
- param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
-
- if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
- ret = SIGP_CC_BUSY;
- goto out;
- }
-
- switch (order) {
- case SIGP_SET_ARCH:
- ret = sigp_set_architecture(cpu, param, status_reg);
- break;
- default:
- /* all other sigp orders target a single vcpu */
- dst_cpu = s390_cpu_addr2state(env->regs[r3]);
- ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
- }
- qemu_mutex_unlock(&qemu_sigp_mutex);
-
-out:
- trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
- dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
-
- if (ret >= 0) {
- setcc(cpu, ret);
- return 0;
- }
+ order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;
- return ret;
+ ret = handle_sigp(env, order, r1, r3);
+ setcc(cpu, ret);
+ return 0;
}
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
@@ -1985,7 +1547,7 @@ static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
r = handle_diag(cpu, run, run->s390_sieic.ipb);
break;
case IPA0_SIGP:
- r = handle_sigp(cpu, run, ipa1);
+ r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
break;
}
@@ -1997,12 +1559,6 @@ static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
return r;
}
-static bool is_special_wait_psw(CPUState *cs)
-{
- /* signal quiesce */
- return cs->kvm_run->psw_addr == 0xfffUL;
-}
-
static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
CPUState *cs = CPU(cpu);
@@ -2074,24 +1630,11 @@ static int handle_intercept(S390CPU *cpu)
case ICPT_WAITPSW:
/* disabled wait, since enabled wait is handled in kernel */
cpu_synchronize_state(cs);
- if (s390_cpu_halt(cpu) == 0) {
- if (is_special_wait_psw(cs)) {
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
- } else {
- qemu_system_guest_panicked(NULL);
- }
- }
+ s390_handle_wait(cpu);
r = EXCP_HALTED;
break;
case ICPT_CPU_STOP:
- if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
- }
- if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
- kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
- true);
- }
- cpu->env.sigp_order = 0;
+ do_stop_interrupt(&cpu->env);
r = EXCP_HALTED;
break;
case ICPT_OPEREXC:
@@ -2830,3 +2373,21 @@ void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
kvm_s390_enable_cmma();
}
}
+
+void kvm_s390_restart_interrupt(S390CPU *cpu)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_RESTART,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+void kvm_s390_stop_interrupt(S390CPU *cpu)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
diff --git a/target/s390x/kvm_s390x.h b/target/s390x/kvm_s390x.h
index 501fc5a..79b3594 100644
--- a/target/s390x/kvm_s390x.h
+++ b/target/s390x/kvm_s390x.h
@@ -35,13 +35,14 @@ int kvm_s390_set_clock_ext(uint8_t *tod_high, uint64_t *tod_clock);
void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
int vq, bool assign);
-int kvm_s390_cpu_restart(S390CPU *cpu);
int kvm_s390_get_memslot_count(void);
int kvm_s390_cmma_active(void);
void kvm_s390_cmma_reset(void);
void kvm_s390_reset_vcpu(S390CPU *cpu);
int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit);
void kvm_s390_crypto_reset(void);
+void kvm_s390_restart_interrupt(S390CPU *cpu);
+void kvm_s390_stop_interrupt(S390CPU *cpu);
/* implemented outside of target/s390x/ */
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index bbbe1c6..69a1686 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -1687,18 +1687,10 @@ void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
uintptr_t ra = GETPC();
- CPUState *cs = CPU(s390_env_get_cpu(env));
int i;
real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
- /* Check low-address protection */
- if ((env->cregs[0] & CR0_LOWPROT) && real_addr < 0x2000) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_PROTECTION, 4);
- return 1;
- }
-
for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
cpu_stq_real_ra(env, real_addr + i, 0, ra);
}
diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c
index 0b93381..4afd90b 100644
--- a/target/s390x/misc_helper.c
+++ b/target/s390x/misc_helper.c
@@ -319,44 +319,14 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
}
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
- uint64_t cpu_addr)
+ uint32_t r3)
{
- int cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ int cc;
- HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
- __func__, order_code, r1, cpu_addr);
-
- /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
- as parameter (input). Status (output) is always R1. */
-
- switch (order_code & SIGP_ORDER_MASK) {
- case SIGP_SET_ARCH:
- /* switch arch */
- break;
- case SIGP_SENSE:
- /* enumerate CPU status */
- if (cpu_addr) {
- /* XXX implement when SMP comes */
- return 3;
- }
- env->regs[r1] &= 0xffffffff00000000ULL;
- cc = 1;
- break;
-#if !defined(CONFIG_USER_ONLY)
- case SIGP_RESTART:
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
- cpu_loop_exit(CPU(s390_env_get_cpu(env)));
- break;
- case SIGP_STOP:
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
- cpu_loop_exit(CPU(s390_env_get_cpu(env)));
- break;
-#endif
- default:
- /* unknown sigp */
- fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
- cc = SIGP_CC_NOT_OPERATIONAL;
- }
+ /* TODO: needed to inject interrupts - push further down */
+ qemu_mutex_lock_iothread();
+ cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
+ qemu_mutex_unlock_iothread();
return cc;
}
@@ -505,66 +475,57 @@ void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
}
#endif
-/* The maximum bit defined at the moment is 129. */
-#define MAX_STFL_WORDS 3
+static uint8_t stfl_bytes[2048];
+static unsigned int used_stfl_bytes;
-/* Canonicalize the current cpu's features into the 64-bit words required
- by STFLE. Return the index-1 of the max word that is non-zero. */
-static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS])
+static void prepare_stfl(void)
{
- S390CPU *cpu = s390_env_get_cpu(env);
- const unsigned long *features = cpu->model->features;
- unsigned max_bit = 0;
- S390Feat feat;
+ static bool initialized;
+ int i;
- memset(words, 0, sizeof(uint64_t) * MAX_STFL_WORDS);
-
- if (test_bit(S390_FEAT_ZARCH, features)) {
- /* z/Architecture is always active if around */
- words[0] = 1ull << (63 - 2);
+ /* racy, but we don't care, the same values are always written */
+ if (initialized) {
+ return;
}
- for (feat = find_first_bit(features, S390_FEAT_MAX);
- feat < S390_FEAT_MAX;
- feat = find_next_bit(features, S390_FEAT_MAX, feat + 1)) {
- const S390FeatDef *def = s390_feat_def(feat);
- if (def->type == S390_FEAT_TYPE_STFL) {
- unsigned bit = def->bit;
- if (bit > max_bit) {
- max_bit = bit;
- }
- assert(bit / 64 < MAX_STFL_WORDS);
- words[bit / 64] |= 1ULL << (63 - bit % 64);
+ s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
+ for (i = 0; i < sizeof(stfl_bytes); i++) {
+ if (stfl_bytes[i]) {
+ used_stfl_bytes = i + 1;
}
}
-
- return max_bit / 64;
+ initialized = true;
}
#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
- uint64_t words[MAX_STFL_WORDS];
LowCore *lowcore;
lowcore = cpu_map_lowcore(env);
- do_stfle(env, words);
- lowcore->stfl_fac_list = cpu_to_be32(words[0] >> 32);
+ prepare_stfl();
+ memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
cpu_unmap_lowcore(lowcore);
}
#endif
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
- uint64_t words[MAX_STFL_WORDS];
- unsigned count_m1 = env->regs[0] & 0xff;
- unsigned max_m1 = do_stfle(env, words);
- unsigned i;
+ const uintptr_t ra = GETPC();
+ const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
+ const int max_bytes = ROUND_UP(used_stfl_bytes, 8);
+ int i;
+
+ if (addr & 0x7) {
+ cpu_restore_state(ENV_GET_CPU(env), ra);
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ }
- for (i = 0; i <= count_m1; ++i) {
- cpu_stq_data(env, addr + 8 * i, words[i]);
+ prepare_stfl();
+ for (i = 0; i < count_bytes; ++i) {
+ cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
}
- env->regs[0] = deposit64(env->regs[0], 0, 8, max_m1);
- return (count_m1 >= max_m1 ? 0 : 3);
+ env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
+ return count_bytes >= max_bytes ? 0 : 3;
}
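
The reworked STFLE helper above works at byte granularity: the guest supplies the buffer length as doublewords minus one in the low byte of general register 0, the helper stores that many facility bytes, writes back how many doublewords would have been needed, and sets condition code 3 when the buffer was too small. A standalone sketch of just that length/cc arithmetic (the store itself is omitted):

#include <stdint.h>

/* 'r0' stands in for the guest's general register 0, 'used_stfl_bytes' for
 * the number of non-zero facility-list bytes prepared by prepare_stfl(). */
static int stfle_cc_sketch(uint64_t *r0, unsigned used_stfl_bytes)
{
    const int count_bytes = ((*r0 & 0xff) + 1) * 8;        /* guest buffer size */
    const int max_bytes = (used_stfl_bytes + 7) & ~7U;     /* ROUND_UP(.., 8) */

    /* ... the real helper stores count_bytes facility bytes here ... */

    *r0 = (*r0 & ~0xffULL) | (uint64_t)(max_bytes / 8 - 1);   /* doublewords - 1 */
    return count_bytes >= max_bytes ? 0 : 3;                   /* condition code */
}
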
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 9daa0fd..31e3f3f 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -106,6 +106,37 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
trigger_access_exception(env, type, ilen, tec);
}
+/* check whether the address would be protected by Low-Address Protection */
+static bool is_low_address(uint64_t addr)
+{
+ return addr <= 511 || (addr >= 4096 && addr <= 4607);
+}
+
+/* check whether Low-Address Protection is enabled for mmu_translate() */
+static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc)
+{
+ if (!(env->cregs[0] & CR0_LOWPROT)) {
+ return false;
+ }
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ return true;
+ }
+
+ /* Check the private-space control bit */
+ switch (asc) {
+ case PSW_ASC_PRIMARY:
+ return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
+ case PSW_ASC_SECONDARY:
+ return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
+ case PSW_ASC_HOME:
+ return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
+ default:
+ /* We don't support access register mode */
+ error_report("unsupported addressing mode");
+ exit(1);
+ }
+}
+
/**
* Translate real address to absolute (= physical)
* address by taking care of the prefix mapping.
@@ -323,6 +354,24 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
}
*flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) {
+ /*
+ * If any part of this page is currently protected, make sure the
+ * TLB entry will not be reused.
+ *
+ * As the protected range is always the first 512 bytes of the
+ * first two pages, we can catch all writes to these areas
+ * just by looking at the start address (triggering the tlb miss).
+ */
+ *flags |= PAGE_WRITE_INV;
+ if (is_low_address(vaddr) && rw == MMU_DATA_STORE) {
+ if (exc) {
+ trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
+ }
+ return -EACCES;
+ }
+ }
+
vaddr &= TARGET_PAGE_MASK;
if (!(env->psw.mask & PSW_MASK_DAT)) {
@@ -392,50 +441,17 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
}
/**
- * lowprot_enabled: Check whether low-address protection is enabled
- */
-static bool lowprot_enabled(const CPUS390XState *env)
-{
- if (!(env->cregs[0] & CR0_LOWPROT)) {
- return false;
- }
- if (!(env->psw.mask & PSW_MASK_DAT)) {
- return true;
- }
-
- /* Check the private-space control bit */
- switch (env->psw.mask & PSW_MASK_ASC) {
- case PSW_ASC_PRIMARY:
- return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
- case PSW_ASC_SECONDARY:
- return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
- case PSW_ASC_HOME:
- return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
- default:
- /* We don't support access register mode */
- error_report("unsupported addressing mode");
- exit(1);
- }
-}
-
-/**
* translate_pages: Translate a set of consecutive logical page addresses
* to absolute addresses
*/
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
target_ulong *pages, bool is_write)
{
- bool lowprot = is_write && lowprot_enabled(&cpu->env);
uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
CPUS390XState *env = &cpu->env;
int ret, i, pflags;
for (i = 0; i < nr_pages; i++) {
- /* Low-address protection? */
- if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
- trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
- return -EACCES;
- }
ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
if (ret) {
return ret;
@@ -509,9 +525,19 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
target_ulong *addr, int *flags)
{
- /* TODO: low address protection once we flush the tlb on cr changes */
+ const bool lowprot_enabled = env->cregs[0] & CR0_LOWPROT;
+
*flags = PAGE_READ | PAGE_WRITE;
- *addr = mmu_real2abs(env, raddr);
+ if (is_low_address(raddr & TARGET_PAGE_MASK) && lowprot_enabled) {
+ /* see the comment in mmu_translate() for how this works */
+ *flags |= PAGE_WRITE_INV;
+ if (is_low_address(raddr) && rw == MMU_DATA_STORE) {
+ trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
+ return -EACCES;
+ }
+ }
+
+ *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK);
/* TODO: storage key handling */
return 0;
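
The ranges checked by is_low_address() are the first 512 bytes of page 0 and of page 1 (addresses 0-511 and 4096-4607). A minimal standalone sketch of the predicate and of the page-granular check used to mark write TLB entries invalid, assuming 4 KiB target pages (not the QEMU code itself):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define LOW_ADDR_PAGE_MASK (~0xfffULL)   /* assumes a 4 KiB TARGET_PAGE_MASK */

/* same predicate as in the patch: bytes 0-511 and 4096-4607 are protected */
static bool is_low_address(uint64_t addr)
{
    return addr <= 511 || (addr >= 4096 && addr <= 4607);
}

int main(void)
{
    /* exact addresses whose stores raise a protection exception */
    assert(is_low_address(0) && is_low_address(511));
    assert(is_low_address(4096) && is_low_address(4607));
    assert(!is_low_address(512) && !is_low_address(4608));

    /* page-granular check: any write mapping for page 0 or page 1 is marked
     * PAGE_WRITE_INV so later stores re-enter mmu_translate() */
    assert(is_low_address(600 & LOW_ADDR_PAGE_MASK));    /* page 0 holds bytes 0-511 */
    assert(is_low_address(5000 & LOW_ADDR_PAGE_MASK));   /* page 1 holds 4096-4607 */
    assert(!is_low_address(8192 & LOW_ADDR_PAGE_MASK));
    return 0;
}
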
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
new file mode 100644
index 0000000..ac3f8e7
--- /dev/null
+++ b/target/s390x/sigp.c
@@ -0,0 +1,508 @@
+/*
+ * s390x SIGP instruction handling
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ * Copyright IBM Corp. 2012
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "internal.h"
+#include "sysemu/hw_accel.h"
+#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+#include "sysemu/sysemu.h"
+#include "trace.h"
+
+QemuMutex qemu_sigp_mutex;
+
+typedef struct SigpInfo {
+ uint64_t param;
+ int cc;
+ uint64_t *status_reg;
+} SigpInfo;
+
+static void set_sigp_status(SigpInfo *si, uint64_t status)
+{
+ *si->status_reg &= 0xffffffff00000000ULL;
+ *si->status_reg |= status;
+ si->cc = SIGP_CC_STATUS_STORED;
+}
+
+static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
+{
+ uint8_t state = s390_cpu_get_state(dst_cpu);
+ bool ext_call = dst_cpu->env.pending_int & INTERRUPT_EXTERNAL_CALL;
+ uint64_t status = 0;
+
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* sensing without locks is racy, but it's the same for real hw */
+ if (state != CPU_STATE_STOPPED && !ext_call) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ } else {
+ if (ext_call) {
+ status |= SIGP_STAT_EXT_CALL_PENDING;
+ }
+ if (state == CPU_STATE_STOPPED) {
+ status |= SIGP_STAT_STOPPED;
+ }
+ set_sigp_status(si, status);
+ }
+}
+
+static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
+{
+ int ret;
+
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ ret = cpu_inject_external_call(dst_cpu, src_cpu->env.core_id);
+ if (!ret) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ } else {
+ set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING);
+ }
+}
+
+static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
+{
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_start(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ return;
+ }
+
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ return;
+ }
+
+ /* disabled wait - sleeping in user space */
+ if (cs->halted) {
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+ } else {
+ /* execute the stop function */
+ cpu->env.sigp_order = SIGP_STOP;
+ cpu_inject_stop(cpu);
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ /* disabled wait - sleeping in user space */
+ if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+ }
+
+ switch (s390_cpu_get_state(cpu)) {
+ case CPU_STATE_OPERATING:
+ cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+ cpu_inject_stop(cpu);
+ /* store will be performed in do_stop_interrupt() */
+ break;
+ case CPU_STATE_STOPPED:
+ /* already stopped, just store the status */
+ cpu_synchronize_state(cs);
+ s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
+ break;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint32_t address = si->param & 0x7ffffe00u;
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ cpu_synchronize_state(cs);
+
+ if (s390_store_status(cpu, address, false)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#define ADTL_SAVE_LC_MASK 0xfUL
+static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint8_t lc = si->param & ADTL_SAVE_LC_MASK;
+ hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK;
+ hwaddr len = 1UL << (lc ? lc : 10);
+
+ if (!s390_has_feat(S390_FEAT_VECTOR) &&
+ !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ /* address must be aligned to length */
+ if (addr & (len - 1)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* no GS: only lc == 0 is valid */
+ if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
+ lc != 0) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* GS: 0, 10, 11, 12 are valid */
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
+ lc != 0 &&
+ lc != 10 &&
+ lc != 11 &&
+ lc != 12) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ cpu_synchronize_state(cs);
+
+ if (s390_store_adtl_status(cpu, addr, len)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ switch (s390_cpu_get_state(cpu)) {
+ case CPU_STATE_STOPPED:
+ /* the restart irq has to be delivered prior to any other pending irq */
+ cpu_synchronize_state(cs);
+ /*
+ * Set OPERATING (and unhalt) before loading the restart PSW.
+ * load_psw() will then properly halt the CPU again if necessary (TCG).
+ */
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+ do_restart_interrupt(&cpu->env);
+ break;
+ case CPU_STATE_OPERATING:
+ cpu_inject_restart(cpu);
+ break;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ SigpInfo *si = arg.host_ptr;
+
+ cpu_synchronize_state(cs);
+ scc->initial_cpu_reset(cs);
+ cpu_synchronize_post_reset(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ SigpInfo *si = arg.host_ptr;
+
+ cpu_synchronize_state(cs);
+ scc->cpu_reset(cs);
+ cpu_synchronize_post_reset(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint32_t addr = si->param & 0x7fffe000u;
+
+ cpu_synchronize_state(cs);
+
+ if (!address_space_access_valid(&address_space_memory, addr,
+ sizeof(struct LowCore), false)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ cpu->env.psa = addr;
+ tlb_flush(cs);
+ cpu_synchronize_post_init(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
+ SigpInfo *si)
+{
+ const uint64_t psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
+ uint16_t p_asn, s_asn, asn;
+ uint64_t psw_addr, psw_mask;
+ bool idle;
+
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* this looks racy, but these values are only used when STOPPED */
+ idle = CPU(dst_cpu)->halted;
+ psw_addr = dst_cpu->env.psw.addr;
+ psw_mask = dst_cpu->env.psw.mask;
+ asn = si->param;
+ p_asn = dst_cpu->env.cregs[4] & 0xffff; /* Primary ASN */
+ s_asn = dst_cpu->env.cregs[3] & 0xffff; /* Secondary ASN */
+
+ if (s390_cpu_get_state(dst_cpu) != CPU_STATE_STOPPED ||
+ (psw_mask & psw_int_mask) != psw_int_mask ||
+ (idle && psw_addr != 0) ||
+ (!idle && (asn == p_asn || asn == s_asn))) {
+ cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
+ } else {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ }
+
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
+{
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* sensing without locks is racy, but it's the same for real hw */
+ if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* If halted (which also includes STOPPED), it is not running */
+ if (CPU(dst_cpu)->halted) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ } else {
+ set_sigp_status(si, SIGP_STAT_NOT_RUNNING);
+ }
+}
+
+static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
+ uint64_t param, uint64_t *status_reg)
+{
+ SigpInfo si = {
+ .param = param,
+ .status_reg = status_reg,
+ };
+
+ /* cpu available? */
+ if (dst_cpu == NULL) {
+ return SIGP_CC_NOT_OPERATIONAL;
+ }
+
+ /* only resets can break pending orders */
+ if (dst_cpu->env.sigp_order != 0 &&
+ order != SIGP_CPU_RESET &&
+ order != SIGP_INITIAL_CPU_RESET) {
+ return SIGP_CC_BUSY;
+ }
+
+ switch (order) {
+ case SIGP_SENSE:
+ sigp_sense(dst_cpu, &si);
+ break;
+ case SIGP_EXTERNAL_CALL:
+ sigp_external_call(cpu, dst_cpu, &si);
+ break;
+ case SIGP_EMERGENCY:
+ sigp_emergency(cpu, dst_cpu, &si);
+ break;
+ case SIGP_START:
+ run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STOP:
+ run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_RESTART:
+ run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STOP_STORE_STATUS:
+ run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STORE_STATUS_ADDR:
+ run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STORE_ADTL_STATUS:
+ run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_SET_PREFIX:
+ run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_CPU_RESET:
+ run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_COND_EMERGENCY:
+ sigp_cond_emergency(cpu, dst_cpu, &si);
+ break;
+ case SIGP_SENSE_RUNNING:
+ sigp_sense_running(dst_cpu, &si);
+ break;
+ default:
+ set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
+ }
+
+ return si.cc;
+}
+
+static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
+ uint64_t *status_reg)
+{
+ CPUState *cur_cs;
+ S390CPU *cur_cpu;
+ bool all_stopped = true;
+
+ CPU_FOREACH(cur_cs) {
+ cur_cpu = S390_CPU(cur_cs);
+
+ if (cur_cpu == cpu) {
+ continue;
+ }
+ if (s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
+ all_stopped = false;
+ }
+ }
+
+ *status_reg &= 0xffffffff00000000ULL;
+
+ /* Reject the set-architecture order; with czam we're always in z/Arch mode. */
+ *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER :
+ SIGP_STAT_INCORRECT_STATE);
+ return SIGP_CC_STATUS_STORED;
+}
+
+int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3)
+{
+ uint64_t *status_reg = &env->regs[r1];
+ uint64_t param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
+ S390CPU *cpu = s390_env_get_cpu(env);
+ S390CPU *dst_cpu = NULL;
+ int ret;
+
+ if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
+ ret = SIGP_CC_BUSY;
+ goto out;
+ }
+
+ switch (order) {
+ case SIGP_SET_ARCH:
+ ret = sigp_set_architecture(cpu, param, status_reg);
+ break;
+ default:
+ /* all other sigp orders target a single vcpu */
+ dst_cpu = s390_cpu_addr2state(env->regs[r3]);
+ ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg);
+ }
+ qemu_mutex_unlock(&qemu_sigp_mutex);
+
+out:
+ trace_sigp_finished(order, CPU(cpu)->cpu_index,
+ dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
+ g_assert(ret >= 0);
+
+ return ret;
+}
+
+int s390_cpu_restart(S390CPU *cpu)
+{
+ SigpInfo si = {};
+
+ run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
+ return 0;
+}
+
+void do_stop_interrupt(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ }
+ if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
+ s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
+ }
+ env->sigp_order = 0;
+ env->pending_int &= ~INTERRUPT_STOP;
+}
+
+void s390_init_sigp(void)
+{
+ qemu_mutex_init(&qemu_sigp_mutex);
+}
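
The status-register convention used throughout the new sigp.c: an order either completes with SIGP_CC_ORDER_CODE_ACCEPTED, or it merges a status value into the low 32 bits of the register chosen by R1 and returns cc 1 (status stored). A minimal standalone sketch of that convention; the condition codes and status bits below are illustrative stand-ins for the definitions in cpu.h:

#include <stdint.h>
#include <stdio.h>

/* condition codes as used by the SIGP handlers; cc 1 means "status stored" */
enum {
    SIGP_CC_ORDER_CODE_ACCEPTED = 0,
    SIGP_CC_STATUS_STORED       = 1,
};

/* illustrative status bits; the real values live in cpu.h */
#define SIGP_STAT_STOPPED          0x0040UL
#define SIGP_STAT_EXT_CALL_PENDING 0x0080UL

typedef struct SigpInfo {
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

/* only the low 32 bits of the status register are replaced */
static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}

int main(void)
{
    uint64_t reg = 0x1122334455667788ULL;
    SigpInfo si = { .status_reg = &reg, .cc = SIGP_CC_ORDER_CODE_ACCEPTED };

    /* e.g. SENSE on a stopped CPU with an external call pending */
    set_sigp_status(&si, SIGP_STAT_STOPPED | SIGP_STAT_EXT_CALL_PENDING);
    printf("cc=%d status_reg=0x%016llx\n", si.cc,
           (unsigned long long)reg);   /* high word preserved, low word = status */
    return 0;
}
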
diff --git a/target/s390x/trace-events b/target/s390x/trace-events
index 4d871f5..a84e316 100644
--- a/target/s390x/trace-events
+++ b/target/s390x/trace-events
@@ -14,9 +14,11 @@ ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command 0x%04x, len 0x
kvm_enable_cmma(int rc) "CMMA: enabling with result code %d"
kvm_clear_cmma(int rc) "CMMA: clearing with result code %d"
kvm_failed_cpu_state_set(int cpu_index, uint8_t state, const char *msg) "Warning: Unable to set cpu %d state %" PRIu8 " to KVM: %s"
-kvm_sigp_finished(uint8_t order, int cpu_index, int dst_index, int cc) "SIGP: Finished order %u on cpu %d -> cpu %d with cc=%d"
# target/s390x/cpu.c
cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8
cpu_halt(int cpu_index) "halting cpu %d"
cpu_unhalt(int cpu_index) "unhalting cpu %d"
+
+# target/s390x/sigp.c
+sigp_finished(uint8_t order, int cpu_index, int dst_index, int cc) "SIGP: Finished order %u on cpu %d -> cpu %d with cc=%d"
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 165d2ca..6ecf764 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -2719,7 +2719,8 @@ static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
gen_helper_lctl(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
- return NO_EXIT;
+ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+ return EXIT_PC_STALE_NOCHAIN;
}
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
@@ -2730,7 +2731,8 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
gen_helper_lctlg(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
- return NO_EXIT;
+ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+ return EXIT_PC_STALE_NOCHAIN;
}
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
@@ -3708,11 +3710,12 @@ static ExitStatus op_servc(DisasContext *s, DisasOps *o)
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
- gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
+ gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
set_cc_static(s);
tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
return NO_EXIT;
}
#endif
@@ -4140,7 +4143,6 @@ static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_stfle(cc_op, cpu_env, o->in2);
set_cc_static(s);
return NO_EXIT;