-rw-r--r--  hw/boards.h                                                     |    1
-rw-r--r--  hw/s390x/Makefile.objs                                          |    5
-rw-r--r--  hw/s390x/css.c                                                  | 1277
-rw-r--r--  hw/s390x/css.h                                                  |   99
-rw-r--r--  hw/s390x/ipl.c                                                  |    2
-rw-r--r--  hw/s390x/s390-virtio-bus.c (renamed from hw/s390-virtio-bus.c)  |   16
-rw-r--r--  hw/s390x/s390-virtio-bus.h (renamed from hw/s390-virtio-bus.h)  |   12
-rw-r--r--  hw/s390x/s390-virtio-ccw.c                                      |  134
-rw-r--r--  hw/s390x/s390-virtio-hcall.c                                    |    2
-rw-r--r--  hw/s390x/s390-virtio.c (renamed from hw/s390-virtio.c)          |  131
-rw-r--r--  hw/s390x/s390-virtio.h (renamed from hw/s390-virtio.h)          |    6
-rw-r--r--  hw/s390x/virtio-ccw.c                                           |  960
-rw-r--r--  hw/s390x/virtio-ccw.h                                           |   98
-rw-r--r--  target-s390x/Makefile.objs                                      |    2
-rw-r--r--  target-s390x/cpu.h                                              |  247
-rw-r--r--  target-s390x/helper.c                                           |  200
-rw-r--r--  target-s390x/ioinst.c                                           |  761
-rw-r--r--  target-s390x/ioinst.h                                           |  230
-rw-r--r--  target-s390x/kvm.c                                              |  239
-rw-r--r--  trace-events                                                    |   18
-rw-r--r--  vl.c                                                            |   52
21 files changed, 4398 insertions, 94 deletions
diff --git a/hw/boards.h b/hw/boards.h
index 3ff9665..3813d4e 100644
--- a/hw/boards.h
+++ b/hw/boards.h
@@ -33,6 +33,7 @@ typedef struct QEMUMachine {
unsigned int no_serial:1,
no_parallel:1,
use_virtcon:1,
+ use_sclp:1,
no_floppy:1,
no_cdrom:1,
no_sdcard:1;
diff --git a/hw/s390x/Makefile.objs b/hw/s390x/Makefile.objs
index 1b40c2e..9f2f419 100644
--- a/hw/s390x/Makefile.objs
+++ b/hw/s390x/Makefile.objs
@@ -1,8 +1,9 @@
obj-y = s390-virtio-bus.o s390-virtio.o
-
-obj-y := $(addprefix ../,$(obj-y))
obj-y += s390-virtio-hcall.o
obj-y += sclp.o
obj-y += event-facility.o
obj-y += sclpquiesce.o sclpconsole.o
obj-y += ipl.o
+obj-y += css.o
+obj-y += s390-virtio-ccw.o
+obj-y += virtio-ccw.o
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
new file mode 100644
index 0000000..3244201
--- /dev/null
+++ b/hw/s390x/css.c
@@ -0,0 +1,1277 @@
+/*
+ * Channel subsystem base support.
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include <hw/qdev.h>
+#include "qemu/bitops.h"
+#include "cpu.h"
+#include "ioinst.h"
+#include "css.h"
+#include "trace.h"
+
+typedef struct CrwContainer {
+ CRW crw;
+ QTAILQ_ENTRY(CrwContainer) sibling;
+} CrwContainer;
+
+typedef struct ChpInfo {
+ uint8_t in_use;
+ uint8_t type;
+ uint8_t is_virtual;
+} ChpInfo;
+
+typedef struct SubchSet {
+ SubchDev *sch[MAX_SCHID + 1];
+ unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
+ unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
+} SubchSet;
+
+typedef struct CssImage {
+ SubchSet *sch_set[MAX_SSID + 1];
+ ChpInfo chpids[MAX_CHPID + 1];
+} CssImage;
+
+typedef struct ChannelSubSys {
+ QTAILQ_HEAD(, CrwContainer) pending_crws;
+ bool do_crw_mchk;
+ bool crws_lost;
+ uint8_t max_cssid;
+ uint8_t max_ssid;
+ bool chnmon_active;
+ uint64_t chnmon_area;
+ CssImage *css[MAX_CSSID + 1];
+ uint8_t default_cssid;
+} ChannelSubSys;
+
+static ChannelSubSys *channel_subsys;
+
+int css_create_css_image(uint8_t cssid, bool default_image)
+{
+ trace_css_new_image(cssid, default_image ? "(default)" : "");
+ if (cssid > MAX_CSSID) {
+ return -EINVAL;
+ }
+ if (channel_subsys->css[cssid]) {
+ return -EBUSY;
+ }
+ channel_subsys->css[cssid] = g_malloc0(sizeof(CssImage));
+ if (default_image) {
+ channel_subsys->default_cssid = cssid;
+ }
+ return 0;
+}
+
+static uint16_t css_build_subchannel_id(SubchDev *sch)
+{
+ if (channel_subsys->max_cssid > 0) {
+ return (sch->cssid << 8) | (1 << 3) | (sch->ssid << 1) | 1;
+ }
+ return (sch->ssid << 1) | 1;
+}
+
+static void css_inject_io_interrupt(SubchDev *sch)
+{
+ S390CPU *cpu = s390_cpu_addr2state(0);
+ uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
+
+ trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
+ sch->curr_status.pmcw.intparm, isc, "");
+ s390_io_interrupt(cpu,
+ css_build_subchannel_id(sch),
+ sch->schid,
+ sch->curr_status.pmcw.intparm,
+ (0x80 >> isc) << 24);
+}
+
+void css_conditional_io_interrupt(SubchDev *sch)
+{
+ /*
+ * If the subchannel is not currently status pending, make it pending
+ * with alert status.
+ */
+ if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
+ S390CPU *cpu = s390_cpu_addr2state(0);
+ uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
+
+ trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
+ sch->curr_status.pmcw.intparm, isc,
+ "(unsolicited)");
+ sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ sch->curr_status.scsw.ctrl |=
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ /* Inject an I/O interrupt. */
+ s390_io_interrupt(cpu,
+ css_build_subchannel_id(sch),
+ sch->schid,
+ sch->curr_status.pmcw.intparm,
+ (0x80 >> isc) << 24);
+ }
+}
+
+static void sch_handle_clear_func(SubchDev *sch)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int path;
+
+ /* Path management: In our simple css, we always choose the only path. */
+ path = 0x80;
+
+ /* Reset values prior to 'issuing the clear signal'. */
+ p->lpum = 0;
+ p->pom = 0xff;
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+
+ /* We always 'attempt to issue the clear signal', and we always succeed. */
+ sch->orb = NULL;
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
+ s->ctrl |= SCSW_STCTL_STATUS_PEND;
+
+ s->dstat = 0;
+ s->cstat = 0;
+ p->lpum = path;
+
+}
+
+static void sch_handle_halt_func(SubchDev *sch)
+{
+
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int path;
+
+ /* Path management: In our simple css, we always choose the only path. */
+ path = 0x80;
+
+ /* We always 'attempt to issue the halt signal', and we always succeed. */
+ sch->orb = NULL;
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ s->ctrl &= ~SCSW_ACTL_HALT_PEND;
+ s->ctrl |= SCSW_STCTL_STATUS_PEND;
+
+ if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
+ !((s->ctrl & SCSW_ACTL_START_PEND) ||
+ (s->ctrl & SCSW_ACTL_SUSP))) {
+ s->dstat = SCSW_DSTAT_DEVICE_END;
+ }
+ s->cstat = 0;
+ p->lpum = path;
+
+}
+
+static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
+{
+ int i;
+
+ dest->reserved = src->reserved;
+ dest->cu_type = cpu_to_be16(src->cu_type);
+ dest->cu_model = src->cu_model;
+ dest->dev_type = cpu_to_be16(src->dev_type);
+ dest->dev_model = src->dev_model;
+ dest->unused = src->unused;
+ for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
+ dest->ciw[i].type = src->ciw[i].type;
+ dest->ciw[i].command = src->ciw[i].command;
+ dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
+ }
+}
+
+static CCW1 copy_ccw_from_guest(hwaddr addr)
+{
+ CCW1 tmp;
+ CCW1 ret;
+
+ cpu_physical_memory_read(addr, &tmp, sizeof(tmp));
+ ret.cmd_code = tmp.cmd_code;
+ ret.flags = tmp.flags;
+ ret.count = be16_to_cpu(tmp.count);
+ ret.cda = be32_to_cpu(tmp.cda);
+
+ return ret;
+}
+
+static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr)
+{
+ int ret;
+ bool check_len;
+ int len;
+ CCW1 ccw;
+
+ if (!ccw_addr) {
+ return -EIO;
+ }
+
+ ccw = copy_ccw_from_guest(ccw_addr);
+
+ /* Check for invalid command codes. */
+ if ((ccw.cmd_code & 0x0f) == 0) {
+ return -EINVAL;
+ }
+ if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
+ ((ccw.cmd_code & 0xf0) != 0)) {
+ return -EINVAL;
+ }
+
+ if (ccw.flags & CCW_FLAG_SUSPEND) {
+ return -EINPROGRESS;
+ }
+
+ check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
+
+ /* Look at the command. */
+ switch (ccw.cmd_code) {
+ case CCW_CMD_NOOP:
+ /* Nothing to do. */
+ ret = 0;
+ break;
+ case CCW_CMD_BASIC_SENSE:
+ if (check_len) {
+ if (ccw.count != sizeof(sch->sense_data)) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, sizeof(sch->sense_data));
+ cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
+ sch->curr_status.scsw.count = ccw.count - len;
+ memset(sch->sense_data, 0, sizeof(sch->sense_data));
+ ret = 0;
+ break;
+ case CCW_CMD_SENSE_ID:
+ {
+ SenseId sense_id;
+
+ copy_sense_id_to_guest(&sense_id, &sch->id);
+ /* Sense ID information is device specific. */
+ if (check_len) {
+ if (ccw.count != sizeof(sense_id)) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, sizeof(sense_id));
+ /*
+ * Only indicate 0xff in the first sense byte if we actually
+ * have enough place to store at least bytes 0-3.
+ */
+ if (len >= 4) {
+ sense_id.reserved = 0xff;
+ } else {
+ sense_id.reserved = 0;
+ }
+ cpu_physical_memory_write(ccw.cda, &sense_id, len);
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ break;
+ }
+ case CCW_CMD_TIC:
+ if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
+ ret = -EINVAL;
+ break;
+ }
+ sch->channel_prog = ccw.cda;
+ ret = -EAGAIN;
+ break;
+ default:
+ if (sch->ccw_cb) {
+ /* Handle device specific commands. */
+ ret = sch->ccw_cb(sch, ccw);
+ } else {
+ ret = -ENOSYS;
+ }
+ break;
+ }
+ sch->last_cmd = ccw;
+ sch->last_cmd_valid = true;
+ if (ret == 0) {
+ if (ccw.flags & CCW_FLAG_CC) {
+ sch->channel_prog += 8;
+ ret = -EAGAIN;
+ }
+ }
+
+ return ret;
+}
+
+static void sch_handle_start_func(SubchDev *sch)
+{
+
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ ORB *orb = sch->orb;
+ int path;
+ int ret;
+
+ /* Path management: In our simple css, we always choose the only path. */
+ path = 0x80;
+
+ if (!(s->ctrl & SCSW_ACTL_SUSP)) {
+ /* Look at the orb and try to execute the channel program. */
+ p->intparm = orb->intparm;
+ if (!(orb->lpm & path)) {
+ /* Generate a deferred cc 3 condition. */
+ s->flags |= SCSW_FLAGS_MASK_CC;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
+ return;
+ }
+ } else {
+ s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
+ }
+ sch->last_cmd_valid = false;
+ do {
+ ret = css_interpret_ccw(sch, sch->channel_prog);
+ switch (ret) {
+ case -EAGAIN:
+ /* ccw chain, continue processing */
+ break;
+ case 0:
+ /* success */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_STATUS_PEND;
+ s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
+ break;
+ case -ENOSYS:
+ /* unsupported command, generate unit check (command reject) */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->dstat = SCSW_DSTAT_UNIT_CHECK;
+ /* Set sense bit 0 in ecw0. */
+ sch->sense_data[0] = 0x80;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ break;
+ case -EFAULT:
+ /* memory problem, generate channel data check */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->cstat = SCSW_CSTAT_DATA_CHECK;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ break;
+ case -EBUSY:
+ /* subchannel busy, generate deferred cc 1 */
+ s->flags &= ~SCSW_FLAGS_MASK_CC;
+ s->flags |= (1 << 8);
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ break;
+ case -EINPROGRESS:
+ /* channel program has been suspended */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->ctrl |= SCSW_ACTL_SUSP;
+ break;
+ default:
+ /* error, generate channel program check */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->cstat = SCSW_CSTAT_PROG_CHECK;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ break;
+ }
+ } while (ret == -EAGAIN);
+
+}
+
+/*
+ * On real machines, this would run asynchronously to the main vcpus.
+ * We might want to make some parts of the ssch handling (interpreting
+ * read/writes) asynchronous later on if we start supporting more than
+ * our current very simple devices.
+ */
+static void do_subchannel_work(SubchDev *sch)
+{
+
+ SCSW *s = &sch->curr_status.scsw;
+
+ if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
+ sch_handle_clear_func(sch);
+ } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
+ sch_handle_halt_func(sch);
+ } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
+ sch_handle_start_func(sch);
+ } else {
+ /* Cannot happen. */
+ return;
+ }
+ css_inject_io_interrupt(sch);
+}
+
+static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
+{
+ int i;
+
+ dest->intparm = cpu_to_be32(src->intparm);
+ dest->flags = cpu_to_be16(src->flags);
+ dest->devno = cpu_to_be16(src->devno);
+ dest->lpm = src->lpm;
+ dest->pnom = src->pnom;
+ dest->lpum = src->lpum;
+ dest->pim = src->pim;
+ dest->mbi = cpu_to_be16(src->mbi);
+ dest->pom = src->pom;
+ dest->pam = src->pam;
+ for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
+ dest->chpid[i] = src->chpid[i];
+ }
+ dest->chars = cpu_to_be32(src->chars);
+}
+
+static void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
+{
+ dest->flags = cpu_to_be16(src->flags);
+ dest->ctrl = cpu_to_be16(src->ctrl);
+ dest->cpa = cpu_to_be32(src->cpa);
+ dest->dstat = src->dstat;
+ dest->cstat = src->cstat;
+ dest->count = cpu_to_be16(src->count);
+}
+
+static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
+{
+ int i;
+
+ copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
+ copy_scsw_to_guest(&dest->scsw, &src->scsw);
+ dest->mba = cpu_to_be64(src->mba);
+ for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
+ dest->mda[i] = src->mda[i];
+ }
+}
+
+int css_do_stsch(SubchDev *sch, SCHIB *schib)
+{
+ /* Use current status. */
+ copy_schib_to_guest(schib, &sch->curr_status);
+ return 0;
+}
+
+static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
+{
+ int i;
+
+ dest->intparm = be32_to_cpu(src->intparm);
+ dest->flags = be16_to_cpu(src->flags);
+ dest->devno = be16_to_cpu(src->devno);
+ dest->lpm = src->lpm;
+ dest->pnom = src->pnom;
+ dest->lpum = src->lpum;
+ dest->pim = src->pim;
+ dest->mbi = be16_to_cpu(src->mbi);
+ dest->pom = src->pom;
+ dest->pam = src->pam;
+ for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
+ dest->chpid[i] = src->chpid[i];
+ }
+ dest->chars = be32_to_cpu(src->chars);
+}
+
+static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
+{
+ dest->flags = be16_to_cpu(src->flags);
+ dest->ctrl = be16_to_cpu(src->ctrl);
+ dest->cpa = be32_to_cpu(src->cpa);
+ dest->dstat = src->dstat;
+ dest->cstat = src->cstat;
+ dest->count = be16_to_cpu(src->count);
+}
+
+static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
+{
+ int i;
+
+ copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
+ copy_scsw_from_guest(&dest->scsw, &src->scsw);
+ dest->mba = be64_to_cpu(src->mba);
+ for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
+ dest->mda[i] = src->mda[i];
+ }
+}
+
+int css_do_msch(SubchDev *sch, SCHIB *orig_schib)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+ SCHIB schib;
+
+ if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl &
+ (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ copy_schib_from_guest(&schib, orig_schib);
+ /* Only update the program-modifiable fields. */
+ p->intparm = schib.pmcw.intparm;
+ p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+ PMCW_FLAGS_MASK_MP);
+ p->flags |= schib.pmcw.flags &
+ (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+ PMCW_FLAGS_MASK_MP);
+ p->lpm = schib.pmcw.lpm;
+ p->mbi = schib.pmcw.mbi;
+ p->pom = schib.pmcw.pom;
+ p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
+ p->chars |= schib.pmcw.chars &
+ (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
+ sch->curr_status.mba = schib.mba;
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_xsch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
+ ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
+ (!(s->ctrl &
+ (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
+ (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Cancel the current operation. */
+ s->ctrl &= ~(SCSW_FCTL_START_FUNC |
+ SCSW_ACTL_RESUME_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_SUSP);
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ sch->orb = NULL;
+ s->dstat = 0;
+ s->cstat = 0;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_csch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Trigger the clear function. */
+ s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
+ s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
+
+ do_subchannel_work(sch);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_hsch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
+ (s->ctrl & (SCSW_STCTL_PRIMARY |
+ SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT))) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Trigger the halt function. */
+ s->ctrl |= SCSW_FCTL_HALT_FUNC;
+ s->ctrl &= ~SCSW_FCTL_START_FUNC;
+ if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
+ (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
+ ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
+ s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
+ }
+ s->ctrl |= SCSW_ACTL_HALT_PEND;
+
+ do_subchannel_work(sch);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static void css_update_chnmon(SubchDev *sch)
+{
+ if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
+ /* Not active. */
+ return;
+ }
+ /* The counter is conveniently located at the beginning of the struct. */
+ if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
+ /* Format 1, per-subchannel area. */
+ uint32_t count;
+
+ count = ldl_phys(sch->curr_status.mba);
+ count++;
+ stl_phys(sch->curr_status.mba, count);
+ } else {
+ /* Format 0, global area. */
+ uint32_t offset;
+ uint16_t count;
+
+ offset = sch->curr_status.pmcw.mbi << 5;
+ count = lduw_phys(channel_subsys->chnmon_area + offset);
+ count++;
+ stw_phys(channel_subsys->chnmon_area + offset, count);
+ }
+}
+
+int css_do_ssch(SubchDev *sch, ORB *orb)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl & (SCSW_FCTL_START_FUNC |
+ SCSW_FCTL_HALT_FUNC |
+ SCSW_FCTL_CLEAR_FUNC)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* If monitoring is active, update counter. */
+ if (channel_subsys->chnmon_active) {
+ css_update_chnmon(sch);
+ }
+ sch->orb = orb;
+ sch->channel_prog = orb->cpa;
+ /* Trigger the start function. */
+ s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+
+ do_subchannel_work(sch);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static void copy_irb_to_guest(IRB *dest, const IRB *src)
+{
+ int i;
+
+ copy_scsw_to_guest(&dest->scsw, &src->scsw);
+
+ for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
+ dest->esw[i] = cpu_to_be32(src->esw[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
+ dest->ecw[i] = cpu_to_be32(src->ecw[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
+ dest->emw[i] = cpu_to_be32(src->emw[i]);
+ }
+}
+
+int css_do_tsch(SubchDev *sch, IRB *target_irb)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ uint16_t stctl;
+ uint16_t fctl;
+ uint16_t actl;
+ IRB irb;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = 3;
+ goto out;
+ }
+
+ stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
+ fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
+ actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
+
+ /* Prepare the irb for the guest. */
+ memset(&irb, 0, sizeof(IRB));
+
+ /* Copy scsw from current status. */
+ memcpy(&irb.scsw, s, sizeof(SCSW));
+ if (stctl & SCSW_STCTL_STATUS_PEND) {
+ if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
+ SCSW_CSTAT_CHN_CTRL_CHK |
+ SCSW_CSTAT_INTF_CTRL_CHK)) {
+ irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
+ irb.esw[0] = 0x04804000;
+ } else {
+ irb.esw[0] = 0x00800000;
+ }
+ /* If a unit check is pending, copy sense data. */
+ if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
+ (p->chars & PMCW_CHARS_MASK_CSENSE)) {
+ irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
+ memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
+ irb.esw[1] = 0x02000000 | (sizeof(sch->sense_data) << 8);
+ }
+ }
+ /* Store the irb to the guest. */
+ copy_irb_to_guest(target_irb, &irb);
+
+ /* Clear conditions on subchannel, if applicable. */
+ if (stctl & SCSW_STCTL_STATUS_PEND) {
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
+ ((fctl & SCSW_FCTL_HALT_FUNC) &&
+ (actl & SCSW_ACTL_SUSP))) {
+ s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
+ }
+ if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+ s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_HALT_PEND |
+ SCSW_ACTL_CLEAR_PEND |
+ SCSW_ACTL_SUSP);
+ } else {
+ if ((actl & SCSW_ACTL_SUSP) &&
+ (fctl & SCSW_FCTL_START_FUNC)) {
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+ if (fctl & SCSW_FCTL_HALT_FUNC) {
+ s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_HALT_PEND |
+ SCSW_ACTL_CLEAR_PEND |
+ SCSW_ACTL_SUSP);
+ } else {
+ s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
+ }
+ }
+ }
+ /* Clear pending sense data. */
+ if (p->chars & PMCW_CHARS_MASK_CSENSE) {
+ memset(sch->sense_data, 0 , sizeof(sch->sense_data));
+ }
+ }
+
+ ret = ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
+
+out:
+ return ret;
+}
+
+static void copy_crw_to_guest(CRW *dest, const CRW *src)
+{
+ dest->flags = cpu_to_be16(src->flags);
+ dest->rsid = cpu_to_be16(src->rsid);
+}
+
+int css_do_stcrw(CRW *crw)
+{
+ CrwContainer *crw_cont;
+ int ret;
+
+ crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws);
+ if (crw_cont) {
+ QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling);
+ copy_crw_to_guest(crw, &crw_cont->crw);
+ g_free(crw_cont);
+ ret = 0;
+ } else {
+ /* List was empty, turn crw machine checks on again. */
+ memset(crw, 0, sizeof(*crw));
+ channel_subsys->do_crw_mchk = true;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+int css_do_tpi(IOIntCode *int_code, int lowcore)
+{
+ /* No pending interrupts for !KVM. */
+ return 0;
+ }
+
+int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
+ int rfmt, void *buf)
+{
+ int i, desc_size;
+ uint32_t words[8];
+ uint32_t chpid_type_word;
+ CssImage *css;
+
+ if (!m && !cssid) {
+ css = channel_subsys->css[channel_subsys->default_cssid];
+ } else {
+ css = channel_subsys->css[cssid];
+ }
+ if (!css) {
+ return 0;
+ }
+ desc_size = 0;
+ for (i = f_chpid; i <= l_chpid; i++) {
+ if (css->chpids[i].in_use) {
+ chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
+ if (rfmt == 0) {
+ words[0] = cpu_to_be32(chpid_type_word);
+ words[1] = 0;
+ memcpy(buf + desc_size, words, 8);
+ desc_size += 8;
+ } else if (rfmt == 1) {
+ words[0] = cpu_to_be32(chpid_type_word);
+ words[1] = 0;
+ words[2] = 0;
+ words[3] = 0;
+ words[4] = 0;
+ words[5] = 0;
+ words[6] = 0;
+ words[7] = 0;
+ memcpy(buf + desc_size, words, 32);
+ desc_size += 32;
+ }
+ }
+ }
+ return desc_size;
+}
+
+void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
+{
+ /* dct is currently ignored (not really meaningful for our devices) */
+ /* TODO: Don't ignore mbk. */
+ if (update && !channel_subsys->chnmon_active) {
+ /* Enable measuring. */
+ channel_subsys->chnmon_area = mbo;
+ channel_subsys->chnmon_active = true;
+ }
+ if (!update && channel_subsys->chnmon_active) {
+ /* Disable measuring. */
+ channel_subsys->chnmon_area = 0;
+ channel_subsys->chnmon_active = false;
+ }
+}
+
+int css_do_rsch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
+ (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
+ (!(s->ctrl & SCSW_ACTL_SUSP))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If monitoring is active, update counter. */
+ if (channel_subsys->chnmon_active) {
+ css_update_chnmon(sch);
+ }
+
+ s->ctrl |= SCSW_ACTL_RESUME_PEND;
+ do_subchannel_work(sch);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_rchp(uint8_t cssid, uint8_t chpid)
+{
+ uint8_t real_cssid;
+
+ if (cssid > channel_subsys->max_cssid) {
+ return -EINVAL;
+ }
+ if (channel_subsys->max_cssid == 0) {
+ real_cssid = channel_subsys->default_cssid;
+ } else {
+ real_cssid = cssid;
+ }
+ if (!channel_subsys->css[real_cssid]) {
+ return -EINVAL;
+ }
+
+ if (!channel_subsys->css[real_cssid]->chpids[chpid].in_use) {
+ return -ENODEV;
+ }
+
+ if (!channel_subsys->css[real_cssid]->chpids[chpid].is_virtual) {
+ fprintf(stderr,
+ "rchp unsupported for non-virtual chpid %x.%02x!\n",
+ real_cssid, chpid);
+ return -ENODEV;
+ }
+
+ /* We don't really use a channel path, so we're done here. */
+ css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
+ channel_subsys->max_cssid > 0 ? 1 : 0, chpid);
+ if (channel_subsys->max_cssid > 0) {
+ css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
+ }
+ return 0;
+}
+
+bool css_schid_final(uint8_t cssid, uint8_t ssid, uint16_t schid)
+{
+ SubchSet *set;
+
+ if (cssid > MAX_CSSID || ssid > MAX_SSID || !channel_subsys->css[cssid] ||
+ !channel_subsys->css[cssid]->sch_set[ssid]) {
+ return true;
+ }
+ set = channel_subsys->css[cssid]->sch_set[ssid];
+ return schid > find_last_bit(set->schids_used,
+ (MAX_SCHID + 1) / sizeof(unsigned long));
+}
+
+static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
+{
+ CssImage *css;
+
+ trace_css_chpid_add(cssid, chpid, type);
+ if (cssid > MAX_CSSID) {
+ return -EINVAL;
+ }
+ css = channel_subsys->css[cssid];
+ if (!css) {
+ return -EINVAL;
+ }
+ if (css->chpids[chpid].in_use) {
+ return -EEXIST;
+ }
+ css->chpids[chpid].in_use = 1;
+ css->chpids[chpid].type = type;
+ css->chpids[chpid].is_virtual = 1;
+
+ css_generate_chp_crws(cssid, chpid);
+
+ return 0;
+}
+
+void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int i;
+ CssImage *css = channel_subsys->css[sch->cssid];
+
+ assert(css != NULL);
+ memset(p, 0, sizeof(PMCW));
+ p->flags |= PMCW_FLAGS_MASK_DNV;
+ p->devno = sch->devno;
+ /* single path */
+ p->pim = 0x80;
+ p->pom = 0xff;
+ p->pam = 0x80;
+ p->chpid[0] = chpid;
+ if (!css->chpids[chpid].in_use) {
+ css_add_virtual_chpid(sch->cssid, chpid, type);
+ }
+
+ memset(s, 0, sizeof(SCSW));
+ sch->curr_status.mba = 0;
+ for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
+ sch->curr_status.mda[i] = 0;
+ }
+}
+
+SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
+{
+ uint8_t real_cssid;
+
+ real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid;
+
+ if (!channel_subsys->css[real_cssid]) {
+ return NULL;
+ }
+
+ if (!channel_subsys->css[real_cssid]->sch_set[ssid]) {
+ return NULL;
+ }
+
+ return channel_subsys->css[real_cssid]->sch_set[ssid]->sch[schid];
+}
+
+bool css_subch_visible(SubchDev *sch)
+{
+ if (sch->ssid > channel_subsys->max_ssid) {
+ return false;
+ }
+
+ if (sch->cssid != channel_subsys->default_cssid) {
+ return (channel_subsys->max_cssid > 0);
+ }
+
+ return true;
+}
+
+bool css_present(uint8_t cssid)
+{
+ return (channel_subsys->css[cssid] != NULL);
+}
+
+bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
+{
+ if (!channel_subsys->css[cssid]) {
+ return false;
+ }
+ if (!channel_subsys->css[cssid]->sch_set[ssid]) {
+ return false;
+ }
+
+ return !!test_bit(devno,
+ channel_subsys->css[cssid]->sch_set[ssid]->devnos_used);
+}
+
+void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ uint16_t devno, SubchDev *sch)
+{
+ CssImage *css;
+ SubchSet *s_set;
+
+ trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
+ devno);
+ if (!channel_subsys->css[cssid]) {
+ fprintf(stderr,
+ "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
+ __func__, cssid, ssid, schid);
+ return;
+ }
+ css = channel_subsys->css[cssid];
+
+ if (!css->sch_set[ssid]) {
+ css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
+ }
+ s_set = css->sch_set[ssid];
+
+ s_set->sch[schid] = sch;
+ if (sch) {
+ set_bit(schid, s_set->schids_used);
+ set_bit(devno, s_set->devnos_used);
+ } else {
+ clear_bit(schid, s_set->schids_used);
+ clear_bit(devno, s_set->devnos_used);
+ }
+}
+
+void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
+{
+ CrwContainer *crw_cont;
+
+ trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
+ /* TODO: Maybe use a static crw pool? */
+ crw_cont = g_try_malloc0(sizeof(CrwContainer));
+ if (!crw_cont) {
+ channel_subsys->crws_lost = true;
+ return;
+ }
+ crw_cont->crw.flags = (rsc << 8) | erc;
+ if (chain) {
+ crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
+ }
+ crw_cont->crw.rsid = rsid;
+ if (channel_subsys->crws_lost) {
+ crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
+ channel_subsys->crws_lost = false;
+ }
+
+ QTAILQ_INSERT_TAIL(&channel_subsys->pending_crws, crw_cont, sibling);
+
+ if (channel_subsys->do_crw_mchk) {
+ S390CPU *cpu = s390_cpu_addr2state(0);
+
+ channel_subsys->do_crw_mchk = false;
+ /* Inject crw pending machine check. */
+ s390_crw_mchk(cpu);
+ }
+}
+
+void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ int hotplugged, int add)
+{
+ uint8_t guest_cssid;
+ bool chain_crw;
+
+ if (add && !hotplugged) {
+ return;
+ }
+ if (channel_subsys->max_cssid == 0) {
+ /* Default cssid shows up as 0. */
+ guest_cssid = (cssid == channel_subsys->default_cssid) ? 0 : cssid;
+ } else {
+ /* Show real cssid to the guest. */
+ guest_cssid = cssid;
+ }
+ /*
+ * Only notify for higher subchannel sets/channel subsystems if the
+ * guest has enabled it.
+ */
+ if ((ssid > channel_subsys->max_ssid) ||
+ (guest_cssid > channel_subsys->max_cssid) ||
+ ((channel_subsys->max_cssid == 0) &&
+ (cssid != channel_subsys->default_cssid))) {
+ return;
+ }
+ chain_crw = (channel_subsys->max_ssid > 0) ||
+ (channel_subsys->max_cssid > 0);
+ css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
+ if (chain_crw) {
+ css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
+ (guest_cssid << 8) | (ssid << 4));
+ }
+}
+
+void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
+{
+ /* TODO */
+}
+
+int css_enable_mcsse(void)
+{
+ trace_css_enable_facility("mcsse");
+ channel_subsys->max_cssid = MAX_CSSID;
+ return 0;
+}
+
+int css_enable_mss(void)
+{
+ trace_css_enable_facility("mss");
+ channel_subsys->max_ssid = MAX_SSID;
+ return 0;
+}
+
+static void css_init(void)
+{
+ channel_subsys = g_malloc0(sizeof(*channel_subsys));
+ QTAILQ_INIT(&channel_subsys->pending_crws);
+ channel_subsys->do_crw_mchk = true;
+ channel_subsys->crws_lost = false;
+ channel_subsys->chnmon_active = false;
+}
+machine_init(css_init);
+
+void css_reset_sch(SubchDev *sch)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+
+ p->intparm = 0;
+ p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+ PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
+ p->flags |= PMCW_FLAGS_MASK_DNV;
+ p->devno = sch->devno;
+ p->pim = 0x80;
+ p->lpm = p->pim;
+ p->pnom = 0;
+ p->lpum = 0;
+ p->mbi = 0;
+ p->pom = 0xff;
+ p->pam = 0x80;
+ p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
+ PMCW_CHARS_MASK_CSENSE);
+
+ memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
+ sch->curr_status.mba = 0;
+
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ sch->orb = NULL;
+}
+
+void css_reset(void)
+{
+ CrwContainer *crw_cont;
+
+ /* Clean up monitoring. */
+ channel_subsys->chnmon_active = false;
+ channel_subsys->chnmon_area = 0;
+
+ /* Clear pending CRWs. */
+ while ((crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws))) {
+ QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling);
+ g_free(crw_cont);
+ }
+ channel_subsys->do_crw_mchk = true;
+ channel_subsys->crws_lost = false;
+
+ /* Reset maximum ids. */
+ channel_subsys->max_cssid = 0;
+ channel_subsys->max_ssid = 0;
+}
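
To show how the helpers above are meant to be combined by a transport, here is a
minimal, hypothetical sketch (not part of the patch: my_ccw_cb, MY_CHPID_TYPE and
my_attach_subchannel are placeholder names, and error handling is omitted). The
real consumer is virtio-ccw.c further down in this series.

    #include "css.h"

    #define MY_CHPID_TYPE 0x32    /* placeholder channel-path type */

    /* Device-specific CCWs that css_interpret_ccw() does not handle itself are
     * forwarded here; 0 means the command completed, -ENOSYS produces a unit
     * check (command reject) in sch_handle_start_func(). */
    static int my_ccw_cb(SubchDev *sch, CCW1 ccw)
    {
        return -ENOSYS;
    }

    /* Assumes the machine already called css_create_css_image(cssid, true) and
     * that sch->cssid/ssid/schid/devno were chosen to be free (see
     * css_find_subch()/css_devno_used()). */
    static void my_attach_subchannel(SubchDev *sch)
    {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, sch);
        css_sch_build_virtual_schib(sch, 0, MY_CHPID_TYPE);
        sch->ccw_cb = my_ccw_cb;
        /* A guest ssch() now reaches css_do_ssch(), which walks the channel
         * program and calls back into my_ccw_cb for unknown commands. */
    }
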
diff --git a/hw/s390x/css.h b/hw/s390x/css.h
new file mode 100644
index 0000000..85ed05d
--- /dev/null
+++ b/hw/s390x/css.h
@@ -0,0 +1,99 @@
+/*
+ * Channel subsystem structures and definitions.
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef CSS_H
+#define CSS_H
+
+#include "ioinst.h"
+
+/* Channel subsystem constants. */
+#define MAX_SCHID 65535
+#define MAX_SSID 3
+#define MAX_CSSID 254 /* 255 is reserved */
+#define MAX_CHPID 255
+
+#define MAX_CIWS 62
+
+typedef struct CIW {
+ uint8_t type;
+ uint8_t command;
+ uint16_t count;
+} QEMU_PACKED CIW;
+
+typedef struct SenseId {
+ /* common part */
+ uint8_t reserved; /* always 0x'FF' */
+ uint16_t cu_type; /* control unit type */
+ uint8_t cu_model; /* control unit model */
+ uint16_t dev_type; /* device type */
+ uint8_t dev_model; /* device model */
+ uint8_t unused; /* padding byte */
+ /* extended part */
+ CIW ciw[MAX_CIWS]; /* variable # of CIWs */
+} QEMU_PACKED SenseId;
+
+/* Channel measurements, from linux/drivers/s390/cio/cmf.c. */
+typedef struct CMB {
+ uint16_t ssch_rsch_count;
+ uint16_t sample_count;
+ uint32_t device_connect_time;
+ uint32_t function_pending_time;
+ uint32_t device_disconnect_time;
+ uint32_t control_unit_queuing_time;
+ uint32_t device_active_only_time;
+ uint32_t reserved[2];
+} QEMU_PACKED CMB;
+
+typedef struct CMBE {
+ uint32_t ssch_rsch_count;
+ uint32_t sample_count;
+ uint32_t device_connect_time;
+ uint32_t function_pending_time;
+ uint32_t device_disconnect_time;
+ uint32_t control_unit_queuing_time;
+ uint32_t device_active_only_time;
+ uint32_t device_busy_time;
+ uint32_t initial_command_response_time;
+ uint32_t reserved[7];
+} QEMU_PACKED CMBE;
+
+struct SubchDev {
+ /* channel-subsystem related things: */
+ uint8_t cssid;
+ uint8_t ssid;
+ uint16_t schid;
+ uint16_t devno;
+ SCHIB curr_status;
+ uint8_t sense_data[32];
+ hwaddr channel_prog;
+ CCW1 last_cmd;
+ bool last_cmd_valid;
+ ORB *orb;
+ /* transport-provided data: */
+ int (*ccw_cb) (SubchDev *, CCW1);
+ SenseId id;
+ void *driver_data;
+};
+
+typedef SubchDev *(*css_subch_cb_func)(uint8_t m, uint8_t cssid, uint8_t ssid,
+ uint16_t schid);
+int css_create_css_image(uint8_t cssid, bool default_image);
+bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno);
+void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ uint16_t devno, SubchDev *sch);
+void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type);
+void css_reset(void);
+void css_reset_sch(SubchDev *sch);
+void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid);
+void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ int hotplugged, int add);
+void css_generate_chp_crws(uint8_t cssid, uint8_t chpid);
+#endif
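
As an illustration of the SenseId layout above, a transport fills the structure
in host byte order and the css layer converts it when the guest issues SENSE ID
(see copy_sense_id_to_guest() in css.c). A hypothetical sketch, with made-up
control unit values:

    #include <string.h>
    #include "css.h"

    static void my_build_senseid(SubchDev *sch)
    {
        memset(&sch->id, 0, sizeof(SenseId));
        sch->id.reserved = 0xff;      /* byte 0 must read back as 0xff */
        sch->id.cu_type  = 0x3832;    /* placeholder control unit type */
        sch->id.cu_model = 0x01;      /* placeholder control unit model */
        /* cu_type, dev_type and the CIW counts are byteswapped with
         * cpu_to_be16() by copy_sense_id_to_guest() when copied out. */
    }
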
diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index 7cbbf99..86e8415 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -159,7 +159,7 @@ static void s390_ipl_class_init(ObjectClass *klass, void *data)
dc->no_user = 1;
}
-static TypeInfo s390_ipl_info = {
+static const TypeInfo s390_ipl_info = {
.class_init = s390_ipl_class_init,
.parent = TYPE_SYS_BUS_DEVICE,
.name = "s390-ipl",
diff --git a/hw/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
index b5d1f2b..32f63b0 100644
--- a/hw/s390-virtio-bus.c
+++ b/hw/s390x/s390-virtio-bus.c
@@ -17,12 +17,12 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "hw.h"
+#include "hw/hw.h"
#include "block/block.h"
#include "sysemu/sysemu.h"
-#include "boards.h"
+#include "hw/boards.h"
#include "monitor/monitor.h"
-#include "loader.h"
+#include "hw/loader.h"
#include "elf.h"
#include "hw/virtio.h"
#include "hw/virtio-rng.h"
@@ -31,7 +31,7 @@
#include "hw/sysbus.h"
#include "sysemu/kvm.h"
-#include "hw/s390-virtio-bus.h"
+#include "hw/s390x/s390-virtio-bus.h"
#include "hw/virtio-bus.h"
/* #define DEBUG_S390 */
@@ -508,6 +508,13 @@ static int s390_virtio_busdev_init(DeviceState *dev)
return _info->init(_dev);
}
+static void s390_virtio_busdev_reset(DeviceState *dev)
+{
+ VirtIOS390Device *_dev = (VirtIOS390Device *)dev;
+
+ virtio_reset(_dev->vdev);
+}
+
static void virtio_s390_device_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -515,6 +522,7 @@ static void virtio_s390_device_class_init(ObjectClass *klass, void *data)
dc->init = s390_virtio_busdev_init;
dc->bus_type = TYPE_S390_VIRTIO_BUS;
dc->unplug = qdev_simple_unplug_cb;
+ dc->reset = s390_virtio_busdev_reset;
}
static const TypeInfo virtio_s390_device_info = {
diff --git a/hw/s390-virtio-bus.h b/hw/s390x/s390-virtio-bus.h
index 438b37f..4aacf83 100644
--- a/hw/s390-virtio-bus.h
+++ b/hw/s390x/s390-virtio-bus.h
@@ -19,12 +19,12 @@
#ifndef HW_S390_VIRTIO_BUS_H
#define HW_S390_VIRTIO_BUS_H 1
-#include "virtio-blk.h"
-#include "virtio-net.h"
-#include "virtio-rng.h"
-#include "virtio-serial.h"
-#include "virtio-scsi.h"
-#include "virtio-bus.h"
+#include "hw/virtio-blk.h"
+#include "hw/virtio-net.h"
+#include "hw/virtio-rng.h"
+#include "hw/virtio-serial.h"
+#include "hw/virtio-scsi.h"
+#include "hw/virtio-bus.h"
#define VIRTIO_DEV_OFFS_TYPE 0 /* 8 bits */
#define VIRTIO_DEV_OFFS_NUM_VQ 1 /* 8 bits */
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
new file mode 100644
index 0000000..6549211
--- /dev/null
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -0,0 +1,134 @@
+/*
+ * virtio ccw machine
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "hw/boards.h"
+#include "exec/address-spaces.h"
+#include "s390-virtio.h"
+#include "sclp.h"
+#include "ioinst.h"
+#include "css.h"
+#include "virtio-ccw.h"
+
+static int virtio_ccw_hcall_notify(const uint64_t *args)
+{
+ uint64_t subch_id = args[0];
+ uint64_t queue = args[1];
+ SubchDev *sch;
+ int cssid, ssid, schid, m;
+
+ if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) {
+ return -EINVAL;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ return -EINVAL;
+ }
+ virtio_queue_notify(virtio_ccw_get_vdev(sch), queue);
+ return 0;
+
+}
+
+static int virtio_ccw_hcall_early_printk(const uint64_t *args)
+{
+ uint64_t mem = args[0];
+
+ if (mem < ram_size) {
+ /* Early printk */
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void virtio_ccw_register_hcalls(void)
+{
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY,
+ virtio_ccw_hcall_notify);
+ /* Tolerate early printk. */
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_NOTIFY,
+ virtio_ccw_hcall_early_printk);
+}
+
+static void ccw_init(QEMUMachineInitArgs *args)
+{
+ ram_addr_t my_ram_size = args->ram_size;
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ int shift = 0;
+ uint8_t *storage_keys;
+ int ret;
+ VirtualCssBus *css_bus;
+
+ /* s390x ram size detection needs a 16bit multiplier + an increment. So
+ guests > 64GB can be specified in 2MB steps etc. */
+ while ((my_ram_size >> (20 + shift)) > 65535) {
+ shift++;
+ }
+ my_ram_size = my_ram_size >> (20 + shift) << (20 + shift);
+
+ /* lets propagate the changed ram size into the global variable. */
+ ram_size = my_ram_size;
+
+ /* get a BUS */
+ css_bus = virtual_css_bus_init();
+ s390_sclp_init();
+ s390_init_ipl_dev(args->kernel_filename, args->kernel_cmdline,
+ args->initrd_filename);
+
+ /* register hypercalls */
+ virtio_ccw_register_hcalls();
+
+ /* allocate RAM */
+ memory_region_init_ram(ram, "s390.ram", my_ram_size);
+ vmstate_register_ram_global(ram);
+ memory_region_add_subregion(sysmem, 0, ram);
+
+ /* allocate storage keys */
+ storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);
+
+ /* init CPUs */
+ s390_init_cpus(args->cpu_model, storage_keys);
+
+ if (kvm_enabled()) {
+ kvm_s390_enable_css_support(s390_cpu_addr2state(0));
+ }
+ /*
+ * Create virtual css and set it as default so that non mcss-e
+ * enabled guests only see virtio devices.
+ */
+ ret = css_create_css_image(VIRTUAL_CSSID, true);
+ assert(ret == 0);
+
+ /* Create VirtIO network adapters */
+ s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
+}
+
+static QEMUMachine ccw_machine = {
+ .name = "s390-ccw-virtio",
+ .alias = "s390-ccw",
+ .desc = "VirtIO-ccw based S390 machine",
+ .init = ccw_init,
+ .block_default_type = IF_VIRTIO,
+ .no_cdrom = 1,
+ .no_floppy = 1,
+ .no_serial = 1,
+ .no_parallel = 1,
+ .no_sdcard = 1,
+ .use_sclp = 1,
+ .max_cpus = 255,
+ DEFAULT_MACHINE_OPTIONS,
+};
+
+static void ccw_machine_init(void)
+{
+ qemu_register_machine(&ccw_machine);
+}
+
+machine_init(ccw_machine_init)
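
With the machine type registered above, the new transport can be tried out
(assuming a guest kernel with virtio-ccw support) with something along the lines
of qemu-system-s390x -machine s390-ccw-virtio, together with the ccw flavours of
the virtio devices such as the virtio-net-ccw adapters that ccw_init() creates;
the exact device options depend on the device definitions in virtio-ccw.c below.
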
diff --git a/hw/s390x/s390-virtio-hcall.c b/hw/s390x/s390-virtio-hcall.c
index d7938c0..ee62649 100644
--- a/hw/s390x/s390-virtio-hcall.c
+++ b/hw/s390x/s390-virtio-hcall.c
@@ -10,7 +10,7 @@
*/
#include "cpu.h"
-#include "hw/s390-virtio.h"
+#include "hw/s390x/s390-virtio.h"
#define MAX_DIAG_SUBCODES 255
diff --git a/hw/s390-virtio.c b/hw/s390x/s390-virtio.c
index 5edaabb..2a1d9ac 100644
--- a/hw/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -21,22 +21,22 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "hw.h"
+#include "hw/hw.h"
#include "block/block.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "net/net.h"
-#include "boards.h"
+#include "hw/boards.h"
#include "monitor/monitor.h"
-#include "loader.h"
+#include "hw/loader.h"
#include "hw/virtio.h"
#include "hw/sysbus.h"
#include "sysemu/kvm.h"
#include "exec/address-spaces.h"
-#include "hw/s390-virtio-bus.h"
+#include "hw/s390x/s390-virtio-bus.h"
#include "hw/s390x/sclp.h"
-#include "hw/s390-virtio.h"
+#include "hw/s390x/s390-virtio.h"
//#define DEBUG_S390
@@ -86,6 +86,9 @@ static int s390_virtio_hcall_reset(const uint64_t *args)
VirtIOS390Device *dev;
dev = s390_virtio_bus_find_mem(s390_bus, mem);
+ if (dev == NULL) {
+ return -EINVAL;
+ }
virtio_reset(dev->vdev);
stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0);
s390_virtio_device_sync(dev);
@@ -147,13 +150,73 @@ unsigned s390_del_running_cpu(CPUS390XState *env)
return s390_running_cpus;
}
+void s390_init_ipl_dev(const char *kernel_filename,
+ const char *kernel_cmdline,
+ const char *initrd_filename)
+{
+ DeviceState *dev;
+
+ dev = qdev_create(NULL, "s390-ipl");
+ if (kernel_filename) {
+ qdev_prop_set_string(dev, "kernel", kernel_filename);
+ }
+ if (initrd_filename) {
+ qdev_prop_set_string(dev, "initrd", initrd_filename);
+ }
+ qdev_prop_set_string(dev, "cmdline", kernel_cmdline);
+ qdev_init_nofail(dev);
+}
+
+void s390_init_cpus(const char *cpu_model, uint8_t *storage_keys)
+{
+ int i;
+
+ if (cpu_model == NULL) {
+ cpu_model = "host";
+ }
+
+ ipi_states = g_malloc(sizeof(S390CPU *) * smp_cpus);
+
+ for (i = 0; i < smp_cpus; i++) {
+ S390CPU *cpu;
+
+ cpu = cpu_s390x_init(cpu_model);
+
+ ipi_states[i] = cpu;
+ cpu->env.halted = 1;
+ cpu->env.exception_index = EXCP_HLT;
+ cpu->env.storage_keys = storage_keys;
+ }
+}
+
+
+void s390_create_virtio_net(BusState *bus, const char *name)
+{
+ int i;
+
+ for (i = 0; i < nb_nics; i++) {
+ NICInfo *nd = &nd_table[i];
+ DeviceState *dev;
+
+ if (!nd->model) {
+ nd->model = g_strdup("virtio");
+ }
+
+ if (strcmp(nd->model, "virtio")) {
+ fprintf(stderr, "S390 only supports VirtIO nics\n");
+ exit(1);
+ }
+
+ dev = qdev_create(bus, name);
+ qdev_set_nic_properties(dev, nd);
+ qdev_init_nofail(dev);
+ }
+}
+
/* PC hardware initialisation */
static void s390_init(QEMUMachineInitArgs *args)
{
ram_addr_t my_ram_size = args->ram_size;
- const char *cpu_model = args->cpu_model;
- CPUS390XState *env = NULL;
- DeviceState *dev;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
int shift = 0;
@@ -161,7 +224,6 @@ static void s390_init(QEMUMachineInitArgs *args)
void *virtio_region;
hwaddr virtio_region_len;
hwaddr virtio_region_start;
- int i;
/* s390x ram size detection needs a 16bit multiplier + an increment. So
guests > 64GB can be specified in 2MB steps etc. */
@@ -176,15 +238,8 @@ static void s390_init(QEMUMachineInitArgs *args)
/* get a BUS */
s390_bus = s390_virtio_bus_init(&my_ram_size);
s390_sclp_init();
- dev = qdev_create(NULL, "s390-ipl");
- if (args->kernel_filename) {
- qdev_prop_set_string(dev, "kernel", args->kernel_filename);
- }
- if (args->initrd_filename) {
- qdev_prop_set_string(dev, "initrd", args->initrd_filename);
- }
- qdev_prop_set_string(dev, "cmdline", args->kernel_cmdline);
- qdev_init_nofail(dev);
+ s390_init_ipl_dev(args->kernel_filename, args->kernel_cmdline,
+ args->initrd_filename);
/* register hypercalls */
s390_virtio_register_hcalls();
@@ -207,46 +262,10 @@ static void s390_init(QEMUMachineInitArgs *args)
storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);
/* init CPUs */
- if (cpu_model == NULL) {
- cpu_model = "host";
- }
-
- ipi_states = g_malloc(sizeof(S390CPU *) * smp_cpus);
-
- for (i = 0; i < smp_cpus; i++) {
- S390CPU *cpu;
- CPUS390XState *tmp_env;
-
- cpu = cpu_s390x_init(cpu_model);
- tmp_env = &cpu->env;
- if (!env) {
- env = tmp_env;
- }
- ipi_states[i] = cpu;
- tmp_env->halted = 1;
- tmp_env->exception_index = EXCP_HLT;
- tmp_env->storage_keys = storage_keys;
- }
-
+ s390_init_cpus(args->cpu_model, storage_keys);
/* Create VirtIO network adapters */
- for(i = 0; i < nb_nics; i++) {
- NICInfo *nd = &nd_table[i];
- DeviceState *dev;
-
- if (!nd->model) {
- nd->model = g_strdup("virtio");
- }
-
- if (strcmp(nd->model, "virtio")) {
- fprintf(stderr, "S390 only supports VirtIO nics\n");
- exit(1);
- }
-
- dev = qdev_create((BusState *)s390_bus, "virtio-net-s390");
- qdev_set_nic_properties(dev, nd);
- qdev_init_nofail(dev);
- }
+ s390_create_virtio_net((BusState *)s390_bus, "virtio-net-s390");
}
static QEMUMachine s390_machine = {
diff --git a/hw/s390-virtio.h b/hw/s390x/s390-virtio.h
index 25bb610..a6c4c19 100644
--- a/hw/s390-virtio.h
+++ b/hw/s390x/s390-virtio.h
@@ -15,8 +15,14 @@
#define KVM_S390_VIRTIO_NOTIFY 0
#define KVM_S390_VIRTIO_RESET 1
#define KVM_S390_VIRTIO_SET_STATUS 2
+#define KVM_S390_VIRTIO_CCW_NOTIFY 3
typedef int (*s390_virtio_fn)(const uint64_t *args);
void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn);
+void s390_init_cpus(const char *cpu_model, uint8_t *storage_keys);
+void s390_init_ipl_dev(const char *kernel_filename,
+ const char *kernel_cmdline,
+ const char *initrd_filename);
+void s390_create_virtio_net(BusState *bus, const char *name);
#endif
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
new file mode 100644
index 0000000..231f81e
--- /dev/null
+++ b/hw/s390x/virtio-ccw.c
@@ -0,0 +1,960 @@
+/*
+ * virtio ccw target implementation
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "hw/hw.h"
+#include "block/block.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/sysemu.h"
+#include "net/net.h"
+#include "monitor/monitor.h"
+#include "hw/virtio.h"
+#include "hw/virtio-serial.h"
+#include "hw/virtio-net.h"
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "hw/virtio-bus.h"
+
+#include "ioinst.h"
+#include "css.h"
+#include "virtio-ccw.h"
+#include "trace.h"
+
+static int virtual_css_bus_reset(BusState *qbus)
+{
+ /* This should actually be modelled via the generic css */
+ css_reset();
+
+ /* we don't traverse ourselves, return 0 */
+ return 0;
+}
+
+
+static void virtual_css_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *k = BUS_CLASS(klass);
+
+ k->reset = virtual_css_bus_reset;
+}
+
+static const TypeInfo virtual_css_bus_info = {
+ .name = TYPE_VIRTUAL_CSS_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(VirtualCssBus),
+ .class_init = virtual_css_bus_class_init,
+};
+
+static const VirtIOBindings virtio_ccw_bindings;
+
+VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
+{
+ VirtIODevice *vdev = NULL;
+
+ if (sch->driver_data) {
+ vdev = ((VirtioCcwDevice *)sch->driver_data)->vdev;
+ }
+ return vdev;
+}
+
+VirtualCssBus *virtual_css_bus_init(void)
+{
+ VirtualCssBus *cbus;
+ BusState *bus;
+ DeviceState *dev;
+
+ /* Create bridge device */
+ dev = qdev_create(NULL, "virtual-css-bridge");
+ qdev_init_nofail(dev);
+
+ /* Create bus on bridge device */
+ bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css");
+ cbus = VIRTUAL_CSS_BUS(bus);
+
+ /* Enable hotplugging */
+ bus->allow_hotplug = 1;
+
+ return cbus;
+}
+
+/* Communication blocks used by several channel commands. */
+typedef struct VqInfoBlock {
+ uint64_t queue;
+ uint32_t align;
+ uint16_t index;
+ uint16_t num;
+} QEMU_PACKED VqInfoBlock;
+
+typedef struct VqConfigBlock {
+ uint16_t index;
+ uint16_t num_max;
+} QEMU_PACKED VqConfigBlock;
+
+typedef struct VirtioFeatDesc {
+ uint32_t features;
+ uint8_t index;
+} QEMU_PACKED VirtioFeatDesc;
+
+/* Specify where the virtqueues for the subchannel are in guest memory. */
+static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
+ uint16_t index, uint16_t num)
+{
+ VirtioCcwDevice *dev = sch->driver_data;
+
+ if (index > VIRTIO_PCI_QUEUE_MAX) {
+ return -EINVAL;
+ }
+
+ /* Current code in virtio.c relies on 4K alignment. */
+ if (addr && (align != 4096)) {
+ return -EINVAL;
+ }
+
+ if (!dev) {
+ return -EINVAL;
+ }
+
+ virtio_queue_set_addr(dev->vdev, index, addr);
+ if (!addr) {
+ virtio_queue_set_vector(dev->vdev, index, 0);
+ } else {
+ /* Fail if we don't have a big enough queue. */
+ /* TODO: Add interface to handle vring.num changing */
+ if (virtio_queue_get_num(dev->vdev, index) > num) {
+ return -EINVAL;
+ }
+ virtio_queue_set_vector(dev->vdev, index, index);
+ }
+ /* tell notify handler in case of config change */
+ dev->vdev->config_vector = VIRTIO_PCI_QUEUE_MAX;
+ return 0;
+}
+
+static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
+{
+ int ret;
+ VqInfoBlock info;
+ uint8_t status;
+ VirtioFeatDesc features;
+ void *config;
+ hwaddr indicators;
+ VqConfigBlock vq_config;
+ VirtioCcwDevice *dev = sch->driver_data;
+ bool check_len;
+ int len;
+ hwaddr hw_len;
+
+ if (!dev) {
+ return -EINVAL;
+ }
+
+ trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
+ ccw.cmd_code);
+ check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
+
+ /* Look at the command. */
+ switch (ccw.cmd_code) {
+ case CCW_CMD_SET_VQ:
+ if (check_len) {
+ if (ccw.count != sizeof(info)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(info)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ info.queue = ldq_phys(ccw.cda);
+ info.align = ldl_phys(ccw.cda + sizeof(info.queue));
+ info.index = lduw_phys(ccw.cda + sizeof(info.queue)
+ + sizeof(info.align));
+ info.num = lduw_phys(ccw.cda + sizeof(info.queue)
+ + sizeof(info.align)
+ + sizeof(info.index));
+ ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
+ info.num);
+ sch->curr_status.scsw.count = 0;
+ }
+ break;
+ case CCW_CMD_VDEV_RESET:
+ virtio_reset(dev->vdev);
+ ret = 0;
+ break;
+ case CCW_CMD_READ_FEAT:
+ if (check_len) {
+ if (ccw.count != sizeof(features)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(features)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ features.index = ldub_phys(ccw.cda + sizeof(features.features));
+ if (features.index < ARRAY_SIZE(dev->host_features)) {
+ features.features = dev->host_features[features.index];
+ } else {
+ /* Return zeroes if the guest supports more feature bits. */
+ features.features = 0;
+ }
+ stl_le_phys(ccw.cda, features.features);
+ sch->curr_status.scsw.count = ccw.count - sizeof(features);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_WRITE_FEAT:
+ if (check_len) {
+ if (ccw.count != sizeof(features)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(features)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ features.index = ldub_phys(ccw.cda + sizeof(features.features));
+ features.features = ldl_le_phys(ccw.cda);
+ if (features.index < ARRAY_SIZE(dev->host_features)) {
+ if (dev->vdev->set_features) {
+ dev->vdev->set_features(dev->vdev, features.features);
+ }
+ dev->vdev->guest_features = features.features;
+ } else {
+ /*
+ * If the guest supports more feature bits, assert that it
+ * passes us zeroes for those we don't support.
+ */
+ if (features.features) {
+ fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
+ features.index, features.features);
+ /* XXX: do a unit check here? */
+ }
+ }
+ sch->curr_status.scsw.count = ccw.count - sizeof(features);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_READ_CONF:
+ if (check_len) {
+ if (ccw.count > dev->vdev->config_len) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, dev->vdev->config_len);
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ dev->vdev->get_config(dev->vdev, dev->vdev->config);
+ /* XXX config space endianness */
+ cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_WRITE_CONF:
+ if (check_len) {
+ if (ccw.count > dev->vdev->config_len) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, dev->vdev->config_len);
+ hw_len = len;
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
+ if (!config) {
+ ret = -EFAULT;
+ } else {
+ len = hw_len;
+ /* XXX config space endianness */
+ memcpy(dev->vdev->config, config, len);
+ cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
+ if (dev->vdev->set_config) {
+ dev->vdev->set_config(dev->vdev, dev->vdev->config);
+ }
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ }
+ }
+ break;
+ case CCW_CMD_WRITE_STATUS:
+ if (check_len) {
+ if (ccw.count != sizeof(status)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(status)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ status = ldub_phys(ccw.cda);
+ virtio_set_status(dev->vdev, status);
+ if (dev->vdev->status == 0) {
+ virtio_reset(dev->vdev);
+ }
+ sch->curr_status.scsw.count = ccw.count - sizeof(status);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_SET_IND:
+ if (check_len) {
+ if (ccw.count != sizeof(indicators)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(indicators)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ indicators = ldq_phys(ccw.cda);
+ if (!indicators) {
+ ret = -EFAULT;
+ } else {
+ dev->indicators = indicators;
+ sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_SET_CONF_IND:
+ if (check_len) {
+ if (ccw.count != sizeof(indicators)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(indicators)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ indicators = ldq_phys(ccw.cda);
+ if (!indicators) {
+ ret = -EFAULT;
+ } else {
+ dev->indicators2 = indicators;
+ sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_READ_VQ_CONF:
+ if (check_len) {
+ if (ccw.count != sizeof(vq_config)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(vq_config)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ vq_config.index = lduw_phys(ccw.cda);
+ vq_config.num_max = virtio_queue_get_num(dev->vdev,
+ vq_config.index);
+ stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
+ sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
+
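+/*
+ * Allocate a subchannel for the virtio device, honouring a user-specified
+ * devno if present, build the virtual schib and sense id data, and announce
+ * the new subchannel with a channel report word.
+ */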
+static int virtio_ccw_device_init(VirtioCcwDevice *dev, VirtIODevice *vdev)
+{
+ unsigned int cssid = 0;
+ unsigned int ssid = 0;
+ unsigned int schid;
+ unsigned int devno;
+ bool have_devno = false;
+ bool found = false;
+ SubchDev *sch;
+ int ret;
+ int num;
+ DeviceState *parent = DEVICE(dev);
+
+ sch = g_malloc0(sizeof(SubchDev));
+
+ sch->driver_data = dev;
+ dev->sch = sch;
+
+ dev->vdev = vdev;
+ dev->indicators = 0;
+
+ /* Initialize subchannel structure. */
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ sch->orb = NULL;
+ /*
+ * Use a device number if provided. Otherwise, fall back to subchannel
+ * number.
+ */
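+    /*
+     * For example (illustrative values only):
+     *   -device virtio-blk-ccw,devno=fe.0.0001
+     * parses to cssid 0xfe, ssid 0, devno 0x0001.
+     */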
+ if (dev->bus_id) {
+ num = sscanf(dev->bus_id, "%x.%x.%04x", &cssid, &ssid, &devno);
+ if (num == 3) {
+ if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
+ ret = -EINVAL;
+ error_report("Invalid cssid or ssid: cssid %x, ssid %x",
+ cssid, ssid);
+ goto out_err;
+ }
+ /* Enforce use of virtual cssid. */
+ if (cssid != VIRTUAL_CSSID) {
+ ret = -EINVAL;
+ error_report("cssid %x not valid for virtio devices", cssid);
+ goto out_err;
+ }
+ if (css_devno_used(cssid, ssid, devno)) {
+ ret = -EEXIST;
+ error_report("Device %x.%x.%04x already exists", cssid, ssid,
+ devno);
+ goto out_err;
+ }
+ sch->cssid = cssid;
+ sch->ssid = ssid;
+ sch->devno = devno;
+ have_devno = true;
+ } else {
+ ret = -EINVAL;
+ error_report("Malformed devno parameter '%s'", dev->bus_id);
+ goto out_err;
+ }
+ }
+
+ /* Find the next free id. */
+ if (have_devno) {
+ for (schid = 0; schid <= MAX_SCHID; schid++) {
+ if (!css_find_subch(1, cssid, ssid, schid)) {
+ sch->schid = schid;
+ css_subch_assign(cssid, ssid, schid, devno, sch);
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ret = -ENODEV;
+ error_report("No free subchannel found for %x.%x.%04x", cssid, ssid,
+ devno);
+ goto out_err;
+ }
+ trace_virtio_ccw_new_device(cssid, ssid, schid, devno,
+ "user-configured");
+ } else {
+ cssid = VIRTUAL_CSSID;
+ for (ssid = 0; ssid <= MAX_SSID; ssid++) {
+ for (schid = 0; schid <= MAX_SCHID; schid++) {
+ if (!css_find_subch(1, cssid, ssid, schid)) {
+ sch->cssid = cssid;
+ sch->ssid = ssid;
+ sch->schid = schid;
+ devno = schid;
+ /*
+ * If the devno is already taken, look further in this
+ * subchannel set.
+ */
+ while (css_devno_used(cssid, ssid, devno)) {
+ if (devno == MAX_SCHID) {
+ devno = 0;
+ } else if (devno == schid - 1) {
+ ret = -ENODEV;
+ error_report("No free devno found");
+ goto out_err;
+ } else {
+ devno++;
+ }
+ }
+ sch->devno = devno;
+ css_subch_assign(cssid, ssid, schid, devno, sch);
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ if (!found) {
+ ret = -ENODEV;
+ error_report("Virtual channel subsystem is full!");
+ goto out_err;
+ }
+ trace_virtio_ccw_new_device(cssid, ssid, schid, devno,
+ "auto-configured");
+ }
+
+ /* Build initial schib. */
+ css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);
+
+ sch->ccw_cb = virtio_ccw_cb;
+
+ /* Build senseid data. */
+ memset(&sch->id, 0, sizeof(SenseId));
+ sch->id.reserved = 0xff;
+ sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
+ sch->id.cu_model = dev->vdev->device_id;
+
+ virtio_bind_device(vdev, &virtio_ccw_bindings, DEVICE(dev));
+ /* Only the first 32 feature bits are used. */
+ dev->host_features[0] = vdev->get_features(vdev, dev->host_features[0]);
+ dev->host_features[0] |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
+ dev->host_features[0] |= 0x1 << VIRTIO_F_BAD_FEATURE;
+
+ css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
+ parent->hotplugged, 1);
+ return 0;
+
+out_err:
+ dev->sch = NULL;
+ g_free(sch);
+ return ret;
+}
+
+static int virtio_ccw_exit(VirtioCcwDevice *dev)
+{
+ SubchDev *sch = dev->sch;
+
+ if (sch) {
+ css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
+ g_free(sch);
+ }
+ dev->indicators = 0;
+ return 0;
+}
+
+static int virtio_ccw_net_init(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+
+ vdev = virtio_net_init((DeviceState *)dev, &dev->nic, &dev->net);
+ if (!vdev) {
+ return -1;
+ }
+
+ return virtio_ccw_device_init(dev, vdev);
+}
+
+static int virtio_ccw_net_exit(VirtioCcwDevice *dev)
+{
+ virtio_net_exit(dev->vdev);
+ return virtio_ccw_exit(dev);
+}
+
+static int virtio_ccw_blk_init(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+
+ vdev = virtio_blk_init((DeviceState *)dev, &dev->blk);
+ if (!vdev) {
+ return -1;
+ }
+
+ return virtio_ccw_device_init(dev, vdev);
+}
+
+static int virtio_ccw_blk_exit(VirtioCcwDevice *dev)
+{
+ virtio_blk_exit(dev->vdev);
+ blockdev_mark_auto_del(dev->blk.conf.bs);
+ return virtio_ccw_exit(dev);
+}
+
+static int virtio_ccw_serial_init(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+
+ vdev = virtio_serial_init((DeviceState *)dev, &dev->serial);
+ if (!vdev) {
+ return -1;
+ }
+
+ return virtio_ccw_device_init(dev, vdev);
+}
+
+static int virtio_ccw_serial_exit(VirtioCcwDevice *dev)
+{
+ virtio_serial_exit(dev->vdev);
+ return virtio_ccw_exit(dev);
+}
+
+static int virtio_ccw_balloon_init(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+
+ vdev = virtio_balloon_init((DeviceState *)dev);
+ if (!vdev) {
+ return -1;
+ }
+
+ return virtio_ccw_device_init(dev, vdev);
+}
+
+static int virtio_ccw_balloon_exit(VirtioCcwDevice *dev)
+{
+ virtio_balloon_exit(dev->vdev);
+ return virtio_ccw_exit(dev);
+}
+
+static int virtio_ccw_scsi_init(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+
+ vdev = virtio_scsi_init((DeviceState *)dev, &dev->scsi);
+ if (!vdev) {
+ return -1;
+ }
+
+ return virtio_ccw_device_init(dev, vdev);
+}
+
+static int virtio_ccw_scsi_exit(VirtioCcwDevice *dev)
+{
+ virtio_scsi_exit(dev->vdev);
+ return virtio_ccw_exit(dev);
+}
+
+/* DeviceState to VirtioCcwDevice. Note: used on datapath,
+ * be careful and test performance if you change this.
+ */
+static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
+{
+ return container_of(d, VirtioCcwDevice, parent_obj);
+}
+
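+/*
+ * Set the bit for the notified queue in the guest-provided indicators (or
+ * the configuration change bit in indicators2 for higher vectors) and post
+ * a conditional I/O interrupt for the subchannel.
+ */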
+static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
+{
+ VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
+ SubchDev *sch = dev->sch;
+ uint64_t indicators;
+
+ if (vector >= 128) {
+ return;
+ }
+
+ if (vector < VIRTIO_PCI_QUEUE_MAX) {
+ indicators = ldq_phys(dev->indicators);
+ indicators |= 1ULL << vector;
+ stq_phys(dev->indicators, indicators);
+ } else {
+ vector = 0;
+ indicators = ldq_phys(dev->indicators2);
+ indicators |= 1ULL << vector;
+ stq_phys(dev->indicators2, indicators);
+ }
+
+ css_conditional_io_interrupt(sch);
+}
+
+static unsigned virtio_ccw_get_features(DeviceState *d)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ /* Only the first 32 feature bits are used. */
+ return dev->host_features[0];
+}
+
+static void virtio_ccw_reset(DeviceState *d)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ virtio_reset(dev->vdev);
+ css_reset_sch(dev->sch);
+}
+
+/**************** Virtio-ccw Bus Device Descriptions *******************/
+
+static const VirtIOBindings virtio_ccw_bindings = {
+ .notify = virtio_ccw_notify,
+ .get_features = virtio_ccw_get_features,
+};
+
+static Property virtio_ccw_net_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_VIRTIO_NET_FEATURES(VirtioCcwDevice, host_features[0]),
+ DEFINE_NIC_PROPERTIES(VirtioCcwDevice, nic),
+ DEFINE_PROP_UINT32("x-txtimer", VirtioCcwDevice,
+ net.txtimer, TX_TIMER_INTERVAL),
+ DEFINE_PROP_INT32("x-txburst", VirtioCcwDevice,
+ net.txburst, TX_BURST),
+ DEFINE_PROP_STRING("tx", VirtioCcwDevice, net.tx),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_net_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->init = virtio_ccw_net_init;
+ k->exit = virtio_ccw_net_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_net_properties;
+}
+
+static const TypeInfo virtio_ccw_net = {
+ .name = "virtio-net-ccw",
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_net_class_init,
+};
+
+static Property virtio_ccw_blk_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_BLOCK_PROPERTIES(VirtioCcwDevice, blk.conf),
+ DEFINE_PROP_STRING("serial", VirtioCcwDevice, blk.serial),
+#ifdef __linux__
+ DEFINE_PROP_BIT("scsi", VirtioCcwDevice, blk.scsi, 0, true),
+#endif
+ DEFINE_VIRTIO_BLK_FEATURES(VirtioCcwDevice, host_features[0]),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_blk_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->init = virtio_ccw_blk_init;
+ k->exit = virtio_ccw_blk_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_blk_properties;
+}
+
+static const TypeInfo virtio_ccw_blk = {
+ .name = "virtio-blk-ccw",
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_blk_class_init,
+};
+
+static Property virtio_ccw_serial_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_UINT32("max_ports", VirtioCcwDevice,
+ serial.max_virtserial_ports, 31),
+ DEFINE_VIRTIO_COMMON_FEATURES(VirtioCcwDevice, host_features[0]),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_serial_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->init = virtio_ccw_serial_init;
+ k->exit = virtio_ccw_serial_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_serial_properties;
+}
+
+static const TypeInfo virtio_ccw_serial = {
+ .name = "virtio-serial-ccw",
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_serial_class_init,
+};
+
+static Property virtio_ccw_balloon_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_VIRTIO_COMMON_FEATURES(VirtioCcwDevice, host_features[0]),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_balloon_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->init = virtio_ccw_balloon_init;
+ k->exit = virtio_ccw_balloon_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_balloon_properties;
+}
+
+static const TypeInfo virtio_ccw_balloon = {
+ .name = "virtio-balloon-ccw",
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_balloon_class_init,
+};
+
+static Property virtio_ccw_scsi_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_VIRTIO_SCSI_PROPERTIES(VirtioCcwDevice, host_features[0], scsi),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_scsi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->init = virtio_ccw_scsi_init;
+ k->exit = virtio_ccw_scsi_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_scsi_properties;
+}
+
+static const TypeInfo virtio_ccw_scsi = {
+ .name = "virtio-scsi-ccw",
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_scsi_class_init,
+};
+
+static int virtio_ccw_busdev_init(DeviceState *dev)
+{
+ VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
+ VirtIOCCWDeviceClass *_info = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
+
+ virtio_ccw_bus_new(&_dev->bus, _dev);
+
+ return _info->init(_dev);
+}
+
+static int virtio_ccw_busdev_exit(DeviceState *dev)
+{
+ VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
+ VirtIOCCWDeviceClass *_info = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
+
+ return _info->exit(_dev);
+}
+
+static int virtio_ccw_busdev_unplug(DeviceState *dev)
+{
+ VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
+ SubchDev *sch = _dev->sch;
+
+ /*
+ * We should arrive here only for device_del, since we don't support
+ * direct hot(un)plug of channels, but only through virtio.
+ */
+ assert(sch != NULL);
+ /* Subchannel is now disabled and no longer valid. */
+ sch->curr_status.pmcw.flags &= ~(PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_DNV);
+
+ css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, 1, 0);
+
+ object_unparent(OBJECT(dev));
+ qdev_free(dev);
+ return 0;
+}
+
+static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->init = virtio_ccw_busdev_init;
+ dc->exit = virtio_ccw_busdev_exit;
+ dc->unplug = virtio_ccw_busdev_unplug;
+ dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
+}
+
+static const TypeInfo virtio_ccw_device_info = {
+ .name = TYPE_VIRTIO_CCW_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_device_class_init,
+ .class_size = sizeof(VirtIOCCWDeviceClass),
+ .abstract = true,
+};
+
+/***************** Virtual-css Bus Bridge Device ********************/
+/* Only required to have the virtio bus as child in the system bus */
+
+static int virtual_css_bridge_init(SysBusDevice *dev)
+{
+ /* nothing */
+ return 0;
+}
+
+static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = virtual_css_bridge_init;
+ dc->no_user = 1;
+}
+
+static const TypeInfo virtual_css_bridge_info = {
+ .name = "virtual-css-bridge",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysBusDevice),
+ .class_init = virtual_css_bridge_class_init,
+};
+
+/* virtio-ccw-bus */
+
+void virtio_ccw_bus_new(VirtioBusState *bus, VirtioCcwDevice *dev)
+{
+ DeviceState *qdev = DEVICE(dev);
+ BusState *qbus;
+
+ qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_CCW_BUS, qdev, NULL);
+ qbus = BUS(bus);
+ qbus->allow_hotplug = 0;
+}
+
+static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
+{
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+ BusClass *bus_class = BUS_CLASS(klass);
+
+ bus_class->max_dev = 1;
+ k->notify = virtio_ccw_notify;
+ k->get_features = virtio_ccw_get_features;
+}
+
+static const TypeInfo virtio_ccw_bus_info = {
+ .name = TYPE_VIRTIO_CCW_BUS,
+ .parent = TYPE_VIRTIO_BUS,
+ .instance_size = sizeof(VirtioCcwBusState),
+ .class_init = virtio_ccw_bus_class_init,
+};
+
+static void virtio_ccw_register(void)
+{
+ type_register_static(&virtio_ccw_bus_info);
+ type_register_static(&virtual_css_bus_info);
+ type_register_static(&virtio_ccw_device_info);
+ type_register_static(&virtio_ccw_serial);
+ type_register_static(&virtio_ccw_blk);
+ type_register_static(&virtio_ccw_net);
+ type_register_static(&virtio_ccw_balloon);
+ type_register_static(&virtio_ccw_scsi);
+ type_register_static(&virtual_css_bridge_info);
+}
+
+type_init(virtio_ccw_register)
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
new file mode 100644
index 0000000..48474b3
--- /dev/null
+++ b/hw/s390x/virtio-ccw.h
@@ -0,0 +1,98 @@
+/*
+ * virtio ccw target definitions
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390X_VIRTIO_CCW_H
+#define HW_S390X_VIRTIO_CCW_H
+
+#include <hw/virtio-blk.h>
+#include <hw/virtio-net.h>
+#include <hw/virtio-serial.h>
+#include <hw/virtio-scsi.h>
+#include <hw/virtio-bus.h>
+
+#define VIRTUAL_CSSID 0xfe
+
+#define VIRTIO_CCW_CU_TYPE 0x3832
+#define VIRTIO_CCW_CHPID_TYPE 0x32
+
+#define CCW_CMD_SET_VQ 0x13
+#define CCW_CMD_VDEV_RESET 0x33
+#define CCW_CMD_READ_FEAT 0x12
+#define CCW_CMD_WRITE_FEAT 0x11
+#define CCW_CMD_READ_CONF 0x22
+#define CCW_CMD_WRITE_CONF 0x21
+#define CCW_CMD_WRITE_STATUS 0x31
+#define CCW_CMD_SET_IND 0x43
+#define CCW_CMD_SET_CONF_IND 0x53
+#define CCW_CMD_READ_VQ_CONF 0x32
+
+#define TYPE_VIRTIO_CCW_DEVICE "virtio-ccw-device"
+#define VIRTIO_CCW_DEVICE(obj) \
+ OBJECT_CHECK(VirtioCcwDevice, (obj), TYPE_VIRTIO_CCW_DEVICE)
+#define VIRTIO_CCW_DEVICE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtIOCCWDeviceClass, (klass), TYPE_VIRTIO_CCW_DEVICE)
+#define VIRTIO_CCW_DEVICE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtIOCCWDeviceClass, (obj), TYPE_VIRTIO_CCW_DEVICE)
+
+typedef struct VirtioBusState VirtioCcwBusState;
+typedef struct VirtioBusClass VirtioCcwBusClass;
+
+#define TYPE_VIRTIO_CCW_BUS "virtio-ccw-bus"
+#define VIRTIO_CCW_BUS(obj) \
+    OBJECT_CHECK(VirtioCcwBusState, (obj), TYPE_VIRTIO_CCW_BUS)
+#define VIRTIO_CCW_BUS_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(VirtioCcwBusClass, (obj), TYPE_VIRTIO_CCW_BUS)
+#define VIRTIO_CCW_BUS_CLASS(klass) \
+    OBJECT_CLASS_CHECK(VirtioCcwBusClass, (klass), TYPE_VIRTIO_CCW_BUS)
+
+typedef struct VirtioCcwDevice VirtioCcwDevice;
+
+void virtio_ccw_bus_new(VirtioBusState *bus, VirtioCcwDevice *dev);
+
+typedef struct VirtIOCCWDeviceClass {
+ DeviceClass parent_class;
+ int (*init)(VirtioCcwDevice *dev);
+ int (*exit)(VirtioCcwDevice *dev);
+} VirtIOCCWDeviceClass;
+
+/* Change here if we want to support more feature bits. */
+#define VIRTIO_CCW_FEATURE_SIZE 1
+
+struct VirtioCcwDevice {
+ DeviceState parent_obj;
+ SubchDev *sch;
+ VirtIODevice *vdev;
+ char *bus_id;
+ VirtIOBlkConf blk;
+ NICConf nic;
+ uint32_t host_features[VIRTIO_CCW_FEATURE_SIZE];
+ virtio_serial_conf serial;
+ virtio_net_conf net;
+ VirtIOSCSIConf scsi;
+ VirtioBusState bus;
+ /* Guest provided values: */
+ hwaddr indicators;
+ hwaddr indicators2;
+};
+
+/* virtual css bus type */
+typedef struct VirtualCssBus {
+ BusState parent_obj;
+} VirtualCssBus;
+
+#define TYPE_VIRTUAL_CSS_BUS "virtual-css-bus"
+#define VIRTUAL_CSS_BUS(obj) \
+ OBJECT_CHECK(VirtualCssBus, (obj), TYPE_VIRTUAL_CSS_BUS)
+
+VirtualCssBus *virtual_css_bus_init(void);
+void virtio_ccw_device_update_status(SubchDev *sch);
+VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch);
+#endif
diff --git a/target-s390x/Makefile.objs b/target-s390x/Makefile.objs
index e728abf..3afb0b7 100644
--- a/target-s390x/Makefile.objs
+++ b/target-s390x/Makefile.objs
@@ -1,4 +1,4 @@
obj-y += translate.o helper.o cpu.o interrupt.o
obj-y += int_helper.o fpu_helper.o cc_helper.o mem_helper.o misc_helper.o
-obj-$(CONFIG_SOFTMMU) += machine.o
+obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 1f2d942..9be4a47 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -50,6 +50,11 @@
#define MMU_USER_IDX 1
#define MAX_EXT_QUEUE 16
+#define MAX_IO_QUEUE 16
+#define MAX_MCHK_QUEUE 16
+
+#define PSW_MCHK_MASK 0x0004000000000000
+#define PSW_IO_MASK 0x0200000000000000
typedef struct PSW {
uint64_t mask;
@@ -62,6 +67,17 @@ typedef struct ExtQueue {
uint32_t param64;
} ExtQueue;
+typedef struct IOIntQueue {
+ uint16_t id;
+ uint16_t nr;
+ uint32_t parm;
+ uint32_t word;
+} IOIntQueue;
+
+typedef struct MchkQueue {
+ uint16_t type;
+} MchkQueue;
+
typedef struct CPUS390XState {
uint64_t regs[16]; /* GP registers */
CPU_DoubleU fregs[16]; /* FP registers */
@@ -93,9 +109,17 @@ typedef struct CPUS390XState {
uint64_t cregs[16]; /* control registers */
ExtQueue ext_queue[MAX_EXT_QUEUE];
- int pending_int;
+ IOIntQueue io_queue[MAX_IO_QUEUE][8];
+ MchkQueue mchk_queue[MAX_MCHK_QUEUE];
+ int pending_int;
int ext_index;
+ int io_index[8];
+ int mchk_index;
+
+ uint64_t ckc;
+ uint64_t cputm;
+ uint32_t todpr;
CPU_COMMON
@@ -123,6 +147,9 @@ static inline void cpu_clone_regs(CPUS390XState *env, target_ulong newsp)
}
#endif
+/* distinguish between 24 bit and 31 bit addressing */
+#define HIGH_ORDER_BIT 0x80000000
+
/* Interrupt Codes */
/* Program Interrupts */
#define PGM_OPERATION 0x0001
@@ -300,8 +327,27 @@ int cpu_s390x_handle_mmu_fault (CPUS390XState *env, target_ulong address, int rw
int mmu_idx);
#define cpu_handle_mmu_fault cpu_s390x_handle_mmu_fault
+#include "ioinst.h"
#ifndef CONFIG_USER_ONLY
+void *s390_cpu_physical_memory_map(CPUS390XState *env, hwaddr addr, hwaddr *len,
+ int is_write);
+void s390_cpu_physical_memory_unmap(CPUS390XState *env, void *addr, hwaddr len,
+ int is_write);
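+
+/*
+ * Compute a base/displacement operand address from the instruction's ipb
+ * field: the base register number is in the top nibble, the 12-bit
+ * displacement in the following bits; register 0 means no base.
+ */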
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
+{
+ hwaddr addr = 0;
+ uint8_t reg;
+
+ reg = ipb >> 28;
+ if (reg > 0) {
+ addr = env->regs[reg];
+ }
+ addr += (ipb >> 16) & 0xfff;
+
+ return addr;
+}
+
void s390x_tod_timer(void *opaque);
void s390x_cpu_timer(void *opaque);
@@ -351,6 +397,114 @@ static inline unsigned s390_del_running_cpu(CPUS390XState *env)
void cpu_lock(void);
void cpu_unlock(void);
+typedef struct SubchDev SubchDev;
+
+#ifndef CONFIG_USER_ONLY
+SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
+ uint16_t schid);
+bool css_subch_visible(SubchDev *sch);
+void css_conditional_io_interrupt(SubchDev *sch);
+int css_do_stsch(SubchDev *sch, SCHIB *schib);
+bool css_schid_final(uint8_t cssid, uint8_t ssid, uint16_t schid);
+int css_do_msch(SubchDev *sch, SCHIB *schib);
+int css_do_xsch(SubchDev *sch);
+int css_do_csch(SubchDev *sch);
+int css_do_hsch(SubchDev *sch);
+int css_do_ssch(SubchDev *sch, ORB *orb);
+int css_do_tsch(SubchDev *sch, IRB *irb);
+int css_do_stcrw(CRW *crw);
+int css_do_tpi(IOIntCode *int_code, int lowcore);
+int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
+ int rfmt, void *buf);
+void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
+int css_enable_mcsse(void);
+int css_enable_mss(void);
+int css_do_rsch(SubchDev *sch);
+int css_do_rchp(uint8_t cssid, uint8_t chpid);
+bool css_present(uint8_t cssid);
+#else
+static inline SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
+ uint16_t schid)
+{
+ return NULL;
+}
+static inline bool css_subch_visible(SubchDev *sch)
+{
+ return false;
+}
+static inline void css_conditional_io_interrupt(SubchDev *sch)
+{
+}
+static inline int css_do_stsch(SubchDev *sch, SCHIB *schib)
+{
+ return -ENODEV;
+}
+static inline bool css_schid_final(uint8_t cssid, uint8_t ssid, uint16_t schid)
+{
+ return true;
+}
+static inline int css_do_msch(SubchDev *sch, SCHIB *schib)
+{
+ return -ENODEV;
+}
+static inline int css_do_xsch(SubchDev *sch)
+{
+ return -ENODEV;
+}
+static inline int css_do_csch(SubchDev *sch)
+{
+ return -ENODEV;
+}
+static inline int css_do_hsch(SubchDev *sch)
+{
+ return -ENODEV;
+}
+static inline int css_do_ssch(SubchDev *sch, ORB *orb)
+{
+ return -ENODEV;
+}
+static inline int css_do_tsch(SubchDev *sch, IRB *irb)
+{
+ return -ENODEV;
+}
+static inline int css_do_stcrw(CRW *crw)
+{
+ return 1;
+}
+static inline int css_do_tpi(IOIntCode *int_code, int lowcore)
+{
+ return 0;
+}
+static inline int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid,
+                                       uint8_t l_chpid, int rfmt, void *buf)
+{
+ return 0;
+}
+static inline void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
+{
+}
+static inline int css_enable_mss(void)
+{
+ return -EINVAL;
+}
+static inline int css_enable_mcsse(void)
+{
+ return -EINVAL;
+}
+static inline int css_do_rsch(SubchDev *sch)
+{
+ return -ENODEV;
+}
+static inline int css_do_rchp(uint8_t cssid, uint8_t chpid)
+{
+ return -ENODEV;
+}
+static inline bool css_present(uint8_t cssid)
+{
+ return false;
+}
+#endif
+
static inline void cpu_set_tls(CPUS390XState *env, target_ulong newtls)
{
env->aregs[0] = newtls >> 32;
@@ -370,10 +524,14 @@ void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define EXCP_EXT 1 /* external interrupt */
#define EXCP_SVC 2 /* supervisor call (syscall) */
#define EXCP_PGM 3 /* program interruption */
+#define EXCP_IO 7 /* I/O interrupt */
+#define EXCP_MCHK 8 /* machine check */
#define INTERRUPT_EXT (1 << 0)
#define INTERRUPT_TOD (1 << 1)
#define INTERRUPT_CPUTIMER (1 << 2)
+#define INTERRUPT_IO (1 << 3)
+#define INTERRUPT_MCHK (1 << 4)
/* Program Status Word. */
#define S390_PSWM_REGNUM 0
@@ -836,6 +994,45 @@ static inline void cpu_inject_ext(CPUS390XState *env, uint32_t code, uint32_t pa
cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
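+/*
+ * Queue an I/O interruption on the per-ISC queue selected from the
+ * interruption word; if that queue is already full, the interruption is
+ * silently dropped.
+ */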
+static inline void cpu_inject_io(CPUS390XState *env, uint16_t subchannel_id,
+ uint16_t subchannel_number,
+ uint32_t io_int_parm, uint32_t io_int_word)
+{
+ int isc = ffs(io_int_word << 2) - 1;
+
+ if (env->io_index[isc] == MAX_IO_QUEUE - 1) {
+ /* ugh - can't queue anymore. Let's drop. */
+ return;
+ }
+
+ env->io_index[isc]++;
+ assert(env->io_index[isc] < MAX_IO_QUEUE);
+
+ env->io_queue[env->io_index[isc]][isc].id = subchannel_id;
+ env->io_queue[env->io_index[isc]][isc].nr = subchannel_number;
+ env->io_queue[env->io_index[isc]][isc].parm = io_int_parm;
+ env->io_queue[env->io_index[isc]][isc].word = io_int_word;
+
+ env->pending_int |= INTERRUPT_IO;
+ cpu_interrupt(env, CPU_INTERRUPT_HARD);
+}
+
+static inline void cpu_inject_crw_mchk(CPUS390XState *env)
+{
+ if (env->mchk_index == MAX_MCHK_QUEUE - 1) {
+ /* ugh - can't queue anymore. Let's drop. */
+ return;
+ }
+
+ env->mchk_index++;
+ assert(env->mchk_index < MAX_MCHK_QUEUE);
+
+ env->mchk_queue[env->mchk_index].type = 1;
+
+ env->pending_int |= INTERRUPT_MCHK;
+ cpu_interrupt(env, CPU_INTERRUPT_HARD);
+}
+
static inline bool cpu_has_work(CPUState *cpu)
{
CPUS390XState *env = &S390_CPU(cpu)->env;
@@ -859,4 +1056,52 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
uintptr_t retaddr);
+#include <sysemu/kvm.h>
+
+#ifdef CONFIG_KVM
+void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
+ uint16_t subchannel_nr, uint32_t io_int_parm,
+ uint32_t io_int_word);
+void kvm_s390_crw_mchk(S390CPU *cpu);
+void kvm_s390_enable_css_support(S390CPU *cpu);
+#else
+static inline void kvm_s390_io_interrupt(S390CPU *cpu,
+ uint16_t subchannel_id,
+ uint16_t subchannel_nr,
+ uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+}
+static inline void kvm_s390_crw_mchk(S390CPU *cpu)
+{
+}
+static inline void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+}
+#endif
+
+static inline void s390_io_interrupt(S390CPU *cpu,
+ uint16_t subchannel_id,
+ uint16_t subchannel_nr,
+ uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+ if (kvm_enabled()) {
+ kvm_s390_io_interrupt(cpu, subchannel_id, subchannel_nr, io_int_parm,
+ io_int_word);
+ } else {
+ cpu_inject_io(&cpu->env, subchannel_id, subchannel_nr, io_int_parm,
+ io_int_word);
+ }
+}
+
+static inline void s390_crw_mchk(S390CPU *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_s390_crw_mchk(cpu);
+ } else {
+ cpu_inject_crw_mchk(&cpu->env);
+ }
+}
+
#endif
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index 9a132e6..857c897 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -471,13 +471,56 @@ static uint64_t get_psw_mask(CPUS390XState *env)
return r;
}
+static LowCore *cpu_map_lowcore(CPUS390XState *env)
+{
+ LowCore *lowcore;
+ hwaddr len = sizeof(LowCore);
+
+ lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+
+ if (len < sizeof(LowCore)) {
+ cpu_abort(env, "Could not map lowcore\n");
+ }
+
+ return lowcore;
+}
+
+static void cpu_unmap_lowcore(LowCore *lowcore)
+{
+ cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
+}
+
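+/*
+ * Map guest memory with low-address prefixing applied: accesses below 8K
+ * go to the prefix area, and accesses into the prefix area go to the first
+ * 8K of absolute storage.
+ */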
+void *s390_cpu_physical_memory_map(CPUS390XState *env, hwaddr addr, hwaddr *len,
+ int is_write)
+{
+ hwaddr start = addr;
+
+ /* Mind the prefix area. */
+ if (addr < 8192) {
+ /* Map the lowcore. */
+ start += env->psa;
+ *len = MIN(*len, 8192 - addr);
+ } else if ((addr >= env->psa) && (addr < env->psa + 8192)) {
+ /* Map the 0 page. */
+ start -= env->psa;
+ *len = MIN(*len, 8192 - start);
+ }
+
+ return cpu_physical_memory_map(start, len, is_write);
+}
+
+void s390_cpu_physical_memory_unmap(CPUS390XState *env, void *addr, hwaddr len,
+ int is_write)
+{
+ cpu_physical_memory_unmap(addr, len, is_write, len);
+}
+
static void do_svc_interrupt(CPUS390XState *env)
{
uint64_t mask, addr;
LowCore *lowcore;
- hwaddr len = TARGET_PAGE_SIZE;
- lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+ lowcore = cpu_map_lowcore(env);
lowcore->svc_code = cpu_to_be16(env->int_svc_code);
lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
@@ -486,7 +529,7 @@ static void do_svc_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->svc_new_psw.mask);
addr = be64_to_cpu(lowcore->svc_new_psw.addr);
- cpu_physical_memory_unmap(lowcore, len, 1, len);
+ cpu_unmap_lowcore(lowcore);
load_psw(env, mask, addr);
}
@@ -495,7 +538,6 @@ static void do_program_interrupt(CPUS390XState *env)
{
uint64_t mask, addr;
LowCore *lowcore;
- hwaddr len = TARGET_PAGE_SIZE;
int ilen = env->int_pgm_ilen;
switch (ilen) {
@@ -513,7 +555,7 @@ static void do_program_interrupt(CPUS390XState *env)
qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
__func__, env->int_pgm_code, ilen);
- lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+ lowcore = cpu_map_lowcore(env);
lowcore->pgm_ilen = cpu_to_be16(ilen);
lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
@@ -522,7 +564,7 @@ static void do_program_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->program_new_psw.mask);
addr = be64_to_cpu(lowcore->program_new_psw.addr);
- cpu_physical_memory_unmap(lowcore, len, 1, len);
+ cpu_unmap_lowcore(lowcore);
DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
env->int_pgm_code, ilen, env->psw.mask,
@@ -537,7 +579,6 @@ static void do_ext_interrupt(CPUS390XState *env)
{
uint64_t mask, addr;
LowCore *lowcore;
- hwaddr len = TARGET_PAGE_SIZE;
ExtQueue *q;
if (!(env->psw.mask & PSW_MASK_EXT)) {
@@ -549,7 +590,7 @@ static void do_ext_interrupt(CPUS390XState *env)
}
q = &env->ext_queue[env->ext_index];
- lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+ lowcore = cpu_map_lowcore(env);
lowcore->ext_int_code = cpu_to_be16(q->code);
lowcore->ext_params = cpu_to_be32(q->param);
@@ -560,7 +601,7 @@ static void do_ext_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->external_new_psw.mask);
addr = be64_to_cpu(lowcore->external_new_psw.addr);
- cpu_physical_memory_unmap(lowcore, len, 1, len);
+ cpu_unmap_lowcore(lowcore);
env->ext_index--;
if (env->ext_index == -1) {
@@ -573,12 +614,140 @@ static void do_ext_interrupt(CPUS390XState *env)
load_psw(env, mask, addr);
}
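+/*
+ * Deliver a queued I/O interruption: scan the per-ISC queues, take the
+ * first entry enabled by the ISC mask in control register 6, store the
+ * interruption code in the lowcore and load the I/O new PSW.
+ */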
+static void do_io_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ IOIntQueue *q;
+ uint8_t isc;
+ int disable = 1;
+ int found = 0;
+
+ if (!(env->psw.mask & PSW_MASK_IO)) {
+ cpu_abort(env, "I/O int w/o I/O mask\n");
+ }
+
+ for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
+ if (env->io_index[isc] < 0) {
+ continue;
+ }
+        if (env->io_index[isc] >= MAX_IO_QUEUE) {
+ cpu_abort(env, "I/O queue overrun for isc %d: %d\n",
+ isc, env->io_index[isc]);
+ }
+
+ q = &env->io_queue[env->io_index[isc]][isc];
+ if (!(env->cregs[6] & q->word)) {
+ disable = 0;
+ continue;
+ }
+ found = 1;
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->subchannel_id = cpu_to_be16(q->id);
+ lowcore->subchannel_nr = cpu_to_be16(q->nr);
+ lowcore->io_int_parm = cpu_to_be32(q->parm);
+ lowcore->io_int_word = cpu_to_be32(q->word);
+ lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->io_new_psw.mask);
+ addr = be64_to_cpu(lowcore->io_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ env->io_index[isc]--;
+        if (env->io_index[isc] >= 0) {
+ disable = 0;
+ }
+ break;
+ }
+
+ if (disable) {
+ env->pending_int &= ~INTERRUPT_IO;
+ }
+
+ if (found) {
+ DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+ env->psw.mask, env->psw.addr);
+ load_psw(env, mask, addr);
+ }
+}
+
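+/*
+ * Deliver a channel report pending machine check: store the register save
+ * areas and a fixed MCIC in the lowcore, then load the machine check new
+ * PSW. Only CRW pending machine checks (type 1) are supported.
+ */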
+static void do_mchk_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ MchkQueue *q;
+ int i;
+
+ if (!(env->psw.mask & PSW_MASK_MCHECK)) {
+ cpu_abort(env, "Machine check w/o mchk mask\n");
+ }
+
+    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
+ cpu_abort(env, "Mchk queue overrun: %d\n", env->mchk_index);
+ }
+
+ q = &env->mchk_queue[env->mchk_index];
+
+ if (q->type != 1) {
+ /* Don't know how to handle this... */
+ cpu_abort(env, "Unknown machine check type %d\n", q->type);
+ }
+ if (!(env->cregs[14] & (1 << 28))) {
+ /* CRW machine checks disabled */
+ return;
+ }
+
+ lowcore = cpu_map_lowcore(env);
+
+ for (i = 0; i < 16; i++) {
+ lowcore->floating_pt_save_area[i] = cpu_to_be64(env->fregs[i].ll);
+ lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
+ lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
+ lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
+ }
+ lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
+ lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
+ lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
+ lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
+ lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
+ lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
+ lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);
+
+ lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
+ lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
+ lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
+ addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ env->mchk_index--;
+ if (env->mchk_index == -1) {
+ env->pending_int &= ~INTERRUPT_MCHK;
+ }
+
+ DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+ env->psw.mask, env->psw.addr);
+
+ load_psw(env, mask, addr);
+}
+
void do_interrupt(CPUS390XState *env)
{
qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
__func__, env->exception_index, env->psw.addr);
s390_add_running_cpu(env);
+ /* handle machine checks */
+ if ((env->psw.mask & PSW_MASK_MCHECK) &&
+ (env->exception_index == -1)) {
+ if (env->pending_int & INTERRUPT_MCHK) {
+ env->exception_index = EXCP_MCHK;
+ }
+ }
/* handle external interrupts */
if ((env->psw.mask & PSW_MASK_EXT) &&
env->exception_index == -1) {
@@ -597,6 +766,13 @@ void do_interrupt(CPUS390XState *env)
env->pending_int &= ~INTERRUPT_TOD;
}
}
+ /* handle I/O interrupts */
+ if ((env->psw.mask & PSW_MASK_IO) &&
+ (env->exception_index == -1)) {
+ if (env->pending_int & INTERRUPT_IO) {
+ env->exception_index = EXCP_IO;
+ }
+ }
switch (env->exception_index) {
case EXCP_PGM:
@@ -608,6 +784,12 @@ void do_interrupt(CPUS390XState *env)
case EXCP_EXT:
do_ext_interrupt(env);
break;
+ case EXCP_IO:
+ do_io_interrupt(env);
+ break;
+ case EXCP_MCHK:
+ do_mchk_interrupt(env);
+ break;
}
env->exception_index = -1;
diff --git a/target-s390x/ioinst.c b/target-s390x/ioinst.c
new file mode 100644
index 0000000..e3531f3
--- /dev/null
+++ b/target-s390x/ioinst.c
@@ -0,0 +1,761 @@
+/*
+ * I/O instructions for S/390
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include <sys/types.h>
+
+#include "cpu.h"
+#include "ioinst.h"
+#include "trace.h"
+
+int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
+ int *schid)
+{
+ if (!IOINST_SCHID_ONE(value)) {
+ return -EINVAL;
+ }
+ if (!IOINST_SCHID_M(value)) {
+ if (IOINST_SCHID_CSSID(value)) {
+ return -EINVAL;
+ }
+ *cssid = 0;
+ *m = 0;
+ } else {
+ *cssid = IOINST_SCHID_CSSID(value);
+ *m = 1;
+ }
+ *ssid = IOINST_SCHID_SSID(value);
+ *schid = IOINST_SCHID_NR(value);
+ return 0;
+}
+
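+/*
+ * The instruction handlers below return the condition code to be set (0-3),
+ * or a negative value if a program interruption has been injected instead.
+ */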
+int ioinst_handle_xsch(CPUS390XState *env, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("xsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_xsch(sch);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+
+ return cc;
+}
+
+int ioinst_handle_csch(CPUS390XState *env, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("csch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_csch(sch);
+ }
+ if (ret == -ENODEV) {
+ cc = 3;
+ } else {
+ cc = 0;
+ }
+ return cc;
+}
+
+int ioinst_handle_hsch(CPUS390XState *env, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("hsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_hsch(sch);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+
+ return cc;
+}
+
+static int ioinst_schib_valid(SCHIB *schib)
+{
+ if ((schib->pmcw.flags & PMCW_FLAGS_MASK_INVALID) ||
+ (schib->pmcw.chars & PMCW_CHARS_MASK_INVALID)) {
+ return 0;
+ }
+ /* Disallow extended measurements for now. */
+ if (schib->pmcw.chars & PMCW_CHARS_MASK_XMWME) {
+ return 0;
+ }
+ return 1;
+}
+
+int ioinst_handle_msch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ SCHIB *schib;
+ uint64_t addr;
+ int ret = -ENODEV;
+ int cc;
+ hwaddr len = sizeof(*schib);
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("msch", cssid, ssid, schid);
+ addr = decode_basedisp_s(env, ipb);
+ schib = s390_cpu_physical_memory_map(env, addr, &len, 0);
+ if (!schib || len != sizeof(*schib)) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ cc = -EIO;
+ goto out;
+ }
+ if (!ioinst_schib_valid(schib)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ cc = -EIO;
+ goto out;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_msch(sch, schib);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+out:
+ s390_cpu_physical_memory_unmap(env, schib, len, 0);
+ return cc;
+}
+
+static void copy_orb_from_guest(ORB *dest, const ORB *src)
+{
+ dest->intparm = be32_to_cpu(src->intparm);
+ dest->ctrl0 = be16_to_cpu(src->ctrl0);
+ dest->lpm = src->lpm;
+ dest->ctrl1 = src->ctrl1;
+ dest->cpa = be32_to_cpu(src->cpa);
+}
+
+static int ioinst_orb_valid(ORB *orb)
+{
+ if ((orb->ctrl0 & ORB_CTRL0_MASK_INVALID) ||
+ (orb->ctrl1 & ORB_CTRL1_MASK_INVALID)) {
+ return 0;
+ }
+ if ((orb->cpa & HIGH_ORDER_BIT) != 0) {
+ return 0;
+ }
+ return 1;
+}
+
+int ioinst_handle_ssch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ ORB *orig_orb, orb;
+ uint64_t addr;
+ int ret = -ENODEV;
+ int cc;
+ hwaddr len = sizeof(*orig_orb);
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("ssch", cssid, ssid, schid);
+ addr = decode_basedisp_s(env, ipb);
+ orig_orb = s390_cpu_physical_memory_map(env, addr, &len, 0);
+ if (!orig_orb || len != sizeof(*orig_orb)) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ cc = -EIO;
+ goto out;
+ }
+ copy_orb_from_guest(&orb, orig_orb);
+ if (!ioinst_orb_valid(&orb)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ cc = -EIO;
+ goto out;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_ssch(sch, &orb);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+
+out:
+ s390_cpu_physical_memory_unmap(env, orig_orb, len, 0);
+ return cc;
+}
+
+int ioinst_handle_stcrw(CPUS390XState *env, uint32_t ipb)
+{
+ CRW *crw;
+ uint64_t addr;
+ int cc;
+ hwaddr len = sizeof(*crw);
+
+ addr = decode_basedisp_s(env, ipb);
+ crw = s390_cpu_physical_memory_map(env, addr, &len, 1);
+ if (!crw || len != sizeof(*crw)) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ cc = -EIO;
+ goto out;
+ }
+ cc = css_do_stcrw(crw);
+ /* 0 - crw stored, 1 - zeroes stored */
+out:
+ s390_cpu_physical_memory_unmap(env, crw, len, 1);
+ return cc;
+}
+
+int ioinst_handle_stsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ uint64_t addr;
+ int cc;
+ SCHIB *schib;
+ hwaddr len = sizeof(*schib);
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("stsch", cssid, ssid, schid);
+ addr = decode_basedisp_s(env, ipb);
+ schib = s390_cpu_physical_memory_map(env, addr, &len, 1);
+ if (!schib || len != sizeof(*schib)) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ cc = -EIO;
+ goto out;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch) {
+ if (css_subch_visible(sch)) {
+ css_do_stsch(sch, schib);
+ cc = 0;
+ } else {
+ /* Indicate no more subchannels in this css/ss */
+ cc = 3;
+ }
+ } else {
+ if (css_schid_final(cssid, ssid, schid)) {
+ cc = 3; /* No more subchannels in this css/ss */
+ } else {
+ /* Store an empty schib. */
+ memset(schib, 0, sizeof(*schib));
+ cc = 0;
+ }
+ }
+out:
+ s390_cpu_physical_memory_unmap(env, schib, len, 1);
+ return cc;
+}
+
+int ioinst_handle_tsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ IRB *irb;
+ uint64_t addr;
+ int ret = -ENODEV;
+ int cc;
+ hwaddr len = sizeof(*irb);
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("tsch", cssid, ssid, schid);
+ addr = decode_basedisp_s(env, ipb);
+ irb = s390_cpu_physical_memory_map(env, addr, &len, 1);
+ if (!irb || len != sizeof(*irb)) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ cc = -EIO;
+ goto out;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_tsch(sch, irb);
+ /* 0 - status pending, 1 - not status pending */
+ cc = ret;
+ } else {
+ cc = 3;
+ }
+out:
+    s390_cpu_physical_memory_unmap(env, irb, len, 1);
+ return cc;
+}
+
+typedef struct ChscReq {
+ uint16_t len;
+ uint16_t command;
+ uint32_t param0;
+ uint32_t param1;
+ uint32_t param2;
+} QEMU_PACKED ChscReq;
+
+typedef struct ChscResp {
+ uint16_t len;
+ uint16_t code;
+ uint32_t param;
+ char data[0];
+} QEMU_PACKED ChscResp;
+
+#define CHSC_MIN_RESP_LEN 0x0008
+
+#define CHSC_SCPD 0x0002
+#define CHSC_SCSC 0x0010
+#define CHSC_SDA 0x0031
+
+#define CHSC_SCPD_0_M 0x20000000
+#define CHSC_SCPD_0_C 0x10000000
+#define CHSC_SCPD_0_FMT 0x0f000000
+#define CHSC_SCPD_0_CSSID 0x00ff0000
+#define CHSC_SCPD_0_RFMT 0x00000f00
+#define CHSC_SCPD_0_RES 0xc000f000
+#define CHSC_SCPD_1_RES 0xffffff00
+#define CHSC_SCPD_01_CHPID 0x000000ff
+static void ioinst_handle_chsc_scpd(ChscReq *req, ChscResp *res)
+{
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint32_t param1 = be32_to_cpu(req->param1);
+ uint16_t resp_code;
+ int rfmt;
+ uint16_t cssid;
+ uint8_t f_chpid, l_chpid;
+ int desc_size;
+ int m;
+
+ rfmt = (param0 & CHSC_SCPD_0_RFMT) >> 8;
+ if ((rfmt == 0) || (rfmt == 1)) {
+ rfmt = !!(param0 & CHSC_SCPD_0_C);
+ }
+ if ((len != 0x0010) || (param0 & CHSC_SCPD_0_RES) ||
+ (param1 & CHSC_SCPD_1_RES) || req->param2) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ if (param0 & CHSC_SCPD_0_FMT) {
+ resp_code = 0x0007;
+ goto out_err;
+ }
+ cssid = (param0 & CHSC_SCPD_0_CSSID) >> 16;
+ m = param0 & CHSC_SCPD_0_M;
+ if (cssid != 0) {
+ if (!m || !css_present(cssid)) {
+ resp_code = 0x0008;
+ goto out_err;
+ }
+ }
+ f_chpid = param0 & CHSC_SCPD_01_CHPID;
+ l_chpid = param1 & CHSC_SCPD_01_CHPID;
+ if (l_chpid < f_chpid) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ /* css_collect_chp_desc() is endian-aware */
+ desc_size = css_collect_chp_desc(m, cssid, f_chpid, l_chpid, rfmt,
+ &res->data);
+ res->code = cpu_to_be16(0x0001);
+ res->len = cpu_to_be16(8 + desc_size);
+ res->param = cpu_to_be32(rfmt);
+ return;
+
+ out_err:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = cpu_to_be32(rfmt);
+}
+
+#define CHSC_SCSC_0_M 0x20000000
+#define CHSC_SCSC_0_FMT 0x000f0000
+#define CHSC_SCSC_0_CSSID 0x0000ff00
+#define CHSC_SCSC_0_RES 0xdff000ff
+static void ioinst_handle_chsc_scsc(ChscReq *req, ChscResp *res)
+{
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint8_t cssid;
+ uint16_t resp_code;
+ uint32_t general_chars[510];
+ uint32_t chsc_chars[508];
+
+ if (len != 0x0010) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+
+ if (param0 & CHSC_SCSC_0_FMT) {
+ resp_code = 0x0007;
+ goto out_err;
+ }
+ cssid = (param0 & CHSC_SCSC_0_CSSID) >> 8;
+ if (cssid != 0) {
+ if (!(param0 & CHSC_SCSC_0_M) || !css_present(cssid)) {
+ resp_code = 0x0008;
+ goto out_err;
+ }
+ }
+ if ((param0 & CHSC_SCSC_0_RES) || req->param1 || req->param2) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ res->code = cpu_to_be16(0x0001);
+ res->len = cpu_to_be16(4080);
+ res->param = 0;
+
+ memset(general_chars, 0, sizeof(general_chars));
+ memset(chsc_chars, 0, sizeof(chsc_chars));
+
+ general_chars[0] = cpu_to_be32(0x03000000);
+ general_chars[1] = cpu_to_be32(0x00059000);
+
+ chsc_chars[0] = cpu_to_be32(0x40000000);
+ chsc_chars[3] = cpu_to_be32(0x00040000);
+
+ memcpy(res->data, general_chars, sizeof(general_chars));
+ memcpy(res->data + sizeof(general_chars), chsc_chars, sizeof(chsc_chars));
+ return;
+
+ out_err:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = 0;
+}
+
+#define CHSC_SDA_0_FMT 0x0f000000
+#define CHSC_SDA_0_OC 0x0000ffff
+#define CHSC_SDA_0_RES 0xf0ff0000
+#define CHSC_SDA_OC_MCSSE 0x0
+#define CHSC_SDA_OC_MSS 0x2
+static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res)
+{
+ uint16_t resp_code = 0x0001;
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint16_t oc;
+ int ret;
+
+ if ((len != 0x0400) || (param0 & CHSC_SDA_0_RES)) {
+ resp_code = 0x0003;
+ goto out;
+ }
+
+ if (param0 & CHSC_SDA_0_FMT) {
+ resp_code = 0x0007;
+ goto out;
+ }
+
+ oc = param0 & CHSC_SDA_0_OC;
+ switch (oc) {
+ case CHSC_SDA_OC_MCSSE:
+ ret = css_enable_mcsse();
+ if (ret == -EINVAL) {
+ resp_code = 0x0101;
+ goto out;
+ }
+ break;
+ case CHSC_SDA_OC_MSS:
+ ret = css_enable_mss();
+ if (ret == -EINVAL) {
+ resp_code = 0x0101;
+ goto out;
+ }
+ break;
+ default:
+ resp_code = 0x0003;
+ goto out;
+ }
+
+out:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = 0;
+}
+
+static void ioinst_handle_chsc_unimplemented(ChscResp *res)
+{
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->code = cpu_to_be16(0x0004);
+ res->param = 0;
+}
+
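+/*
+ * The CHSC command request block and the response block share a single
+ * 4K page; the response is built immediately after the request area.
+ */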
+int ioinst_handle_chsc(CPUS390XState *env, uint32_t ipb)
+{
+ ChscReq *req;
+ ChscResp *res;
+ uint64_t addr;
+ int reg;
+ uint16_t len;
+ uint16_t command;
+ hwaddr map_size = TARGET_PAGE_SIZE;
+ int ret = 0;
+
+ trace_ioinst("chsc");
+ reg = (ipb >> 20) & 0x00f;
+ addr = env->regs[reg];
+ /* Page boundary? */
+ if (addr & 0xfff) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return -EIO;
+ }
+ req = s390_cpu_physical_memory_map(env, addr, &map_size, 1);
+ if (!req || map_size != TARGET_PAGE_SIZE) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ ret = -EIO;
+ goto out;
+ }
+ len = be16_to_cpu(req->len);
+ /* Length field valid? */
+ if ((len < 16) || (len > 4088) || (len & 7)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ ret = -EIO;
+ goto out;
+ }
+ memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
+ res = (void *)((char *)req + len);
+ command = be16_to_cpu(req->command);
+ trace_ioinst_chsc_cmd(command, len);
+ switch (command) {
+ case CHSC_SCSC:
+ ioinst_handle_chsc_scsc(req, res);
+ break;
+ case CHSC_SCPD:
+ ioinst_handle_chsc_scpd(req, res);
+ break;
+ case CHSC_SDA:
+ ioinst_handle_chsc_sda(req, res);
+ break;
+ default:
+ ioinst_handle_chsc_unimplemented(res);
+ break;
+ }
+
+out:
+ s390_cpu_physical_memory_unmap(env, req, map_size, 1);
+ return ret;
+}
+
+int ioinst_handle_tpi(CPUS390XState *env, uint32_t ipb)
+{
+ uint64_t addr;
+ int lowcore;
+ IOIntCode *int_code;
+ hwaddr len, orig_len;
+ int ret;
+
+ trace_ioinst("tpi");
+ addr = decode_basedisp_s(env, ipb);
+ lowcore = addr ? 0 : 1;
+ len = lowcore ? 8 /* two words */ : 12 /* three words */;
+ orig_len = len;
+ int_code = s390_cpu_physical_memory_map(env, addr, &len, 1);
+ if (!int_code || (len != orig_len)) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ ret = -EIO;
+ goto out;
+ }
+ ret = css_do_tpi(int_code, lowcore);
+out:
+ s390_cpu_physical_memory_unmap(env, int_code, len, 1);
+ return ret;
+}
+
+#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc)
+#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28)
+#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1)
+#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001)
+
+int ioinst_handle_schm(CPUS390XState *env, uint64_t reg1, uint64_t reg2,
+ uint32_t ipb)
+{
+ uint8_t mbk;
+ int update;
+ int dct;
+
+ trace_ioinst("schm");
+
+ if (SCHM_REG1_RES(reg1)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+
+ mbk = SCHM_REG1_MBK(reg1);
+ update = SCHM_REG1_UPD(reg1);
+ dct = SCHM_REG1_DCT(reg1);
+
+ if (update && (reg2 & 0x0000000000000fff)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+
+ css_do_schm(mbk, update, dct, update ? reg2 : 0);
+
+ return 0;
+}
+
+int ioinst_handle_rsch(CPUS390XState *env, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("rsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_rsch(sch);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EINVAL:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+
+ return cc;
+}
+
+#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
+#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16)
+#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff)
+int ioinst_handle_rchp(CPUS390XState *env, uint64_t reg1)
+{
+ int cc;
+ uint8_t cssid;
+ uint8_t chpid;
+ int ret;
+
+ if (RCHP_REG1_RES(reg1)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+
+ cssid = RCHP_REG1_CSSID(reg1);
+ chpid = RCHP_REG1_CHPID(reg1);
+
+ trace_ioinst_chp_id("rchp", cssid, chpid);
+
+ ret = css_do_rchp(cssid, chpid);
+
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ /* Invalid channel subsystem. */
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+
+ return cc;
+}
+
+#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)
+int ioinst_handle_sal(CPUS390XState *env, uint64_t reg1)
+{
+ /* We do not provide address limit checking, so let's suppress it. */
+ if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ return 0;
+}
diff --git a/target-s390x/ioinst.h b/target-s390x/ioinst.h
new file mode 100644
index 0000000..d5a43f4
--- /dev/null
+++ b/target-s390x/ioinst.h
@@ -0,0 +1,230 @@
+/*
+ * S/390 channel I/O instructions
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+*/
+
+#ifndef IOINST_S390X_H
+#define IOINST_S390X_H
+/*
+ * Channel I/O related definitions, as defined in the Principles
+ * Of Operation (and taken from the Linux implementation).
+ */
+
+/* subchannel status word (command mode only) */
+typedef struct SCSW {
+ uint16_t flags;
+ uint16_t ctrl;
+ uint32_t cpa;
+ uint8_t dstat;
+ uint8_t cstat;
+ uint16_t count;
+} QEMU_PACKED SCSW;
+
+#define SCSW_FLAGS_MASK_KEY 0xf000
+#define SCSW_FLAGS_MASK_SCTL 0x0800
+#define SCSW_FLAGS_MASK_ESWF 0x0400
+#define SCSW_FLAGS_MASK_CC 0x0300
+#define SCSW_FLAGS_MASK_FMT 0x0080
+#define SCSW_FLAGS_MASK_PFCH 0x0040
+#define SCSW_FLAGS_MASK_ISIC 0x0020
+#define SCSW_FLAGS_MASK_ALCC 0x0010
+#define SCSW_FLAGS_MASK_SSI 0x0008
+#define SCSW_FLAGS_MASK_ZCC 0x0004
+#define SCSW_FLAGS_MASK_ECTL 0x0002
+#define SCSW_FLAGS_MASK_PNO 0x0001
+
+#define SCSW_CTRL_MASK_FCTL 0x7000
+#define SCSW_CTRL_MASK_ACTL 0x0fe0
+#define SCSW_CTRL_MASK_STCTL 0x001f
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1000
+#define SCSW_FCTL_HALT_FUNC 0x2000
+#define SCSW_FCTL_START_FUNC 0x4000
+
+#define SCSW_ACTL_SUSP 0x0020
+#define SCSW_ACTL_DEVICE_ACTIVE 0x0040
+#define SCSW_ACTL_SUBCH_ACTIVE 0x0080
+#define SCSW_ACTL_CLEAR_PEND 0x0100
+#define SCSW_ACTL_HALT_PEND 0x0200
+#define SCSW_ACTL_START_PEND 0x0400
+#define SCSW_ACTL_RESUME_PEND 0x0800
+
+#define SCSW_STCTL_STATUS_PEND 0x0001
+#define SCSW_STCTL_SECONDARY 0x0002
+#define SCSW_STCTL_PRIMARY 0x0004
+#define SCSW_STCTL_INTERMEDIATE 0x0008
+#define SCSW_STCTL_ALERT 0x0010
+
+#define SCSW_DSTAT_ATTENTION 0x80
+#define SCSW_DSTAT_STAT_MOD 0x40
+#define SCSW_DSTAT_CU_END 0x20
+#define SCSW_DSTAT_BUSY 0x10
+#define SCSW_DSTAT_CHANNEL_END 0x08
+#define SCSW_DSTAT_DEVICE_END 0x04
+#define SCSW_DSTAT_UNIT_CHECK 0x02
+#define SCSW_DSTAT_UNIT_EXCEP 0x01
+
+#define SCSW_CSTAT_PCI 0x80
+#define SCSW_CSTAT_INCORR_LEN 0x40
+#define SCSW_CSTAT_PROG_CHECK 0x20
+#define SCSW_CSTAT_PROT_CHECK 0x10
+#define SCSW_CSTAT_DATA_CHECK 0x08
+#define SCSW_CSTAT_CHN_CTRL_CHK 0x04
+#define SCSW_CSTAT_INTF_CTRL_CHK 0x02
+#define SCSW_CSTAT_CHAIN_CHECK 0x01
+
+/* path management control word */
+typedef struct PMCW {
+ uint32_t intparm;
+ uint16_t flags;
+ uint16_t devno;
+ uint8_t lpm;
+ uint8_t pnom;
+ uint8_t lpum;
+ uint8_t pim;
+ uint16_t mbi;
+ uint8_t pom;
+ uint8_t pam;
+ uint8_t chpid[8];
+ uint32_t chars;
+} QEMU_PACKED PMCW;
+
+#define PMCW_FLAGS_MASK_QF 0x8000
+#define PMCW_FLAGS_MASK_W 0x4000
+#define PMCW_FLAGS_MASK_ISC 0x3800
+#define PMCW_FLAGS_MASK_ENA 0x0080
+#define PMCW_FLAGS_MASK_LM 0x0060
+#define PMCW_FLAGS_MASK_MME 0x0018
+#define PMCW_FLAGS_MASK_MP 0x0004
+#define PMCW_FLAGS_MASK_TF 0x0002
+#define PMCW_FLAGS_MASK_DNV 0x0001
+#define PMCW_FLAGS_MASK_INVALID 0x0700
+
+#define PMCW_CHARS_MASK_ST 0x00e00000
+#define PMCW_CHARS_MASK_MBFC 0x00000004
+#define PMCW_CHARS_MASK_XMWME 0x00000002
+#define PMCW_CHARS_MASK_CSENSE 0x00000001
+#define PMCW_CHARS_MASK_INVALID 0xff1ffff8
+
+/* subchannel information block */
+typedef struct SCHIB {
+ PMCW pmcw;
+ SCSW scsw;
+ uint64_t mba;
+ uint8_t mda[4];
+} QEMU_PACKED SCHIB;
+
+/* interruption response block */
+typedef struct IRB {
+ SCSW scsw;
+ uint32_t esw[5];
+ uint32_t ecw[8];
+ uint32_t emw[8];
+} QEMU_PACKED IRB;
+
+/* operation request block */
+typedef struct ORB {
+ uint32_t intparm;
+ uint16_t ctrl0;
+ uint8_t lpm;
+ uint8_t ctrl1;
+ uint32_t cpa;
+} QEMU_PACKED ORB;
+
+#define ORB_CTRL0_MASK_KEY 0xf000
+#define ORB_CTRL0_MASK_SPND 0x0800
+#define ORB_CTRL0_MASK_STR 0x0400
+#define ORB_CTRL0_MASK_MOD 0x0200
+#define ORB_CTRL0_MASK_SYNC 0x0100
+#define ORB_CTRL0_MASK_FMT 0x0080
+#define ORB_CTRL0_MASK_PFCH 0x0040
+#define ORB_CTRL0_MASK_ISIC 0x0020
+#define ORB_CTRL0_MASK_ALCC 0x0010
+#define ORB_CTRL0_MASK_SSIC 0x0008
+#define ORB_CTRL0_MASK_C64 0x0002
+#define ORB_CTRL0_MASK_I2K 0x0001
+#define ORB_CTRL0_MASK_INVALID 0x0004
+
+#define ORB_CTRL1_MASK_ILS 0x80
+#define ORB_CTRL1_MASK_MIDAW 0x40
+#define ORB_CTRL1_MASK_ORBX 0x01
+#define ORB_CTRL1_MASK_INVALID 0x3e
+
+/* channel command word (type 1) */
+typedef struct CCW1 {
+ uint8_t cmd_code;
+ uint8_t flags;
+ uint16_t count;
+ uint32_t cda;
+} QEMU_PACKED CCW1;
+
+#define CCW_FLAG_DC 0x80
+#define CCW_FLAG_CC 0x40
+#define CCW_FLAG_SLI 0x20
+#define CCW_FLAG_SKIP 0x10
+#define CCW_FLAG_PCI 0x08
+#define CCW_FLAG_IDA 0x04
+#define CCW_FLAG_SUSPEND 0x02
+
+#define CCW_CMD_NOOP 0x03
+#define CCW_CMD_BASIC_SENSE 0x04
+#define CCW_CMD_TIC 0x08
+#define CCW_CMD_SENSE_ID 0xe4
+
+typedef struct CRW {
+ uint16_t flags;
+ uint16_t rsid;
+} QEMU_PACKED CRW;
+
+#define CRW_FLAGS_MASK_S 0x4000
+#define CRW_FLAGS_MASK_R 0x2000
+#define CRW_FLAGS_MASK_C 0x1000
+#define CRW_FLAGS_MASK_RSC 0x0f00
+#define CRW_FLAGS_MASK_A 0x0080
+#define CRW_FLAGS_MASK_ERC 0x003f
+
+#define CRW_ERC_INIT 0x02
+#define CRW_ERC_IPI 0x04
+
+#define CRW_RSC_SUBCH 0x3
+#define CRW_RSC_CHP 0x4
+
+/* I/O interruption code */
+typedef struct IOIntCode {
+ uint32_t subsys_id;
+ uint32_t intparm;
+ uint32_t interrupt_id;
+} QEMU_PACKED IOIntCode;
+
+/* schid disintegration */
+#define IOINST_SCHID_ONE(_schid) ((_schid & 0x00010000) >> 16)
+#define IOINST_SCHID_M(_schid) ((_schid & 0x00080000) >> 19)
+#define IOINST_SCHID_CSSID(_schid) ((_schid & 0xff000000) >> 24)
+#define IOINST_SCHID_SSID(_schid) ((_schid & 0x00060000) >> 17)
+#define IOINST_SCHID_NR(_schid) (_schid & 0x0000ffff)
+
+int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
+ int *schid);
+int ioinst_handle_xsch(CPUS390XState *env, uint64_t reg1);
+int ioinst_handle_csch(CPUS390XState *env, uint64_t reg1);
+int ioinst_handle_hsch(CPUS390XState *env, uint64_t reg1);
+int ioinst_handle_msch(CPUS390XState *env, uint64_t reg1, uint32_t ipb);
+int ioinst_handle_ssch(CPUS390XState *env, uint64_t reg1, uint32_t ipb);
+int ioinst_handle_stcrw(CPUS390XState *env, uint32_t ipb);
+int ioinst_handle_stsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb);
+int ioinst_handle_tsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb);
+int ioinst_handle_chsc(CPUS390XState *env, uint32_t ipb);
+int ioinst_handle_tpi(CPUS390XState *env, uint32_t ipb);
+int ioinst_handle_schm(CPUS390XState *env, uint64_t reg1, uint64_t reg2,
+ uint32_t ipb);
+int ioinst_handle_rsch(CPUS390XState *env, uint64_t reg1);
+int ioinst_handle_rchp(CPUS390XState *env, uint64_t reg1);
+int ioinst_handle_sal(CPUS390XState *env, uint64_t reg1);
+
+#endif
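
As a quick illustration of the schid disassembly macros defined above, the following stand-alone sketch (not part of the patch; the register value 0x00010003 is a made-up example) decodes a subchannel identification word the way the I/O instruction handlers would see it in general register 1:

/* Stand-alone sketch: decode a subchannel identification word with the
 * IOINST_SCHID_* macros (copied here from the header so this compiles
 * on its own). */
#include <stdint.h>
#include <stdio.h>

#define IOINST_SCHID_ONE(_schid)   ((_schid & 0x00010000) >> 16)
#define IOINST_SCHID_M(_schid)     ((_schid & 0x00080000) >> 19)
#define IOINST_SCHID_CSSID(_schid) ((_schid & 0xff000000) >> 24)
#define IOINST_SCHID_SSID(_schid)  ((_schid & 0x00060000) >> 17)
#define IOINST_SCHID_NR(_schid)    (_schid & 0x0000ffff)

int main(void)
{
    /* Hypothetical register 1 contents: "one" bit set, css 0, ssid 0,
     * subchannel number 0x0003. */
    uint32_t reg1 = 0x00010003;

    printf("one=%d m=%d cssid=%x ssid=%x schid=%04x\n",
           (int)IOINST_SCHID_ONE(reg1), (int)IOINST_SCHID_M(reg1),
           IOINST_SCHID_CSSID(reg1), IOINST_SCHID_SSID(reg1),
           IOINST_SCHID_NR(reg1));
    /* Prints: one=1 m=0 cssid=0 ssid=0 schid=0003 */
    return 0;
}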
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 99deddf..2c24182 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -47,9 +47,29 @@
#define IPA0_DIAG 0x8300
#define IPA0_SIGP 0xae00
-#define IPA0_PRIV 0xb200
+#define IPA0_B2 0xb200
+#define IPA0_B9 0xb900
+#define IPA0_EB 0xeb00
#define PRIV_SCLP_CALL 0x20
+#define PRIV_CSCH 0x30
+#define PRIV_HSCH 0x31
+#define PRIV_MSCH 0x32
+#define PRIV_SSCH 0x33
+#define PRIV_STSCH 0x34
+#define PRIV_TSCH 0x35
+#define PRIV_TPI 0x36
+#define PRIV_SAL 0x37
+#define PRIV_RSCH 0x38
+#define PRIV_STCRW 0x39
+#define PRIV_STCPS 0x3a
+#define PRIV_RCHP 0x3b
+#define PRIV_SCHM 0x3c
+#define PRIV_CHSC 0x5f
+#define PRIV_SIGA 0x74
+#define PRIV_XSCH 0x76
+#define PRIV_SQBS 0x8a
+#define PRIV_EQBS 0x9c
#define DIAG_KVM_HYPERCALL 0x500
#define DIAG_KVM_BREAKPOINT 0x501
@@ -380,10 +400,123 @@ static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
return 0;
}
-static int handle_priv(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+static int kvm_handle_css_inst(S390CPU *cpu, struct kvm_run *run,
+ uint8_t ipa0, uint8_t ipa1, uint8_t ipb)
+{
+ int r = 0;
+ int no_cc = 0;
+ CPUS390XState *env = &cpu->env;
+
+ if (ipa0 != 0xb2) {
+ /* Not handled for now. */
+ return -1;
+ }
+ cpu_synchronize_state(env);
+ switch (ipa1) {
+ case PRIV_XSCH:
+ r = ioinst_handle_xsch(env, env->regs[1]);
+ break;
+ case PRIV_CSCH:
+ r = ioinst_handle_csch(env, env->regs[1]);
+ break;
+ case PRIV_HSCH:
+ r = ioinst_handle_hsch(env, env->regs[1]);
+ break;
+ case PRIV_MSCH:
+ r = ioinst_handle_msch(env, env->regs[1], run->s390_sieic.ipb);
+ break;
+ case PRIV_SSCH:
+ r = ioinst_handle_ssch(env, env->regs[1], run->s390_sieic.ipb);
+ break;
+ case PRIV_STCRW:
+ r = ioinst_handle_stcrw(env, run->s390_sieic.ipb);
+ break;
+ case PRIV_STSCH:
+ r = ioinst_handle_stsch(env, env->regs[1], run->s390_sieic.ipb);
+ break;
+ case PRIV_TSCH:
+ /* We should only get tsch via KVM_EXIT_S390_TSCH. */
+ fprintf(stderr, "Spurious tsch intercept\n");
+ break;
+ case PRIV_CHSC:
+ r = ioinst_handle_chsc(env, run->s390_sieic.ipb);
+ break;
+ case PRIV_TPI:
+ /* This should have been handled by kvm already. */
+ fprintf(stderr, "Spurious tpi intercept\n");
+ break;
+ case PRIV_SCHM:
+ no_cc = 1;
+ r = ioinst_handle_schm(env, env->regs[1], env->regs[2],
+ run->s390_sieic.ipb);
+ break;
+ case PRIV_RSCH:
+ r = ioinst_handle_rsch(env, env->regs[1]);
+ break;
+ case PRIV_RCHP:
+ r = ioinst_handle_rchp(env, env->regs[1]);
+ break;
+ case PRIV_STCPS:
+        /* We do not provide this instruction; it is suppressed. */
+ no_cc = 1;
+ r = 0;
+ break;
+ case PRIV_SAL:
+ no_cc = 1;
+ r = ioinst_handle_sal(env, env->regs[1]);
+ break;
+ default:
+ r = -1;
+ break;
+ }
+
+ if (r >= 0) {
+ if (!no_cc) {
+ setcc(cpu, r);
+ }
+ r = 0;
+ } else if (r < -1) {
+ r = 0;
+ }
+ return r;
+}
+
+static int is_ioinst(uint8_t ipa0, uint8_t ipa1, uint8_t ipb)
+{
+ int ret = 0;
+ uint16_t ipa = (ipa0 << 8) | ipa1;
+
+ switch (ipa) {
+ case IPA0_B2 | PRIV_CSCH:
+ case IPA0_B2 | PRIV_HSCH:
+ case IPA0_B2 | PRIV_MSCH:
+ case IPA0_B2 | PRIV_SSCH:
+ case IPA0_B2 | PRIV_STSCH:
+ case IPA0_B2 | PRIV_TPI:
+ case IPA0_B2 | PRIV_SAL:
+ case IPA0_B2 | PRIV_RSCH:
+ case IPA0_B2 | PRIV_STCRW:
+ case IPA0_B2 | PRIV_STCPS:
+ case IPA0_B2 | PRIV_RCHP:
+ case IPA0_B2 | PRIV_SCHM:
+ case IPA0_B2 | PRIV_CHSC:
+ case IPA0_B2 | PRIV_SIGA:
+ case IPA0_B2 | PRIV_XSCH:
+ case IPA0_B9 | PRIV_EQBS:
+ case IPA0_EB | PRIV_SQBS:
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+static int handle_priv(S390CPU *cpu, struct kvm_run *run,
+ uint8_t ipa0, uint8_t ipa1)
{
int r = 0;
uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
+ uint8_t ipb = run->s390_sieic.ipb & 0xff;
dprintf("KVM: PRIV: %d\n", ipa1);
switch (ipa1) {
@@ -391,8 +524,16 @@ static int handle_priv(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
r = kvm_sclp_service_call(cpu, run, ipbh0);
break;
default:
- dprintf("KVM: unknown PRIV: 0x%x\n", ipa1);
- r = -1;
+ if (is_ioinst(ipa0, ipa1, ipb)) {
+ r = kvm_handle_css_inst(cpu, run, ipa0, ipa1, ipb);
+ if (r == -1) {
+ setcc(cpu, 3);
+ r = 0;
+ }
+ } else {
+ dprintf("KVM: unknown PRIV: 0x%x\n", ipa1);
+ r = -1;
+ }
break;
}
@@ -533,15 +674,17 @@ static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
dprintf("handle_instruction 0x%x 0x%x\n", run->s390_sieic.ipa, run->s390_sieic.ipb);
switch (ipa0) {
- case IPA0_PRIV:
- r = handle_priv(cpu, run, ipa1);
- break;
- case IPA0_DIAG:
- r = handle_diag(env, run, ipb_code);
- break;
- case IPA0_SIGP:
- r = handle_sigp(cpu, run, ipa1);
- break;
+ case IPA0_B2:
+ case IPA0_B9:
+ case IPA0_EB:
+ r = handle_priv(cpu, run, ipa0 >> 8, ipa1);
+ break;
+ case IPA0_DIAG:
+ r = handle_diag(env, run, ipb_code);
+ break;
+ case IPA0_SIGP:
+ r = handle_sigp(cpu, run, ipa1);
+ break;
}
if (r < 0) {
@@ -600,6 +743,43 @@ static int handle_intercept(S390CPU *cpu)
return r;
}
+static int handle_tsch(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+ int ret;
+
+ cpu_synchronize_state(env);
+ ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
+ if (ret >= 0) {
+ /* Success; set condition code. */
+ setcc(cpu, ret);
+ ret = 0;
+ } else if (ret < -1) {
+ /*
+ * Failure.
+ * If an I/O interrupt had been dequeued, we have to reinject it.
+ */
+ if (run->s390_tsch.dequeued) {
+ uint16_t subchannel_id = run->s390_tsch.subchannel_id;
+ uint16_t subchannel_nr = run->s390_tsch.subchannel_nr;
+ uint32_t io_int_parm = run->s390_tsch.io_int_parm;
+ uint32_t io_int_word = run->s390_tsch.io_int_word;
+ uint32_t type = ((subchannel_id & 0xff00) << 24) |
+ ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
+
+ kvm_s390_interrupt_internal(cpu, type,
+ ((uint32_t)subchannel_id << 16)
+ | subchannel_nr,
+ ((uint64_t)io_int_parm << 32)
+ | io_int_word, 1);
+ }
+ ret = 0;
+ }
+ return ret;
+}
+
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
S390CPU *cpu = S390_CPU(cs);
@@ -612,6 +792,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
case KVM_EXIT_S390_RESET:
qemu_system_reset_request();
break;
+ case KVM_EXIT_S390_TSCH:
+ ret = handle_tsch(cpu);
+ break;
default:
fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
break;
@@ -637,3 +820,33 @@ int kvm_arch_on_sigbus(int code, void *addr)
{
return 1;
}
+
+void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
+ uint16_t subchannel_nr, uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+ uint32_t type;
+
+ type = ((subchannel_id & 0xff00) << 24) |
+ ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
+ kvm_s390_interrupt_internal(cpu, type,
+ ((uint32_t)subchannel_id << 16) | subchannel_nr,
+ ((uint64_t)io_int_parm << 32) | io_int_word, 1);
+}
+
+void kvm_s390_crw_mchk(S390CPU *cpu)
+{
+ kvm_s390_interrupt_internal(cpu, KVM_S390_MCHK, 1 << 28,
+ 0x00400f1d40330000, 1);
+}
+
+void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+ struct kvm_enable_cap cap = {};
+ int r;
+
+ /* Activate host kernel channel subsystem support. */
+ cap.cap = KVM_CAP_S390_CSS_SUPPORT;
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_ENABLE_CAP, &cap);
+ assert(r == 0);
+}
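
To make the bit packing in kvm_s390_io_interrupt() above easier to follow, here is a small worked example (not part of the patch; the subchannel values are made up) that computes the interrupt type word and the combined id/nr word for subchannel_id 0x0001 and subchannel_nr 0x0003:

/* Worked example of the packing used when injecting an I/O interrupt. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t subchannel_id = 0x0001;   /* hypothetical: "one" bit set, ssid 0 */
    uint16_t subchannel_nr = 0x0003;
    uint32_t type, id_nr;

    type = ((subchannel_id & 0xff00) << 24) |
        ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    id_nr = ((uint32_t)subchannel_id << 16) | subchannel_nr;

    /* Prints: type=00030000 id_nr=00010003 for these values. */
    printf("type=%08x id_nr=%08x\n", type, id_nr);
    return 0;
}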
diff --git a/trace-events b/trace-events
index 2b28076..1011f27 100644
--- a/trace-events
+++ b/trace-events
@@ -1072,3 +1072,21 @@ xics_ics_eoi(int nr) "ics_eoi: irq %#x"
hbitmap_iter_skip_words(const void *hb, void *hbi, uint64_t pos, unsigned long cur) "hb %p hbi %p pos %"PRId64" cur 0x%lx"
hbitmap_reset(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64
hbitmap_set(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64
+
+# target-s390x/ioinst.c
+ioinst(const char *insn) "IOINST: %s"
+ioinst_sch_id(const char *insn, int cssid, int ssid, int schid) "IOINST: %s (%x.%x.%04x)"
+ioinst_chp_id(const char *insn, int cssid, int chpid) "IOINST: %s (%x.%02x)"
+ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command %04x, len %04x"
+
+# hw/s390x/css.c
+css_enable_facility(const char *facility) "CSS: enable %s"
+css_crw(uint8_t rsc, uint8_t erc, uint16_t rsid, const char *chained) "CSS: queueing crw: rsc=%x, erc=%x, rsid=%x %s"
+css_chpid_add(uint8_t cssid, uint8_t chpid, uint8_t type) "CSS: add chpid %x.%02x (type %02x)"
+css_new_image(uint8_t cssid, const char *default_cssid) "CSS: add css image %02x %s"
+css_assign_subch(const char *do_assign, uint8_t cssid, uint8_t ssid, uint16_t schid, uint16_t devno) "CSS: %s %x.%x.%04x (devno %04x)"
+css_io_interrupt(int cssid, int ssid, int schid, uint32_t intparm, uint8_t isc, const char *conditional) "CSS: I/O interrupt on sch %x.%x.%04x (intparm %08x, isc %x) %s"
+
+# hw/s390x/virtio-ccw.c
+virtio_ccw_interpret_ccw(int cssid, int ssid, int schid, int cmd_code) "VIRTIO-CCW: %x.%x.%04x: interpret command %x"
+virtio_ccw_new_device(int cssid, int ssid, int schid, int devno, const char *devno_mode) "VIRTIO-CCW: add subchannel %x.%x.%04x, devno %04x (%s)"
diff --git a/vl.c b/vl.c
index 7aab73b..910abb6 100644
--- a/vl.c
+++ b/vl.c
@@ -176,6 +176,7 @@ int main(int argc, char **argv)
#define DEFAULT_RAM_SIZE 128
#define MAX_VIRTIO_CONSOLES 1
+#define MAX_SCLP_CONSOLES 1
static const char *data_dir;
const char *bios_name = NULL;
@@ -203,6 +204,7 @@ int no_quit = 0;
CharDriverState *serial_hds[MAX_SERIAL_PORTS];
CharDriverState *parallel_hds[MAX_PARALLEL_PORTS];
CharDriverState *virtcon_hds[MAX_VIRTIO_CONSOLES];
+CharDriverState *sclp_hds[MAX_SCLP_CONSOLES];
int win2k_install_hack = 0;
int singlestep = 0;
int smp_cpus = 1;
@@ -271,6 +273,7 @@ static int tcg_tb_size;
static int default_serial = 1;
static int default_parallel = 1;
static int default_virtcon = 1;
+static int default_sclp = 1;
static int default_monitor = 1;
static int default_floppy = 1;
static int default_cdrom = 1;
@@ -2340,6 +2343,7 @@ struct device_config {
DEV_VIRTCON, /* -virtioconsole */
DEV_DEBUGCON, /* -debugcon */
DEV_GDB, /* -gdb, -s */
+ DEV_SCLP, /* s390 sclp */
} type;
const char *cmdline;
Location loc;
@@ -2458,6 +2462,39 @@ static int virtcon_parse(const char *devname)
return 0;
}
+static int sclp_parse(const char *devname)
+{
+ QemuOptsList *device = qemu_find_opts("device");
+ static int index = 0;
+ char label[32];
+ QemuOpts *dev_opts;
+
+ if (strcmp(devname, "none") == 0) {
+ return 0;
+ }
+ if (index == MAX_SCLP_CONSOLES) {
+ fprintf(stderr, "qemu: too many sclp consoles\n");
+ exit(1);
+ }
+
+ assert(arch_type == QEMU_ARCH_S390X);
+
+ dev_opts = qemu_opts_create(device, NULL, 0, NULL);
+ qemu_opt_set(dev_opts, "driver", "sclpconsole");
+
+ snprintf(label, sizeof(label), "sclpcon%d", index);
+ sclp_hds[index] = qemu_chr_new(label, devname, NULL);
+ if (!sclp_hds[index]) {
+ fprintf(stderr, "qemu: could not connect sclp console"
+ " to character backend '%s'\n", devname);
+ return -1;
+ }
+ qemu_opt_set(dev_opts, "chardev", label);
+
+ index++;
+ return 0;
+}
+
static int debugcon_parse(const char *devname)
{
QemuOpts *opts;
@@ -3615,6 +3652,7 @@ int main(int argc, char **argv, char **envp)
default_serial = 0;
default_parallel = 0;
default_virtcon = 0;
+ default_sclp = 0;
default_monitor = 0;
default_net = 0;
default_floppy = 0;
@@ -3832,6 +3870,9 @@ int main(int argc, char **argv, char **envp)
if (!machine->use_virtcon) {
default_virtcon = 0;
}
+ if (!machine->use_sclp) {
+ default_sclp = 0;
+ }
if (machine->no_floppy) {
default_floppy = 0;
}
@@ -3873,11 +3914,16 @@ int main(int argc, char **argv, char **envp)
add_device_config(DEV_SERIAL, "mon:stdio");
} else if (default_virtcon && default_monitor) {
add_device_config(DEV_VIRTCON, "mon:stdio");
+ } else if (default_sclp && default_monitor) {
+ add_device_config(DEV_SCLP, "mon:stdio");
} else {
if (default_serial)
add_device_config(DEV_SERIAL, "stdio");
if (default_virtcon)
add_device_config(DEV_VIRTCON, "stdio");
+ if (default_sclp) {
+ add_device_config(DEV_SCLP, "stdio");
+ }
if (default_monitor)
monitor_parse("stdio", "readline");
}
@@ -3890,6 +3936,9 @@ int main(int argc, char **argv, char **envp)
monitor_parse("vc:80Cx24C", "readline");
if (default_virtcon)
add_device_config(DEV_VIRTCON, "vc:80Cx24C");
+ if (default_sclp) {
+ add_device_config(DEV_SCLP, "vc:80Cx24C");
+ }
}
socket_init();
@@ -4060,6 +4109,9 @@ int main(int argc, char **argv, char **envp)
exit(1);
if (foreach_device_config(DEV_VIRTCON, virtcon_parse) < 0)
exit(1);
+ if (foreach_device_config(DEV_SCLP, sclp_parse) < 0) {
+ exit(1);
+ }
if (foreach_device_config(DEV_DEBUGCON, debugcon_parse) < 0)
exit(1);