author     Peter Maydell <peter.maydell@linaro.org>   2015-04-30 14:15:56 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2015-04-30 14:15:56 +0100
commit     498147529d1f8e902e6528a0115143b53475791e
tree       290b378074d06fc446f1daf4c638b68f18773bc8 /target-s390x
parent     06feaacfb4cfef10cc0c93d97df7bfc8a71dbc7e
parent     2c80e996e427ae31982f3405a762859578a6261d
Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20150430' into staging
First pile of s390x patches for 2.4, including:
- some cleanup patches
- sort most of the s390x devices into categories
- support for the new STSI post handler, used to insert vm name and
friends
- support for the new MEM_OP ioctl (including access register mode)
for accessing guest memory
# gpg: Signature made Thu Apr 30 12:56:58 2015 BST using RSA key ID C6F02FAF
# gpg: Good signature from "Cornelia Huck <huckc@linux.vnet.ibm.com>"
# gpg: aka "Cornelia Huck <cornelia.huck@de.ibm.com>"
* remotes/cohuck/tags/s390x-20150430:
kvm: better advice for failed s390x startup
s390x/kvm: Support access register mode for KVM_S390_MEM_OP ioctl
s390x/mmu: Use ioctl for reading and writing from/to guest memory
s390x/kvm: Put vm name, extended name and UUID into STSI322 SYSIB
linux-headers: update
s390x/mmu: Use access type definitions instead of magic values
s390x/ipl: sort into categories
sclp: sort into categories
s390-virtio: sort into categories
virtio-ccw: sort into categories
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
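The MEM_OP item above is the main interface change in this pile: instead of translating guest addresses in QEMU and touching memory directly, s390_cpu_virt_mem_rw() can now hand the whole logical access to the kernel. As a rough orientation before the diff, here is a minimal sketch of that ioctl call pattern. It mirrors the kvm_s390_mem_op() helper added in the kvm.c hunk below; the function name guest_mem_read and the raw vcpu_fd descriptor are hypothetical stand-ins and not QEMU code, while the struct and flag names come from the kernel's <linux/kvm.h> (a version new enough to define KVM_S390_MEM_OP).

/*
 * Illustrative sketch only; the in-tree implementation is kvm_s390_mem_op()
 * in the target-s390x/kvm.c hunk below.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int guest_mem_read(int vcpu_fd, __u64 guest_addr, __u8 ar,
                          void *hostbuf, __u32 len)
{
    struct kvm_s390_mem_op op = {
        .gaddr = guest_addr,              /* logical address in the guest */
        .size  = len,
        .op    = KVM_S390_MEMOP_LOGICAL_READ,
        .buf   = (__u64)(unsigned long)hostbuf,
        .ar    = ar,                      /* access register number */
        /* let the kernel inject the access exception on translation errors */
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
    };

    /* A NULL buffer plus KVM_S390_MEMOP_F_CHECK_ONLY would only probe
     * whether the access is possible, without copying any data. */
    return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}

The NULL-buffer/check-only variant is what the reworked s390_cpu_virt_mem_check_write() macro relies on in the cpu.h and mmu_helper.c hunks.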
Diffstat (limited to 'target-s390x')
-rw-r--r--   target-s390x/cpu.h          37
-rw-r--r--   target-s390x/helper.c        2
-rw-r--r--   target-s390x/ioinst.c       42
-rw-r--r--   target-s390x/kvm.c         145
-rw-r--r--   target-s390x/mmu_helper.c   20
5 files changed, 201 insertions(+), 45 deletions(-)
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 8135dda..ba7d250 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -356,7 +356,8 @@ int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
 #ifndef CONFIG_USER_ONLY
 void do_restart_interrupt(CPUS390XState *env);
 
-static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
+                                       uint8_t *ar)
 {
     hwaddr addr = 0;
     uint8_t reg;
@@ -366,6 +367,9 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
         addr = env->regs[reg];
     }
     addr += (ipb >> 16) & 0xfff;
+    if (ar) {
+        *ar = reg;
+    }
 
     return addr;
 }
@@ -401,6 +405,8 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
 int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+                    int len, bool is_write);
 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
 #else
@@ -418,6 +424,11 @@ static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
 {
     return -ENOSYS;
 }
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
+                                  void *hostbuf, int len, bool is_write)
+{
+    return -ENOSYS;
+}
 static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
                                              uint64_t te_code)
 {
@@ -865,9 +876,13 @@ struct sysib_322 {
         uint8_t name[8];
         uint32_t caf;
         uint8_t cpi[16];
-        uint8_t res3[24];
+        uint8_t res5[3];
+        uint8_t ext_name_encoding;
+        uint32_t res3;
+        uint8_t uuid[16];
     } vm[8];
-    uint8_t res4[3552];
+    uint8_t res4[1504];
+    uint8_t ext_names[8][256];
 };
 
 /* MMU defines */
@@ -952,15 +967,15 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                  uint64_t vr);
 
-int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf, int len,
-                         bool is_write);
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+                         int len, bool is_write);
 
-#define s390_cpu_virt_mem_read(cpu, laddr, dest, len)    \
-        s390_cpu_virt_mem_rw(cpu, laddr, dest, len, false)
-#define s390_cpu_virt_mem_write(cpu, laddr, dest, len)   \
-        s390_cpu_virt_mem_rw(cpu, laddr, dest, len, true)
-#define s390_cpu_virt_mem_check_write(cpu, laddr, len)   \
-        s390_cpu_virt_mem_rw(cpu, laddr, NULL, len, true)
+#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len)    \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
+#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len)   \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len)   \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
 
 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index f1060c2..041c9c7 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -162,7 +162,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
         vaddr &= 0x7fffffff;
     }
 
-    mmu_translate(env, vaddr, 2, asc, &raddr, &prot, false);
+    mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false);
 
     return raddr;
 }
diff --git a/target-s390x/ioinst.c b/target-s390x/ioinst.c
index b00a00c..e220cea 100644
--- a/target-s390x/ioinst.c
+++ b/target-s390x/ioinst.c
@@ -149,13 +149,14 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     int ret = -ENODEV;
     int cc;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;
 
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
     }
-    if (s390_cpu_virt_mem_read(cpu, addr, &schib, sizeof(schib))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
         return;
     }
     if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
@@ -215,13 +216,14 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     int ret = -ENODEV;
     int cc;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;
 
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
     }
-    if (s390_cpu_virt_mem_read(cpu, addr, &orig_orb, sizeof(orb))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
         return;
     }
     copy_orb_from_guest(&orb, &orig_orb);
@@ -258,8 +260,9 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
     uint64_t addr;
     int cc;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;
 
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
@@ -268,7 +271,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
 
     cc = css_do_stcrw(&crw);
     /* 0 - crw stored, 1 - zeroes stored */
-    if (s390_cpu_virt_mem_write(cpu, addr, &crw, sizeof(crw)) == 0) {
+    if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
         setcc(cpu, cc);
     } else if (cc == 0) {
         /* Write failed: requeue CRW since STCRW is a suppressing instruction */
@@ -284,8 +287,9 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     int cc;
     SCHIB schib;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;
 
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
@@ -297,7 +301,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
          * we check whether the memory area is writeable (injecting the
          * access execption if it is not) first.
          */
-        if (!s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib))) {
+        if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
            program_interrupt(env, PGM_OPERAND, 2);
         }
         return;
@@ -322,12 +326,13 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
         }
     }
     if (cc != 3) {
-        if (s390_cpu_virt_mem_write(cpu, addr, &schib, sizeof(schib)) != 0) {
+        if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+                                    sizeof(schib)) != 0) {
             return;
         }
     } else {
         /* Access exceptions have a higher priority than cc3 */
-        if (s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib)) != 0) {
+        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
             return;
         }
     }
@@ -342,13 +347,14 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     IRB irb;
     uint64_t addr;
     int cc, irb_len;
+    uint8_t ar;
 
     if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
         program_interrupt(env, PGM_OPERAND, 2);
         return -EIO;
     }
     trace_ioinst_sch_id("tsch", cssid, ssid, schid);
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return -EIO;
@@ -362,14 +368,14 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     }
     /* 0 - status pending, 1 - not status pending, 3 - not operational */
     if (cc != 3) {
-        if (s390_cpu_virt_mem_write(cpu, addr, &irb, irb_len) != 0) {
+        if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
             return -EFAULT;
         }
         css_do_tsch_update_subch(sch);
     } else {
         irb_len = sizeof(irb) - sizeof(irb.emw);
         /* Access exceptions have a higher priority than cc3 */
-        if (s390_cpu_virt_mem_check_write(cpu, addr, irb_len) != 0) {
+        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
             return -EFAULT;
         }
     }
@@ -645,7 +651,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
      * present CHSC sub-handlers ... if we ever need more, we should take
      * care of req->len here first.
      */
-    if (s390_cpu_virt_mem_read(cpu, addr, buf, sizeof(ChscReq))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
         return;
     }
     req = (ChscReq *)buf;
@@ -677,7 +683,8 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
         break;
     }
 
-    if (!s390_cpu_virt_mem_write(cpu, addr + len, res, be16_to_cpu(res->len))) {
+    if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+                                 be16_to_cpu(res->len))) {
         setcc(cpu, 0);    /* Command execution complete */
     }
 }
@@ -690,9 +697,10 @@ int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
     IOIntCode int_code;
     hwaddr len;
     int ret;
+    uint8_t ar;
 
     trace_ioinst("tpi");
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return -EIO;
@@ -702,7 +710,7 @@ int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
     len = lowcore ? 8 /* two words */ : 12 /* three words */;
     ret = css_do_tpi(&int_code, lowcore);
     if (ret == 1) {
-        s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, &int_code, len);
+        s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
     }
     return ret;
 }
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index b48c643..8e65e43 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -44,6 +44,7 @@
 #include "hw/s390x/s390-pci-inst.h"
 #include "hw/s390x/s390-pci-bus.h"
 #include "hw/s390x/ipl.h"
+#include "hw/s390x/ebcdic.h"
 
 /* #define DEBUG_KVM */
 
@@ -122,6 +123,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 
 static int cap_sync_regs;
 static int cap_async_pf;
+static int cap_mem_op;
 
 static void *legacy_s390_alloc(size_t size, uint64_t *align);
 
@@ -246,6 +248,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
 {
     cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
     cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
+    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
 
     kvm_s390_enable_cmma(s);
 
@@ -255,6 +258,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
     }
 
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
+    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
 
     return 0;
 }
@@ -548,6 +552,46 @@ int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
 }
 
+/**
+ * kvm_s390_mem_op:
+ * @addr: the logical start address in guest memory
+ * @ar: the access register number
+ * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
+ * @len: length that should be transfered
+ * @is_write: true = write, false = read
+ * Returns: 0 on success, non-zero if an exception or error occured
+ *
+ * Use KVM ioctl to read/write from/to guest memory. An access exception
+ * is injected into the vCPU in case of translation errors.
+ */
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+                    int len, bool is_write)
+{
+    struct kvm_s390_mem_op mem_op = {
+        .gaddr = addr,
+        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
+        .size = len,
+        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
+                       : KVM_S390_MEMOP_LOGICAL_READ,
+        .buf = (uint64_t)hostbuf,
+        .ar = ar,
+    };
+    int ret;
+
+    if (!cap_mem_op) {
+        return -ENOSYS;
+    }
+    if (!hostbuf) {
+        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+    if (ret < 0) {
+        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
+    }
+    return ret;
+}
+
 /*
  * Legacy layout for s390:
  * Older S390 KVM requires the topmost vma of the RAM to be
@@ -975,7 +1019,8 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
     return rc;
 }
 
-static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
+static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
+                                  uint8_t *ar)
 {
     CPUS390XState *env = &cpu->env;
     uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
@@ -986,12 +1031,16 @@ static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
     if (disp2 & 0x80000) {
         disp2 += 0xfff00000;
     }
+    if (ar) {
+        *ar = base2;
+    }
 
     return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
 }
 
-static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
+static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
+                                  uint8_t *ar)
 {
     CPUS390XState *env = &cpu->env;
     uint32_t base2 = run->s390_sieic.ipb >> 28;
@@ -1001,6 +1050,9 @@ static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
     if (disp2 & 0x80000) {
         disp2 += 0xfff00000;
     }
+    if (ar) {
+        *ar = base2;
+    }
 
     return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
 }
@@ -1032,11 +1084,12 @@ static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
 {
     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
     uint64_t fiba;
+    uint8_t ar;
 
     cpu_synchronize_state(CPU(cpu));
-    fiba = get_base_disp_rxy(cpu, run);
+    fiba = get_base_disp_rxy(cpu, run, &ar);
 
-    return stpcifc_service_call(cpu, r1, fiba);
+    return stpcifc_service_call(cpu, r1, fiba, ar);
 }
 
 static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
@@ -1058,22 +1111,24 @@ static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
     uint8_t r3 = run->s390_sieic.ipa & 0x000f;
     uint64_t gaddr;
+    uint8_t ar;
 
     cpu_synchronize_state(CPU(cpu));
-    gaddr = get_base_disp_rsy(cpu, run);
+    gaddr = get_base_disp_rsy(cpu, run, &ar);
 
-    return pcistb_service_call(cpu, r1, r3, gaddr);
+    return pcistb_service_call(cpu, r1, r3, gaddr, ar);
 }
 
 static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
 {
     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
     uint64_t fiba;
+    uint8_t ar;
 
     cpu_synchronize_state(CPU(cpu));
-    fiba = get_base_disp_rxy(cpu, run);
+    fiba = get_base_disp_rxy(cpu, run, &ar);
 
-    return mpcifc_service_call(cpu, r1, fiba);
+    return mpcifc_service_call(cpu, r1, fiba, ar);
 }
 
 static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
@@ -1202,7 +1257,7 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
      * For any diagnose call we support, bits 48-63 of the resulting
      * address specify the function code; the remainder is ignored.
      */
-    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
+    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
     switch (func_code) {
     case DIAG_IPL:
         kvm_handle_diag_308(cpu, run);
@@ -1549,7 +1604,8 @@ static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
     cpu_synchronize_state(CPU(cpu));
 
     /* get order code */
-    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
+    order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
+        & SIGP_ORDER_MASK;
     status_reg = &env->regs[r1];
     param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
 
@@ -1723,6 +1779,72 @@ static int handle_tsch(S390CPU *cpu)
     return ret;
 }
 
+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
+{
+    struct sysib_322 sysib;
+    int del;
+
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
+        return;
+    }
+    /* Shift the stack of Extended Names to prepare for our own data */
+    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
+            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
+    /* First virt level, that doesn't provide Ext Names delimits stack. It is
+     * assumed it's not capable of managing Extended Names for lower levels.
+     */
+    for (del = 1; del < sysib.count; del++) {
+        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
+            break;
+        }
+    }
+    if (del < sysib.count) {
+        memset(sysib.ext_names[del], 0,
+               sizeof(sysib.ext_names[0]) * (sysib.count - del));
+    }
+    /* Insert short machine name in EBCDIC, padded with blanks */
+    if (qemu_name) {
+        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
+        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
+                                                    strlen(qemu_name)));
+    }
+    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
+    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
+    /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's
+     * considered by s390 as not capable of providing any Extended Name.
+     * Therefore if no name was specified on qemu invocation, we go with the
+     * same "KVMguest" default, which KVM has filled into short name field.
+     */
+    if (qemu_name) {
+        strncpy((char *)sysib.ext_names[0], qemu_name,
+                sizeof(sysib.ext_names[0]));
+    } else {
+        strcpy((char *)sysib.ext_names[0], "KVMguest");
+    }
+    /* Insert UUID */
+    memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid));
+
+    s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
+}
+
+static int handle_stsi(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+
+    switch (run->s390_stsi.fc) {
+    case 3:
+        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
+            return 0;
+        }
+        /* Only sysib 3.2.2 needs post-handling for now. */
+        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
+        return 0;
+    default:
+        return 0;
+    }
+}
+
 static int kvm_arch_handle_debug_exit(S390CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
@@ -1772,6 +1894,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
     case KVM_EXIT_S390_TSCH:
         ret = handle_tsch(cpu);
         break;
+    case KVM_EXIT_S390_STSI:
+        ret = handle_stsi(cpu);
+        break;
     case KVM_EXIT_DEBUG:
         ret = kvm_arch_handle_debug_exit(cpu);
         break;
diff --git a/target-s390x/mmu_helper.c b/target-s390x/mmu_helper.c
index b061c85..30a38ec 100644
--- a/target-s390x/mmu_helper.c
+++ b/target-s390x/mmu_helper.c
@@ -68,7 +68,7 @@ static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
 {
     uint64_t tec;
 
-    tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | 4 | asc >> 46;
+    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;
 
     DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
 
@@ -85,7 +85,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
     int ilen = ILEN_LATER;
     uint64_t tec;
 
-    tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | asc >> 46;
+    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;
 
     DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
 
@@ -94,7 +94,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
     }
 
     /* Code accesses have an undefined ilc. */
-    if (rw == 2) {
+    if (rw == MMU_INST_FETCH) {
         ilen = 2;
     }
 
@@ -288,7 +288,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
 
     r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
                              exc);
-    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
+    if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
         trigger_prot_fault(env, vaddr, asc, rw, exc);
         return -1;
     }
@@ -338,7 +338,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
      *  Instruction: Primary
      *  Data: Secondary
      */
-    if (rw == 2) {
+    if (rw == MMU_INST_FETCH) {
        r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
                               raddr, flags, rw, exc);
        *flags &= ~(PAGE_READ | PAGE_WRITE);
@@ -435,6 +435,7 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
 /**
  * s390_cpu_virt_mem_rw:
  * @laddr: the logical start address
+ * @ar: the access register number
  * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
  * @len: length that should be transfered
  * @is_write: true = write, false = read
@@ -443,13 +444,20 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
 * Copy from/to guest memory using logical addresses. Note that we inject a
 * program interrupt in case there is an error while accessing the memory.
 */
-int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf,
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                          int len, bool is_write)
 {
     int currlen, nr_pages, i;
     target_ulong *pages;
     int ret;
 
+    if (kvm_enabled()) {
+        ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
+        if (ret >= 0) {
+            return ret;
+        }
+    }
+
     nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
                + 1;
     pages = g_malloc(nr_pages * sizeof(*pages));