author     Peter Maydell <peter.maydell@linaro.org>   2020-08-24 09:35:21 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-08-24 09:35:21 +0100
commit     dd8014e4e904e895435aae9f11a686f072762782 (patch)
tree       ea1f526128f3d88a92f90cf8833b3adf9c8ff828 /hw
parent     8367a77c4d3f6e1e60890f5510304feb2c621611 (diff)
parent     3110f0ee19ccdb50adff3dfa1321039f69efddcd (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.2-20200818' into staging
ppc patch queue 2020-08-18
Here's my first pull request for qemu-5.2, which has quite a few
accumulated things. Highlights are:
* Preliminary support for POWER10 (Power ISA 3.1) instruction emulation
* Add documentation on the (very confusing) pseries NUMA configuration
* Fix some bugs handling edge cases with XICS, XIVE and kernel_irqchip
* Fix icount for a number of POWER registers
* Many cleanups to error handling in XIVE code (the general pattern is sketched just after this list)
* Validate size of -prom-env data
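Most of the XIVE error-handling cleanups in this queue follow one mechanical conversion, visible throughout the diff below: a `void` helper that filled a `local_err` and then called `error_propagate()` becomes an `int`-returning helper that writes straight into the caller's `errp` and returns a negative errno. The following is a minimal, self-contained sketch of that shape; the `Error` type and `error_setg()` here are simplified stand-ins for QEMU's `qapi/error.h` API, and `old_style_reset()`/`new_style_reset()` are illustrative names, not symbols from the tree.

```c
/* Sketch of the error-handling conversion used across the XIVE patches.
 * The Error type and error_setg() below are simplified stand-ins for
 * QEMU's qapi/error.h API so the example compiles on its own. */
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Error { char msg[256]; } Error;

static void error_setg(Error **errp, const char *fmt, ...)
{
    Error *err;
    va_list ap;

    if (!errp) {
        return;
    }
    err = calloc(1, sizeof(*err));
    va_start(ap, fmt);
    vsnprintf(err->msg, sizeof(err->msg), fmt, ap);
    va_end(ap);
    *errp = err;
}

/* Old style: failure is reported only through a local Error that the
 * function must propagate; callers cannot test success directly. */
static void old_style_reset(int fd, Error **errp)
{
    Error *local_err = NULL;

    if (fd < 0) {
        error_setg(&local_err, "device not connected (fd %d)", fd);
    }
    if (local_err) {
        *errp = local_err;  /* stands in for error_propagate(errp, local_err) */
        return;
    }
}

/* New style (as in this series): fill the caller's errp directly and
 * return a negative errno, so callers can simply check 'ret < 0'. */
static int new_style_reset(int fd, Error **errp)
{
    if (fd < 0) {
        error_setg(errp, "device not connected (fd %d)", fd);
        return -ENODEV;
    }
    return 0;
}

int main(void)
{
    Error *err = NULL;

    old_style_reset(-1, &err);
    if (err) {
        fprintf(stderr, "old style: %s\n", err->msg);
        free(err);
        err = NULL;
    }

    if (new_style_reset(-1, &err) < 0) {
        fprintf(stderr, "new style: %s\n", err->msg);
        free(err);
    }
    return 0;
}
```

Returning an int alongside `errp` is what lets the later patches in the series replace the repeated `local_err`/`error_propagate()` dance with a plain `if (ret < 0) goto fail;`.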
# gpg: Signature made Tue 18 Aug 2020 05:18:36 BST
# gpg: using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg: aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg: aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg: aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392
* remotes/dgibson/tags/ppc-for-5.2-20200818: (40 commits)
spapr/xive: Use xive_source_esb_len()
nvram: Exit QEMU if NVRAM cannot contain all -prom-env data
spapr/xive: Simplify error handling of kvmppc_xive_cpu_synchronize_state()
ppc/xive: Simplify error handling in xive_tctx_realize()
spapr/xive: Simplify error handling in kvmppc_xive_connect()
ppc/xive: Fix error handling in vmstate_xive_tctx_*() callbacks
spapr/xive: Fix error handling in kvmppc_xive_post_load()
spapr/kvm: Fix error handling in kvmppc_xive_pre_save()
spapr/xive: Rework error handling of kvmppc_xive_set_source_config()
spapr/xive: Rework error handling in kvmppc_xive_get_queues()
spapr/xive: Rework error handling of kvmppc_xive_[gs]et_queue_config()
spapr/xive: Rework error handling of kvmppc_xive_cpu_[gs]et_state()
spapr/xive: Rework error handling of kvmppc_xive_mmap()
spapr/xive: Rework error handling of kvmppc_xive_source_reset()
spapr/xive: Rework error handling of kvmppc_xive_cpu_connect()
spapr: Simplify error handling in spapr_phb_realize()
spapr/xive: Convert KVM device fd checks to assert()
ppc/xive: Introduce dedicated kvm_irqchip_in_kernel() wrappers
ppc/xive: Rework setup of XiveSource::esb_mmio
target/ppc: Integrate icount to purr, vtb, and tbu40
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
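The "Introduce dedicated kvm_irqchip_in_kernel() wrappers" commit in the shortlog combines the global KVM-irqchip check with a per-device "is the KVM XIVE device open" test, so KVM-only branches compile away when KVM support is not built in. Below is a minimal, self-contained sketch of that idea; `DemoXive`, `demo_xive_in_kernel()` and the hard-coded `CONFIG_KVM` switch are illustrative stand-ins, not the actual QEMU symbols.

```c
/* Minimal sketch of the spapr_xive_in_kernel()/xive_in_kernel() idea:
 * gate every KVM-only path on a predicate that is a compile-time
 * constant false when KVM support is absent, so the compiler can drop
 * the KVM branches entirely. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for QEMU's kvm_irqchip_in_kernel(): constant false without KVM. */
#ifdef CONFIG_KVM
static bool kvm_irqchip_in_kernel(void) { return true; /* simplified */ }
#else
#define kvm_irqchip_in_kernel() (false)
#endif

typedef struct DemoXive {
    int fd;                 /* KVM device fd, -1 when not connected */
} DemoXive;

/* Mirrors the new spapr_xive_in_kernel() macro in the diff below:
 * both the global and the per-device condition must hold. */
#define demo_xive_in_kernel(xive) \
    (kvm_irqchip_in_kernel() && (xive)->fd != -1)

static void demo_set_irq(DemoXive *xive, int irq, int val)
{
    if (demo_xive_in_kernel(xive)) {
        /* KVM path; dead code when CONFIG_KVM is undefined. */
        printf("kvm: irq %d -> %d via fd %d\n", irq, val, xive->fd);
    } else {
        printf("emulated: irq %d -> %d\n", irq, val);
    }
}

int main(void)
{
    DemoXive xive = { .fd = -1 };

    demo_set_irq(&xive, 7, 1);   /* always takes the emulated path here */
    return 0;
}
```

In the real patches the per-device half goes through a `XivePresenterClass::in_kernel` hook, so the generic xive.c code never needs to know about the sPAPR-specific device type.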
Diffstat (limited to 'hw')
-rw-r--r--  hw/intc/spapr_xive.c     |  47
-rw-r--r--  hw/intc/spapr_xive_kvm.c | 257
-rw-r--r--  hw/intc/xive.c           |  57
-rw-r--r--  hw/nvram/chrp_nvram.c    |  24
-rw-r--r--  hw/nvram/mac_nvram.c     |   2
-rw-r--r--  hw/nvram/spapr_nvram.c   |   3
-rw-r--r--  hw/ppc/spapr.c           |   6
-rw-r--r--  hw/ppc/spapr_caps.c      |  99
-rw-r--r--  hw/ppc/spapr_irq.c       |  12
-rw-r--r--  hw/ppc/spapr_pci.c       |  16
-rw-r--r--  hw/sparc/sun4m.c         |   2
-rw-r--r--  hw/sparc64/sun4u.c       |   2
12 files changed, 291 insertions, 236 deletions
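The hw/nvram changes in the diff below make `chrp_nvram_set_var()` refuse to write past the end of the NVRAM buffer and make the caller abort with a clear message instead of silently overflowing. The self-contained sketch below reproduces that bounds-check shape; `demo_nvram_set_var()`, the buffer size and the sample variables are made-up illustration values, not the real chrp_nvram code.

```c
/* Sketch of the -prom-env size validation added to chrp_nvram.c below:
 * each variable is only copied if it still fits, and an overflow is
 * turned into a hard error instead of writing past the buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy one NUL-terminated "name=value" string at offset 'addr', or
 * return -1 if fewer than strlen(str)+1 bytes remain. */
static int demo_nvram_set_var(unsigned char *nvram, int addr,
                              const char *str, int max_len)
{
    int len = (int)strlen(str) + 1;

    if (max_len < len) {
        return -1;
    }
    memcpy(&nvram[addr], str, len);
    return addr + len;
}

int main(void)
{
    unsigned char nvram[40];            /* deliberately tiny */
    const char *prom_envs[] = {
        "boot-device=disk",
        "nvramrc=devalias",
        "oem-banner=this-one-will-not-fit",
    };
    int end = 0;

    for (size_t i = 0; i < sizeof(prom_envs) / sizeof(prom_envs[0]); i++) {
        end = demo_nvram_set_var(nvram, end, prom_envs[i],
                                 (int)sizeof(nvram) - end);
        if (end == -1) {
            fprintf(stderr,
                    "NVRAM is too small. Try to pass less data to -prom-env\n");
            return EXIT_FAILURE;
        }
    }
    printf("wrote %d bytes of -prom-env data\n", end);
    return 0;
}
```

In the tree, the overflow path calls error_report() and exit(), matching the "Exit QEMU if NVRAM cannot contain all -prom-env data" commit in the shortlog above.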
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 89c8cd9..4bd0d60 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -148,12 +148,19 @@ static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end, xive_end_queue_pic_print_info(end, 6, mon); } +/* + * kvm_irqchip_in_kernel() will cause the compiler to turn this + * info a nop if CONFIG_KVM isn't defined. + */ +#define spapr_xive_in_kernel(xive) \ + (kvm_irqchip_in_kernel() && (xive)->fd != -1) + void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) { XiveSource *xsrc = &xive->source; int i; - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { Error *local_err = NULL; kvmppc_xive_synchronize_state(xive, &local_err); @@ -329,7 +336,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio); /* Set the mapping address of the END ESB pages after the source ESBs */ - xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + xive->end_base = xive->vc_base + xive_source_esb_len(xsrc); /* * Allocate the routing tables @@ -507,8 +514,10 @@ static const VMStateDescription vmstate_spapr_xive_eas = { static int vmstate_spapr_xive_pre_save(void *opaque) { - if (kvm_irqchip_in_kernel()) { - return kvmppc_xive_pre_save(SPAPR_XIVE(opaque)); + SpaprXive *xive = SPAPR_XIVE(opaque); + + if (spapr_xive_in_kernel(xive)) { + return kvmppc_xive_pre_save(xive); } return 0; @@ -520,8 +529,10 @@ static int vmstate_spapr_xive_pre_save(void *opaque) */ static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id) { - if (kvm_irqchip_in_kernel()) { - return kvmppc_xive_post_load(SPAPR_XIVE(intc), version_id); + SpaprXive *xive = SPAPR_XIVE(intc); + + if (spapr_xive_in_kernel(xive)) { + return kvmppc_xive_post_load(xive, version_id); } return 0; @@ -564,7 +575,7 @@ static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn, xive_source_irq_set_lsi(xsrc, lisn); } - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { return kvmppc_xive_source_reset_one(xsrc, lisn, errp); } @@ -641,7 +652,7 @@ static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val) { SpaprXive *xive = SPAPR_XIVE(intc); - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { kvmppc_xive_source_set_irq(&xive->source, irq, val); } else { xive_source_set_irq(&xive->source, irq, val); @@ -749,11 +760,16 @@ static void spapr_xive_deactivate(SpaprInterruptController *intc) spapr_xive_mmio_set_enabled(xive, false); - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { kvmppc_xive_disconnect(intc); } } +static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr) +{ + return spapr_xive_in_kernel(SPAPR_XIVE(xptr)); +} + static void spapr_xive_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -788,6 +804,7 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data) sicc->post_load = spapr_xive_post_load; xpc->match_nvt = spapr_xive_match_nvt; + xpc->in_kernel = spapr_xive_in_kernel_xptr; } static const TypeInfo spapr_xive_info = { @@ -1058,7 +1075,7 @@ static target_ulong h_int_set_source_config(PowerPCCPU *cpu, new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn); } - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { Error *local_err = NULL; kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err); @@ -1379,7 +1396,7 @@ static target_ulong h_int_set_queue_config(PowerPCCPU *cpu, */ out: - 
if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { Error *local_err = NULL; kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err); @@ -1480,7 +1497,7 @@ static target_ulong h_int_get_queue_config(PowerPCCPU *cpu, args[2] = 0; } - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { Error *local_err = NULL; kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err); @@ -1642,7 +1659,7 @@ static target_ulong h_int_esb(PowerPCCPU *cpu, return H_P3; } - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data, flags & SPAPR_XIVE_ESB_STORE); } else { @@ -1717,7 +1734,7 @@ static target_ulong h_int_sync(PowerPCCPU *cpu, * under KVM */ - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { Error *local_err = NULL; kvmppc_xive_sync_source(xive, lisn, &local_err); @@ -1761,7 +1778,7 @@ static target_ulong h_int_reset(PowerPCCPU *cpu, device_legacy_reset(DEVICE(xive)); - if (kvm_irqchip_in_kernel()) { + if (spapr_xive_in_kernel(xive)) { Error *local_err = NULL; kvmppc_xive_reset(xive, &local_err); diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index edb7ee0..e8667ce 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -73,54 +73,54 @@ static void kvm_cpu_disable_all(void) * XIVE Thread Interrupt Management context (KVM) */ -void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp) +int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp) { SpaprXive *xive = SPAPR_XIVE(tctx->xptr); uint64_t state[2]; int ret; - /* The KVM XIVE device is not in use yet */ - if (xive->fd == -1) { - return; - } + assert(xive->fd != -1); /* word0 and word1 of the OS ring. */ state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]); ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state); if (ret != 0) { - error_setg_errno(errp, errno, + error_setg_errno(errp, -ret, "XIVE: could not restore KVM state of CPU %ld", kvm_arch_vcpu_id(tctx->cs)); + return ret; } + + return 0; } -void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp) +int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp) { SpaprXive *xive = SPAPR_XIVE(tctx->xptr); uint64_t state[2] = { 0 }; int ret; - /* The KVM XIVE device is not in use */ - if (xive->fd == -1) { - return; - } + assert(xive->fd != -1); ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state); if (ret != 0) { - error_setg_errno(errp, errno, + error_setg_errno(errp, -ret, "XIVE: could not capture KVM state of CPU %ld", kvm_arch_vcpu_id(tctx->cs)); - return; + return ret; } /* word0 and word1 of the OS ring. 
*/ *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0]; + + return 0; } typedef struct { XiveTCTX *tctx; - Error *err; + Error **errp; + int ret; } XiveCpuGetState; static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu, @@ -128,14 +128,14 @@ static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu, { XiveCpuGetState *s = arg.host_ptr; - kvmppc_xive_cpu_get_state(s->tctx, &s->err); + s->ret = kvmppc_xive_cpu_get_state(s->tctx, s->errp); } -void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp) +int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp) { XiveCpuGetState s = { .tctx = tctx, - .err = NULL, + .errp = errp, }; /* @@ -144,26 +144,21 @@ void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp) run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state, RUN_ON_CPU_HOST_PTR(&s)); - if (s.err) { - error_propagate(errp, s.err); - return; - } + return s.ret; } -void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp) +int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp) { + ERRP_GUARD(); SpaprXive *xive = SPAPR_XIVE(tctx->xptr); unsigned long vcpu_id; int ret; - /* The KVM XIVE device is not in use */ - if (xive->fd == -1) { - return; - } + assert(xive->fd != -1); /* Check if CPU was hot unplugged and replugged. */ if (kvm_cpu_is_enabled(tctx->cs)) { - return; + return 0; } vcpu_id = kvm_arch_vcpu_id(tctx->cs); @@ -171,28 +166,26 @@ void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp) ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd, vcpu_id, 0); if (ret < 0) { - Error *local_err = NULL; - - error_setg(&local_err, - "XIVE: unable to connect CPU%ld to KVM device: %s", - vcpu_id, strerror(errno)); - if (errno == ENOSPC) { - error_append_hint(&local_err, "Try -smp maxcpus=N with N < %u\n", + error_setg_errno(errp, -ret, + "XIVE: unable to connect CPU%ld to KVM device", + vcpu_id); + if (ret == -ENOSPC) { + error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n", MACHINE(qdev_get_machine())->smp.max_cpus); } - error_propagate(errp, local_err); - return; + return ret; } kvm_cpu_enable(tctx->cs); + return 0; } /* * XIVE Interrupt Source (KVM) */ -void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, - Error **errp) +int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, + Error **errp) { uint32_t end_idx; uint32_t end_blk; @@ -201,7 +194,6 @@ void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, bool masked; uint32_t eisn; uint64_t kvm_src; - Error *local_err = NULL; assert(xive_eas_is_valid(eas)); @@ -221,12 +213,8 @@ void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) & KVM_XIVE_SOURCE_EISN_MASK; - kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn, - &kvm_src, true, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } + return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn, + &kvm_src, true, errp); } void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp) @@ -245,10 +233,7 @@ int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) SpaprXive *xive = SPAPR_XIVE(xsrc->xive); uint64_t state = 0; - /* The KVM XIVE device is not in use */ - if (xive->fd == -1) { - return -ENODEV; - } + assert(xive->fd != -1); if (xive_source_irq_is_lsi(xsrc, srcno)) { state |= KVM_XIVE_LEVEL_SENSITIVE; @@ -261,24 +246,25 @@ int kvmppc_xive_source_reset_one(XiveSource 
*xsrc, int srcno, Error **errp) true, errp); } -static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp) +static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp) { SpaprXive *xive = SPAPR_XIVE(xsrc->xive); int i; for (i = 0; i < xsrc->nr_irqs; i++) { - Error *local_err = NULL; + int ret; if (!xive_eas_is_valid(&xive->eat[i])) { continue; } - kvmppc_xive_source_reset_one(xsrc, i, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; + ret = kvmppc_xive_source_reset_one(xsrc, i, errp); + if (ret < 0) { + return ret; } } + + return 0; } /* @@ -381,15 +367,15 @@ void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val) /* * sPAPR XIVE interrupt controller (KVM) */ -void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, - uint32_t end_idx, XiveEND *end, - Error **errp) +int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + Error **errp) { struct kvm_ppc_xive_eq kvm_eq = { 0 }; uint64_t kvm_eq_idx; uint8_t priority; uint32_t server; - Error *local_err = NULL; + int ret; assert(xive_end_is_valid(end)); @@ -401,11 +387,10 @@ void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & KVM_XIVE_EQ_SERVER_MASK; - kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, - &kvm_eq, false, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; + ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, + &kvm_eq, false, errp); + if (ret < 0) { + return ret; } /* @@ -415,17 +400,18 @@ void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, */ end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) | xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex); + + return 0; } -void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk, - uint32_t end_idx, XiveEND *end, - Error **errp) +int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + Error **errp) { struct kvm_ppc_xive_eq kvm_eq = { 0 }; uint64_t kvm_eq_idx; uint8_t priority; uint32_t server; - Error *local_err = NULL; /* * Build the KVM state from the local END structure. 
@@ -463,12 +449,9 @@ void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk, kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & KVM_XIVE_EQ_SERVER_MASK; - kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, - &kvm_eq, true, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } + return + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, + &kvm_eq, true, errp); } void kvmppc_xive_reset(SpaprXive *xive, Error **errp) @@ -477,23 +460,24 @@ void kvmppc_xive_reset(SpaprXive *xive, Error **errp) NULL, true, errp); } -static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp) +static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp) { - Error *local_err = NULL; int i; + int ret; for (i = 0; i < xive->nr_ends; i++) { if (!xive_end_is_valid(&xive->endt[i])) { continue; } - kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, - &xive->endt[i], &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; + ret = kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, + &xive->endt[i], errp); + if (ret < 0) { + return ret; } } + + return 0; } /* @@ -592,10 +576,7 @@ static void kvmppc_xive_change_state_handler(void *opaque, int running, void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp) { - /* The KVM XIVE device is not in use */ - if (xive->fd == -1) { - return; - } + assert(xive->fd != -1); /* * When the VM is stopped, the sources are masked and the previous @@ -621,19 +602,17 @@ void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp) int kvmppc_xive_pre_save(SpaprXive *xive) { Error *local_err = NULL; + int ret; - /* The KVM XIVE device is not in use */ - if (xive->fd == -1) { - return 0; - } + assert(xive->fd != -1); /* EAT: there is no extra state to query from KVM */ /* ENDT */ - kvmppc_xive_get_queues(xive, &local_err); - if (local_err) { + ret = kvmppc_xive_get_queues(xive, &local_err); + if (ret < 0) { error_report_err(local_err); - return -1; + return ret; } return 0; @@ -650,6 +629,7 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id) Error *local_err = NULL; CPUState *cs; int i; + int ret; /* The KVM XIVE device should be in use */ assert(xive->fd != -1); @@ -660,11 +640,10 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id) continue; } - kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, - &xive->endt[i], &local_err); - if (local_err) { - error_report_err(local_err); - return -1; + ret = kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, + &xive->endt[i], &local_err); + if (ret < 0) { + goto fail; } } @@ -679,16 +658,14 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id) * previously set in KVM. Since we don't do that for all interrupts * at reset time anymore, let's do it now. 
*/ - kvmppc_xive_source_reset_one(&xive->source, i, &local_err); - if (local_err) { - error_report_err(local_err); - return -1; + ret = kvmppc_xive_source_reset_one(&xive->source, i, &local_err); + if (ret < 0) { + goto fail; } - kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err); - if (local_err) { - error_report_err(local_err); - return -1; + ret = kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err); + if (ret < 0) { + goto fail; } } @@ -705,17 +682,21 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id) CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); - kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err); - if (local_err) { - error_report_err(local_err); - return -1; + ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err); + if (ret < 0) { + goto fail; } } /* The source states will be restored when the machine starts running */ return 0; + +fail: + error_report_err(local_err); + return ret; } +/* Returns MAP_FAILED on error and sets errno */ static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len, Error **errp) { @@ -726,7 +707,6 @@ static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len, pgoff << page_shift); if (addr == MAP_FAILED) { error_setg_errno(errp, errno, "XIVE: unable to set memory mapping"); - return NULL; } return addr; @@ -741,10 +721,12 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, { SpaprXive *xive = SPAPR_XIVE(intc); XiveSource *xsrc = &xive->source; - Error *local_err = NULL; - size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + size_t esb_len = xive_source_esb_len(xsrc); size_t tima_len = 4ull << TM_SHIFT; CPUState *cs; + int fd; + void *addr; + int ret; /* * The KVM XIVE device already in use. This is the case when @@ -760,18 +742,20 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, } /* First, create the KVM XIVE device */ - xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false); - if (xive->fd < 0) { - error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device"); + fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false); + if (fd < 0) { + error_setg_errno(errp, -fd, "XIVE: error creating KVM device"); return -1; } + xive->fd = fd; /* Tell KVM about the # of VCPUs we may have */ if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_NR_SERVERS)) { - if (kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, - KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true, - &local_err)) { + ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, + KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true, + errp); + if (ret < 0) { goto fail; } } @@ -779,14 +763,14 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, /* * 1. Source ESB pages - KVM mapping */ - xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, - &local_err); - if (local_err) { + addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, errp); + if (addr == MAP_FAILED) { goto fail; } + xsrc->esb_mmap = addr; memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc), - "xive.esb", esb_len, xsrc->esb_mmap); + "xive.esb-kvm", esb_len, xsrc->esb_mmap); memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_kvm, 1); @@ -797,11 +781,12 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, /* * 3. 
TIMA pages - KVM mapping */ - xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, - &local_err); - if (local_err) { + addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, errp); + if (addr == MAP_FAILED) { goto fail; } + xive->tm_mmap = addr; + memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive), "xive.tima", tima_len, xive->tm_mmap); memory_region_add_subregion_overlap(&xive->tm_mmio, 0, @@ -814,15 +799,15 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); - kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err); - if (local_err) { + ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp); + if (ret < 0) { goto fail; } } /* Update the KVM sources */ - kvmppc_xive_source_reset(xsrc, &local_err); - if (local_err) { + ret = kvmppc_xive_source_reset(xsrc, errp); + if (ret < 0) { goto fail; } @@ -832,7 +817,6 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, return 0; fail: - error_propagate(errp, local_err); kvmppc_xive_disconnect(intc); return -1; } @@ -843,14 +827,11 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc) XiveSource *xsrc; size_t esb_len; - /* The KVM XIVE device is not in use */ - if (!xive || xive->fd == -1) { - return; - } + assert(xive->fd != -1); /* Clear the KVM mapping */ xsrc = &xive->source; - esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + esb_len = xive_source_esb_len(xsrc); if (xsrc->esb_mmap) { memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm); @@ -871,10 +852,8 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc) * and removed from the list of devices of the VM. The VCPU * presenters are also detached from the device. */ - if (xive->fd != -1) { - close(xive->fd); - xive->fd = -1; - } + close(xive->fd); + xive->fd = -1; kvm_kernel_irqchip = false; kvm_msi_via_irqfd_allowed = false; diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 9a16243..489e625 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -592,6 +592,17 @@ static const char * const xive_tctx_ring_names[] = { "USER", "OS", "POOL", "PHYS", }; +/* + * kvm_irqchip_in_kernel() will cause the compiler to turn this + * info a nop if CONFIG_KVM isn't defined. + */ +#define xive_in_kernel(xptr) \ + (kvm_irqchip_in_kernel() && \ + ({ \ + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \ + xpc->in_kernel ? xpc->in_kernel(xptr) : false; \ + })) + void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) { int cpu_index; @@ -606,7 +617,7 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) cpu_index = tctx->cs ? 
tctx->cs->cpu_index : -1; - if (kvm_irqchip_in_kernel()) { + if (xive_in_kernel(tctx->xptr)) { Error *local_err = NULL; kvmppc_xive_cpu_synchronize_state(tctx, &local_err); @@ -651,7 +662,6 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp) XiveTCTX *tctx = XIVE_TCTX(dev); PowerPCCPU *cpu; CPUPPCState *env; - Error *local_err = NULL; assert(tctx->cs); assert(tctx->xptr); @@ -671,10 +681,8 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp) } /* Connect the presenter to the VCPU (required for CPU hotplug) */ - if (kvm_irqchip_in_kernel()) { - kvmppc_xive_cpu_connect(tctx, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (xive_in_kernel(tctx->xptr)) { + if (kvmppc_xive_cpu_connect(tctx, errp) < 0) { return; } } @@ -682,13 +690,15 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp) static int vmstate_xive_tctx_pre_save(void *opaque) { + XiveTCTX *tctx = XIVE_TCTX(opaque); Error *local_err = NULL; + int ret; - if (kvm_irqchip_in_kernel()) { - kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err); - if (local_err) { + if (xive_in_kernel(tctx->xptr)) { + ret = kvmppc_xive_cpu_get_state(tctx, &local_err); + if (ret < 0) { error_report_err(local_err); - return -1; + return ret; } } @@ -697,17 +707,19 @@ static int vmstate_xive_tctx_pre_save(void *opaque) static int vmstate_xive_tctx_post_load(void *opaque, int version_id) { + XiveTCTX *tctx = XIVE_TCTX(opaque); Error *local_err = NULL; + int ret; - if (kvm_irqchip_in_kernel()) { + if (xive_in_kernel(tctx->xptr)) { /* * Required for hotplugged CPU, for which the state comes * after all states of the machine. */ - kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err); - if (local_err) { + ret = kvmppc_xive_cpu_set_state(tctx, &local_err); + if (ret < 0) { error_report_err(local_err); - return -1; + return ret; } } @@ -1128,6 +1140,7 @@ static void xive_source_reset(void *dev) static void xive_source_realize(DeviceState *dev, Error **errp) { XiveSource *xsrc = XIVE_SOURCE(dev); + size_t esb_len = xive_source_esb_len(xsrc); assert(xsrc->xive); @@ -1147,11 +1160,11 @@ static void xive_source_realize(DeviceState *dev, Error **errp) xsrc->status = g_malloc0(xsrc->nr_irqs); xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); - if (!kvm_irqchip_in_kernel()) { - memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), - &xive_source_esb_ops, xsrc, "xive.esb", - (1ull << xsrc->esb_shift) * xsrc->nr_irqs); - } + memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len); + memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc), + &xive_source_esb_ops, xsrc, "xive.esb-emulated", + esb_len); + memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated); qemu_register_reset(xive_source_reset, dev); } @@ -1502,7 +1515,7 @@ static bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, /* * Notification using the END ESe/ESn bit (Event State Buffer for - * escalation and notification). Profide futher coalescing in the + * escalation and notification). Provide further coalescing in the * Router. 
*/ static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk, @@ -1581,7 +1594,7 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, /* * Check the END ESn (Event State Buffer for notification) for - * even futher coalescing in the Router + * even further coalescing in the Router */ if (!xive_end_is_notify(&end)) { /* ESn[Q]=1 : end of notification */ @@ -1660,7 +1673,7 @@ do_escalation: /* * Check the END ESe (Event State Buffer for escalation) for even - * futher coalescing in the Router + * further coalescing in the Router */ if (!xive_end_is_uncond_escalation(&end)) { /* ESe[Q]=1 : end of notification */ diff --git a/hw/nvram/chrp_nvram.c b/hw/nvram/chrp_nvram.c index d969f26..d4d10a7 100644 --- a/hw/nvram/chrp_nvram.c +++ b/hw/nvram/chrp_nvram.c @@ -21,14 +21,21 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" +#include "qemu/error-report.h" #include "hw/nvram/chrp_nvram.h" #include "sysemu/sysemu.h" -static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str) +static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str, + int max_len) { int len; len = strlen(str) + 1; + + if (max_len < len) { + return -1; + } + memcpy(&nvram[addr], str, len); return addr + len; @@ -38,19 +45,26 @@ static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str) * Create a "system partition", used for the Open Firmware * environment variables. */ -int chrp_nvram_create_system_partition(uint8_t *data, int min_len) +int chrp_nvram_create_system_partition(uint8_t *data, int min_len, int max_len) { ChrpNvramPartHdr *part_header; unsigned int i; int end; + if (max_len < sizeof(*part_header)) { + goto fail; + } + part_header = (ChrpNvramPartHdr *)data; part_header->signature = CHRP_NVPART_SYSTEM; pstrcpy(part_header->name, sizeof(part_header->name), "system"); end = sizeof(ChrpNvramPartHdr); for (i = 0; i < nb_prom_envs; i++) { - end = chrp_nvram_set_var(data, end, prom_envs[i]); + end = chrp_nvram_set_var(data, end, prom_envs[i], max_len - end); + if (end == -1) { + goto fail; + } } /* End marker */ @@ -65,6 +79,10 @@ int chrp_nvram_create_system_partition(uint8_t *data, int min_len) chrp_nvram_finish_partition(part_header, end); return end; + +fail: + error_report("NVRAM is too small. 
Try to pass less data to -prom-env"); + exit(EXIT_FAILURE); } /** diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c index beec1c4..11f2d31 100644 --- a/hw/nvram/mac_nvram.c +++ b/hw/nvram/mac_nvram.c @@ -141,7 +141,7 @@ static void pmac_format_nvram_partition_of(MacIONVRAMState *nvr, int off, /* OpenBIOS nvram variables partition */ sysp_end = chrp_nvram_create_system_partition(&nvr->data[off], - DEF_SYSTEM_SIZE) + off; + DEF_SYSTEM_SIZE, len) + off; /* Free space partition */ chrp_nvram_create_free_partition(&nvr->data[sysp_end], len - sysp_end); diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c index 15d0828..3865134 100644 --- a/hw/nvram/spapr_nvram.c +++ b/hw/nvram/spapr_nvram.c @@ -188,7 +188,8 @@ static void spapr_nvram_realize(SpaprVioDevice *dev, Error **errp) } } else if (nb_prom_envs > 0) { /* Create a system partition to pass the -prom-env variables */ - chrp_nvram_create_system_partition(nvram->buf, MIN_NVRAM_SIZE / 4); + chrp_nvram_create_system_partition(nvram->buf, MIN_NVRAM_SIZE / 4, + nvram->size); chrp_nvram_create_free_partition(&nvram->buf[MIN_NVRAM_SIZE / 4], nvram->size - MIN_NVRAM_SIZE / 4); } diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 1c8d098..dd2fa48 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -558,7 +558,8 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr, int nb_numa_nodes = machine->numa_state->num_nodes; int ret, i, offset; uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; - uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)}; + uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32), + cpu_to_be32(lmb_size & 0xffffffff)}; uint32_t *int_buf, *cur_index, buf_len; int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1; MemoryDeviceInfoList *dimms = NULL; @@ -905,7 +906,8 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) uint32_t lrdr_capacity[] = { cpu_to_be32(max_device_addr >> 32), cpu_to_be32(max_device_addr & 0xffffffff), - 0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE), + cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32), + cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff), cpu_to_be32(ms->smp.max_cpus / ms->smp.threads), }; uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 
1 : 0); diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index 3225fc5..10a80a8 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -180,24 +180,24 @@ static void spapr_cap_set_pagesize(Object *obj, Visitor *v, const char *name, static void cap_htm_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); if (!val) { /* TODO: We don't support disabling htm yet */ return; } if (tcg_enabled()) { - error_setg(errp, - "No Transactional Memory support in TCG," - " try appending -machine cap-htm=off"); + error_setg(errp, "No Transactional Memory support in TCG"); + error_append_hint(errp, "Try appending -machine cap-htm=off\n"); } else if (kvm_enabled() && !kvmppc_has_cap_htm()) { error_setg(errp, -"KVM implementation does not support Transactional Memory," - " try appending -machine cap-htm=off" - ); + "KVM implementation does not support Transactional Memory"); + error_append_hint(errp, "Try appending -machine cap-htm=off\n"); } } static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); PowerPCCPU *cpu = POWERPC_CPU(first_cpu); CPUPPCState *env = &cpu->env; @@ -209,13 +209,14 @@ static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) * rid of anything that doesn't do VMX */ g_assert(env->insns_flags & PPC_ALTIVEC); if (!(env->insns_flags2 & PPC2_VSX)) { - error_setg(errp, "VSX support not available," - " try appending -machine cap-vsx=off"); + error_setg(errp, "VSX support not available"); + error_append_hint(errp, "Try appending -machine cap-vsx=off\n"); } } static void cap_dfp_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); PowerPCCPU *cpu = POWERPC_CPU(first_cpu); CPUPPCState *env = &cpu->env; @@ -224,8 +225,8 @@ static void cap_dfp_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) return; } if (!(env->insns_flags2 & PPC2_DFP)) { - error_setg(errp, "DFP support not available," - " try appending -machine cap-dfp=off"); + error_setg(errp, "DFP support not available"); + error_append_hint(errp, "Try appending -machine cap-dfp=off\n"); } } @@ -239,6 +240,7 @@ SpaprCapPossible cap_cfpc_possible = { static void cap_safe_cache_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); uint8_t kvm_val = kvmppc_get_cap_safe_cache(); if (tcg_enabled() && val) { @@ -247,9 +249,9 @@ static void cap_safe_cache_apply(SpaprMachineState *spapr, uint8_t val, cap_cfpc_possible.vals[val]); } else if (kvm_enabled() && (val > kvm_val)) { error_setg(errp, - "Requested safe cache capability level not supported by kvm," - " try appending -machine cap-cfpc=%s", - cap_cfpc_possible.vals[kvm_val]); + "Requested safe cache capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-cfpc=%s\n", + cap_cfpc_possible.vals[kvm_val]); } } @@ -263,6 +265,7 @@ SpaprCapPossible cap_sbbc_possible = { static void cap_safe_bounds_check_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); uint8_t kvm_val = kvmppc_get_cap_safe_bounds_check(); if (tcg_enabled() && val) { @@ -271,9 +274,9 @@ static void cap_safe_bounds_check_apply(SpaprMachineState *spapr, uint8_t val, cap_sbbc_possible.vals[val]); } else if (kvm_enabled() && (val > kvm_val)) { error_setg(errp, -"Requested safe bounds check capability level not supported by kvm," - " try appending -machine cap-sbbc=%s", - cap_sbbc_possible.vals[kvm_val]); +"Requested safe bounds check capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine 
cap-sbbc=%s\n", + cap_sbbc_possible.vals[kvm_val]); } } @@ -290,6 +293,7 @@ SpaprCapPossible cap_ibs_possible = { static void cap_safe_indirect_branch_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); uint8_t kvm_val = kvmppc_get_cap_safe_indirect_branch(); if (tcg_enabled() && val) { @@ -298,9 +302,9 @@ static void cap_safe_indirect_branch_apply(SpaprMachineState *spapr, cap_ibs_possible.vals[val]); } else if (kvm_enabled() && (val > kvm_val)) { error_setg(errp, -"Requested safe indirect branch capability level not supported by kvm," - " try appending -machine cap-ibs=%s", - cap_ibs_possible.vals[kvm_val]); +"Requested safe indirect branch capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-ibs=%s\n", + cap_ibs_possible.vals[kvm_val]); } } @@ -377,23 +381,35 @@ static void cap_hpt_maxpagesize_cpu_apply(SpaprMachineState *spapr, static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + if (!val) { /* capability disabled by default */ return; } if (tcg_enabled()) { - error_setg(errp, - "No Nested KVM-HV support in tcg," - " try appending -machine cap-nested-hv=off"); + error_setg(errp, "No Nested KVM-HV support in TCG"); + error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n"); } else if (kvm_enabled()) { + if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + error_setg(errp, "Nested KVM-HV only supported on POWER9"); + error_append_hint(errp, + "Try appending -machine max-cpu-compat=power9\n"); + return; + } + if (!kvmppc_has_cap_nested_kvm_hv()) { error_setg(errp, -"KVM implementation does not support Nested KVM-HV," - " try appending -machine cap-nested-hv=off"); + "KVM implementation does not support Nested KVM-HV"); + error_append_hint(errp, + "Try appending -machine cap-nested-hv=off\n"); } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) { - error_setg(errp, -"Error enabling cap-nested-hv with KVM, try cap-nested-hv=off"); + error_setg(errp, "Error enabling cap-nested-hv with KVM"); + error_append_hint(errp, + "Try appending -machine cap-nested-hv=off\n"); } } } @@ -401,6 +417,7 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, static void cap_large_decr_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); PowerPCCPU *cpu = POWERPC_CPU(first_cpu); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); @@ -411,22 +428,23 @@ static void cap_large_decr_apply(SpaprMachineState *spapr, if (tcg_enabled()) { if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, spapr->max_compat_pvr)) { - error_setg(errp, - "Large decrementer only supported on POWER9, try -cpu POWER9"); + error_setg(errp, "Large decrementer only supported on POWER9"); + error_append_hint(errp, "Try -cpu POWER9\n"); return; } } else if (kvm_enabled()) { int kvm_nr_bits = kvmppc_get_cap_large_decr(); if (!kvm_nr_bits) { - error_setg(errp, - "No large decrementer support," - " try appending -machine cap-large-decr=off"); + error_setg(errp, "No large decrementer support"); + error_append_hint(errp, + "Try appending -machine cap-large-decr=off\n"); } else if (pcc->lrg_decr_bits != kvm_nr_bits) { error_setg(errp, -"KVM large decrementer size (%d) differs to model (%d)," - " try appending -machine cap-large-decr=off", - kvm_nr_bits, pcc->lrg_decr_bits); + "KVM large decrementer size (%d) differs to model (%d)", + kvm_nr_bits, pcc->lrg_decr_bits); + error_append_hint(errp, + "Try 
appending -machine cap-large-decr=off\n"); } } } @@ -435,14 +453,15 @@ static void cap_large_decr_cpu_apply(SpaprMachineState *spapr, PowerPCCPU *cpu, uint8_t val, Error **errp) { + ERRP_GUARD(); CPUPPCState *env = &cpu->env; target_ulong lpcr = env->spr[SPR_LPCR]; if (kvm_enabled()) { if (kvmppc_enable_cap_large_decr(cpu, val)) { - error_setg(errp, - "No large decrementer support," - " try appending -machine cap-large-decr=off"); + error_setg(errp, "No large decrementer support"); + error_append_hint(errp, + "Try appending -machine cap-large-decr=off\n"); } } @@ -457,6 +476,7 @@ static void cap_large_decr_cpu_apply(SpaprMachineState *spapr, static void cap_ccf_assist_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); uint8_t kvm_val = kvmppc_get_cap_count_cache_flush_assist(); if (tcg_enabled() && val) { @@ -479,14 +499,15 @@ static void cap_ccf_assist_apply(SpaprMachineState *spapr, uint8_t val, return; } error_setg(errp, -"Requested count cache flush assist capability level not supported by kvm," - " try appending -machine cap-ccf-assist=off"); + "Requested count cache flush assist capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-ccf-assist=off\n"); } } static void cap_fwnmi_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { + ERRP_GUARD(); if (!val) { return; /* Disabled by default */ } diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 2f8f7d6..72bb938 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -139,6 +139,7 @@ SpaprIrq spapr_irq_dual = { static int spapr_irq_check(SpaprMachineState *spapr, Error **errp) { + ERRP_GUARD(); MachineState *machine = MACHINE(spapr); /* @@ -179,14 +180,19 @@ static int spapr_irq_check(SpaprMachineState *spapr, Error **errp) /* * On a POWER9 host, some older KVM XICS devices cannot be destroyed and - * re-created. Detect that early to avoid QEMU to exit later when the - * guest reboots. + * re-created. Same happens with KVM nested guests. Detect that early to + * avoid QEMU to exit later when the guest reboots. */ if (kvm_enabled() && spapr->irq == &spapr_irq_dual && kvm_kernel_irqchip_required() && xics_kvm_has_broken_disconnect(spapr)) { - error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on"); + error_setg(errp, + "KVM is incompatible with ic-mode=dual,kernel-irqchip=on"); + error_append_hint(errp, + "This can happen with an old KVM or in a KVM nested guest.\n"); + error_append_hint(errp, + "Try without kernel-irqchip or with kernel-irqchip=off.\n"); return -1; } diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 363cdb3..0a418f1 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1796,6 +1796,7 @@ static void spapr_phb_destroy_msi(gpointer opaque) static void spapr_phb_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user * tries to add a sPAPR PHB to a non-pseries machine. 
*/ @@ -1813,7 +1814,6 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) uint64_t msi_window_size = 4096; SpaprTceTable *tcet; const unsigned windows_supported = spapr_phb_windows_supported(sphb); - Error *local_err = NULL; if (!spapr) { error_setg(errp, TYPE_SPAPR_PCI_HOST_BRIDGE " needs a pseries machine"); @@ -1964,13 +1964,12 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) /* Initialize the LSI table */ for (i = 0; i < PCI_NUM_PINS; i++) { - uint32_t irq = SPAPR_IRQ_PCI_LSI + sphb->index * PCI_NUM_PINS + i; + int irq = SPAPR_IRQ_PCI_LSI + sphb->index * PCI_NUM_PINS + i; if (smc->legacy_irq_allocation) { - irq = spapr_irq_findone(spapr, &local_err); - if (local_err) { - error_propagate_prepend(errp, local_err, - "can't allocate LSIs: "); + irq = spapr_irq_findone(spapr, errp); + if (irq < 0) { + error_prepend(errp, "can't allocate LSIs: "); /* * Older machines will never support PHB hotplug, ie, this is an * init only path and QEMU will terminate. No need to rollback. @@ -1979,9 +1978,8 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) } } - spapr_irq_claim(spapr, irq, true, &local_err); - if (local_err) { - error_propagate_prepend(errp, local_err, "can't allocate LSIs: "); + if (spapr_irq_claim(spapr, irq, true, errp) < 0) { + error_prepend(errp, "can't allocate LSIs: "); goto unrealize; } diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index 9be9304..cf7dfa4 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -143,7 +143,7 @@ static void nvram_init(Nvram *nvram, uint8_t *macaddr, memset(image, '\0', sizeof(image)); /* OpenBIOS nvram variables partition */ - sysp_end = chrp_nvram_create_system_partition(image, 0); + sysp_end = chrp_nvram_create_system_partition(image, 0, 0x1fd0); /* Free space partition */ chrp_nvram_create_free_partition(&image[sysp_end], 0x1fd0 - sysp_end); diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index 9e30203..37310b7 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -136,7 +136,7 @@ static int sun4u_NVRAM_set_params(Nvram *nvram, uint16_t NVRAM_size, memset(image, '\0', sizeof(image)); /* OpenBIOS nvram variables partition */ - sysp_end = chrp_nvram_create_system_partition(image, 0); + sysp_end = chrp_nvram_create_system_partition(image, 0, 0x1fd0); /* Free space partition */ chrp_nvram_create_free_partition(&image[sysp_end], 0x1fd0 - sysp_end); |