Diffstat (limited to 'hw/intc')
73 files changed, 2423 insertions, 910 deletions
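The most widespread change across these 73 files is mechanical: QOM class_init callbacks now take their class data as a const pointer, and the interface lists in TypeInfo definitions become const InterfaceInfo[]. A minimal sketch of the resulting boilerplate, assuming the usual QEMU QOM/sysbus headers; the names TYPE_EXAMPLE_INTC and ExampleIntcState are illustrative and do not appear in this diff:

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "hw/sysbus.h"

#define TYPE_EXAMPLE_INTC "example-intc"
OBJECT_DECLARE_SIMPLE_TYPE(ExampleIntcState, EXAMPLE_INTC)

struct ExampleIntcState {
    SysBusDevice parent_obj;
};

/* class_init now receives its class data as const void * rather than void *. */
static void example_intc_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "Example interrupt controller";
}

static const TypeInfo example_intc_info = {
    .name          = TYPE_EXAMPLE_INTC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(ExampleIntcState),
    .class_init    = example_intc_class_init,
};

static void example_intc_register_types(void)
{
    type_register_static(&example_intc_info);
}

type_init(example_intc_register_types)

The many one-line class_init and InterfaceInfo hunks in the diff below are all this same signature or qualifier change.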
diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index 93a604f..0409734 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -187,7 +187,7 @@ static void aw_a10_pic_reset(DeviceState *d) } } -static void aw_a10_pic_class_init(ObjectClass *klass, void *data) +static void aw_a10_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/apic.c b/hw/intc/apic.c index d18c1db..bcb1035 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -1176,7 +1176,7 @@ static void apic_unrealize(DeviceState *dev) local_apics[s->initial_apic_id] = NULL; } -static void apic_class_init(ObjectClass *klass, void *data) +static void apic_class_init(ObjectClass *klass, const void *data) { APICCommonClass *k = APIC_COMMON_CLASS(klass); diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index 2a3e878..37a7a70 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -466,7 +466,7 @@ static void apic_common_initfn(Object *obj) apic_common_set_id, NULL, NULL); } -static void apic_common_class_init(ObjectClass *klass, void *data) +static void apic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c index 3581ff8..899f133 100644 --- a/hw/intc/arm_gic.c +++ b/hw/intc/arm_gic.c @@ -59,7 +59,7 @@ static const uint8_t gic_id_gicv2[] = { static inline int gic_get_current_cpu(GICState *s) { if (!qtest_enabled() && s->num_cpu > 1) { - return current_cpu->cpu_index; + return current_cpu->cpu_index - s->first_cpu_index; } return 0; } @@ -2162,7 +2162,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) } -static void arm_gic_class_init(ObjectClass *klass, void *data) +static void arm_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ARMGICClass *agc = ARM_GIC_CLASS(klass); diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 5ac56e3..ed5be05 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -350,6 +350,7 @@ static void arm_gic_common_linux_init(ARMLinuxBootIf *obj, static const Property arm_gic_common_properties[] = { DEFINE_PROP_UINT32("num-cpu", GICState, num_cpu, 1), + DEFINE_PROP_UINT32("first-cpu-index", GICState, first_cpu_index, 0), DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32), /* Revision can be 1 or 2 for GIC architecture specification * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC. 
@@ -362,7 +363,7 @@ static const Property arm_gic_common_properties[] = { DEFINE_PROP_UINT32("num-priority-bits", GICState, n_prio_bits, 8), }; -static void arm_gic_common_class_init(ObjectClass *klass, void *data) +static void arm_gic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -382,7 +383,7 @@ static const TypeInfo arm_gic_common_type = { .class_size = sizeof(ARMGICCommonClass), .class_init = arm_gic_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo []) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ARM_LINUX_BOOT_IF }, { }, }, diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c index 40adb02..1e9232f 100644 --- a/hw/intc/arm_gic_kvm.c +++ b/hw/intc/arm_gic_kvm.c @@ -584,7 +584,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) } } -static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) +static void kvm_arm_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c index 3a8c626..cef0688 100644 --- a/hw/intc/arm_gicv2m.c +++ b/hw/intc/arm_gicv2m.c @@ -175,7 +175,7 @@ static const Property gicv2m_properties[] = { DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64), }; -static void gicv2m_class_init(ObjectClass *klass, void *data) +static void gicv2m_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c index 58e18ff..6059ce9 100644 --- a/hw/intc/arm_gicv3.c +++ b/hw/intc/arm_gicv3.c @@ -452,7 +452,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) gicv3_init_cpuif(s); } -static void arm_gicv3_class_init(ObjectClass *klass, void *data) +static void arm_gicv3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 76b2283..e438d8c 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -612,6 +612,7 @@ static const Property arm_gicv3_common_properties[] = { DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0), DEFINE_PROP_BOOL("has-nmi", GICv3State, nmi_support, 0), DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0), + DEFINE_PROP_UINT32("maintenance-interrupt-id", GICv3State, maint_irq, 0), /* * Compatibility property: force 8 bits of physical priority, even * if the CPU being emulated should have fewer. 
@@ -623,7 +624,7 @@ static const Property arm_gicv3_common_properties[] = { MemoryRegion *), }; -static void arm_gicv3_common_class_init(ObjectClass *klass, void *data) +static void arm_gicv3_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -644,7 +645,7 @@ static const TypeInfo arm_gicv3_common_type = { .class_init = arm_gicv3_common_class_init, .instance_finalize = arm_gicv3_finalize, .abstract = true, - .interfaces = (InterfaceInfo []) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ARM_LINUX_BOOT_IF }, { }, }, diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index de37465..4b4cf09 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -584,7 +584,6 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, } gicv3_cpuif_virt_irq_fiq_update(cs); - return; } static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri) diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c index 936368c..577b445 100644 --- a/hw/intc/arm_gicv3_its.c +++ b/hw/intc/arm_gicv3_its.c @@ -2007,7 +2007,7 @@ static const Property gicv3_its_props[] = { GICv3State *), }; -static void gicv3_its_class_init(ObjectClass *klass, void *data) +static void gicv3_its_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index 70dbee8..e946e3f 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -135,7 +135,7 @@ static void gicv3_its_common_reset_hold(Object *obj, ResetType type) memset(&s->baser, 0, sizeof(s->baser)); } -static void gicv3_its_common_class_init(ObjectClass *klass, void *data) +static void gicv3_its_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index e198974..9812d50 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -239,7 +239,7 @@ static const Property kvm_arm_its_props[] = { GICv3State *), }; -static void kvm_arm_its_class_init(ObjectClass *klass, void *data) +static void kvm_arm_its_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 8e17cab..8ed88e7 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -22,6 +22,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/intc/arm_gicv3_common.h" +#include "hw/arm/virt.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "system/kvm.h" @@ -825,6 +826,34 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) return; } + if (s->maint_irq) { + Error *kvm_nv_migration_blocker = NULL; + int ret; + + error_setg(&kvm_nv_migration_blocker, + "Live migration disabled because KVM nested virt is enabled"); + if (migrate_add_blocker(&kvm_nv_migration_blocker, errp)) { + error_free(kvm_nv_migration_blocker); + return; + } + + ret = kvm_device_check_attr(s->dev_fd, + KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0); + if (!ret) { + error_setg_errno(errp, errno, + "VGICv3 setting maintenance IRQ is not " + "supported by this host kernel"); + return; + } + + ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0, + 
&s->maint_irq, true, errp); + if (ret) { + error_setg_errno(errp, errno, "Failed to set VGIC maintenance IRQ"); + return; + } + } + multiple_redist_region_allowed = kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION); @@ -893,7 +922,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) } } -static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data) +static void kvm_arm_gicv3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 7212c87..7c78961 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -988,6 +988,7 @@ static void nvic_nmi_trigger(void *opaque, int n, int level) static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) { ARMCPU *cpu = s->cpu; + ARMISARegisters *isar = &cpu->isar; uint32_t val; switch (offset) { @@ -1263,74 +1264,74 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_pfr0; + return GET_IDREG(isar, ID_PFR0); case 0xd44: /* PFR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_pfr1; + return GET_IDREG(isar, ID_PFR1); case 0xd48: /* DFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_dfr0; + return GET_IDREG(isar, ID_DFR0); case 0xd4c: /* AFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->id_afr0; + return GET_IDREG(isar, ID_AFR0); case 0xd50: /* MMFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr0; + return GET_IDREG(isar, ID_MMFR0); case 0xd54: /* MMFR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr1; + return GET_IDREG(isar, ID_MMFR1); case 0xd58: /* MMFR2. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr2; + return GET_IDREG(isar, ID_MMFR2); case 0xd5c: /* MMFR3. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr3; + return GET_IDREG(isar, ID_MMFR3); case 0xd60: /* ISAR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar0; + return GET_IDREG(&cpu->isar, ID_ISAR0); case 0xd64: /* ISAR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar1; + return GET_IDREG(&cpu->isar, ID_ISAR1); case 0xd68: /* ISAR2. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar2; + return GET_IDREG(&cpu->isar, ID_ISAR2); case 0xd6c: /* ISAR3. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar3; + return GET_IDREG(&cpu->isar, ID_ISAR3); case 0xd70: /* ISAR4. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar4; + return GET_IDREG(&cpu->isar, ID_ISAR4); case 0xd74: /* ISAR5. 
*/ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar5; + return GET_IDREG(&cpu->isar, ID_ISAR5); case 0xd78: /* CLIDR */ - return cpu->clidr; + return GET_IDREG(&cpu->isar, CLIDR); case 0xd7c: /* CTR */ return cpu->ctr; case 0xd80: /* CSSIDR */ @@ -2730,7 +2731,7 @@ static void armv7m_nvic_instance_init(Object *obj) qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1); } -static void armv7m_nvic_class_init(ObjectClass *klass, void *data) +static void armv7m_nvic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/aspeed_intc.c b/hw/intc/aspeed_intc.c index 3fd4170..5cd786d 100644 --- a/hw/intc/aspeed_intc.c +++ b/hw/intc/aspeed_intc.c @@ -62,6 +62,95 @@ REG32(GICINT196_STATUS, 0x44) REG32(GICINT197_EN, 0x50) REG32(GICINT197_STATUS, 0x54) +/* + * SSP INTC Registers + */ +REG32(SSPINT128_EN, 0x2000) +REG32(SSPINT128_STATUS, 0x2004) +REG32(SSPINT129_EN, 0x2100) +REG32(SSPINT129_STATUS, 0x2104) +REG32(SSPINT130_EN, 0x2200) +REG32(SSPINT130_STATUS, 0x2204) +REG32(SSPINT131_EN, 0x2300) +REG32(SSPINT131_STATUS, 0x2304) +REG32(SSPINT132_EN, 0x2400) +REG32(SSPINT132_STATUS, 0x2404) +REG32(SSPINT133_EN, 0x2500) +REG32(SSPINT133_STATUS, 0x2504) +REG32(SSPINT134_EN, 0x2600) +REG32(SSPINT134_STATUS, 0x2604) +REG32(SSPINT135_EN, 0x2700) +REG32(SSPINT135_STATUS, 0x2704) +REG32(SSPINT136_EN, 0x2800) +REG32(SSPINT136_STATUS, 0x2804) +REG32(SSPINT137_EN, 0x2900) +REG32(SSPINT137_STATUS, 0x2904) +REG32(SSPINT138_EN, 0x2A00) +REG32(SSPINT138_STATUS, 0x2A04) +REG32(SSPINT160_169_EN, 0x2B00) +REG32(SSPINT160_169_STATUS, 0x2B04) + +/* + * SSP INTCIO Registers + */ +REG32(SSPINT160_EN, 0x180) +REG32(SSPINT160_STATUS, 0x184) +REG32(SSPINT161_EN, 0x190) +REG32(SSPINT161_STATUS, 0x194) +REG32(SSPINT162_EN, 0x1A0) +REG32(SSPINT162_STATUS, 0x1A4) +REG32(SSPINT163_EN, 0x1B0) +REG32(SSPINT163_STATUS, 0x1B4) +REG32(SSPINT164_EN, 0x1C0) +REG32(SSPINT164_STATUS, 0x1C4) +REG32(SSPINT165_EN, 0x1D0) +REG32(SSPINT165_STATUS, 0x1D4) + +/* + * TSP INTC Registers + */ +REG32(TSPINT128_EN, 0x3000) +REG32(TSPINT128_STATUS, 0x3004) +REG32(TSPINT129_EN, 0x3100) +REG32(TSPINT129_STATUS, 0x3104) +REG32(TSPINT130_EN, 0x3200) +REG32(TSPINT130_STATUS, 0x3204) +REG32(TSPINT131_EN, 0x3300) +REG32(TSPINT131_STATUS, 0x3304) +REG32(TSPINT132_EN, 0x3400) +REG32(TSPINT132_STATUS, 0x3404) +REG32(TSPINT133_EN, 0x3500) +REG32(TSPINT133_STATUS, 0x3504) +REG32(TSPINT134_EN, 0x3600) +REG32(TSPINT134_STATUS, 0x3604) +REG32(TSPINT135_EN, 0x3700) +REG32(TSPINT135_STATUS, 0x3704) +REG32(TSPINT136_EN, 0x3800) +REG32(TSPINT136_STATUS, 0x3804) +REG32(TSPINT137_EN, 0x3900) +REG32(TSPINT137_STATUS, 0x3904) +REG32(TSPINT138_EN, 0x3A00) +REG32(TSPINT138_STATUS, 0x3A04) +REG32(TSPINT160_169_EN, 0x3B00) +REG32(TSPINT160_169_STATUS, 0x3B04) + +/* + * TSP INTCIO Registers + */ + +REG32(TSPINT160_EN, 0x200) +REG32(TSPINT160_STATUS, 0x204) +REG32(TSPINT161_EN, 0x210) +REG32(TSPINT161_STATUS, 0x214) +REG32(TSPINT162_EN, 0x220) +REG32(TSPINT162_STATUS, 0x224) +REG32(TSPINT163_EN, 0x230) +REG32(TSPINT163_STATUS, 0x234) +REG32(TSPINT164_EN, 0x240) +REG32(TSPINT164_STATUS, 0x244) +REG32(TSPINT165_EN, 0x250) +REG32(TSPINT165_STATUS, 0x254) + static const AspeedINTCIRQ *aspeed_intc_get_irq(AspeedINTCClass *aic, uint32_t reg) { @@ -111,7 +200,7 @@ static void aspeed_intc_set_irq_handler(AspeedINTCState *s, outpin_idx = intc_irq->outpin_idx; inpin_idx = intc_irq->inpin_idx; - if (s->mask[inpin_idx] || s->regs[status_reg]) { + if ((s->mask[inpin_idx] & select) || 
(s->regs[status_reg] & select)) { /* * a. mask is not 0 means in ISR mode * sources interrupt routine are executing. @@ -448,8 +537,90 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data, s->regs[reg] = data; break; } +} - return; +static void aspeed_ssp_intc_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_SSPINT128_EN: + case R_SSPINT129_EN: + case R_SSPINT130_EN: + case R_SSPINT131_EN: + case R_SSPINT132_EN: + case R_SSPINT133_EN: + case R_SSPINT134_EN: + case R_SSPINT135_EN: + case R_SSPINT136_EN: + case R_SSPINT160_169_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_SSPINT128_STATUS: + case R_SSPINT129_STATUS: + case R_SSPINT130_STATUS: + case R_SSPINT131_STATUS: + case R_SSPINT132_STATUS: + case R_SSPINT133_STATUS: + case R_SSPINT134_STATUS: + case R_SSPINT135_STATUS: + case R_SSPINT136_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + case R_SSPINT160_169_STATUS: + aspeed_intc_status_handler_multi_outpins(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } +} + +static void aspeed_tsp_intc_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_TSPINT128_EN: + case R_TSPINT129_EN: + case R_TSPINT130_EN: + case R_TSPINT131_EN: + case R_TSPINT132_EN: + case R_TSPINT133_EN: + case R_TSPINT134_EN: + case R_TSPINT135_EN: + case R_TSPINT136_EN: + case R_TSPINT160_169_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_TSPINT128_STATUS: + case R_TSPINT129_STATUS: + case R_TSPINT130_STATUS: + case R_TSPINT131_STATUS: + case R_TSPINT132_STATUS: + case R_TSPINT133_STATUS: + case R_TSPINT134_STATUS: + case R_TSPINT135_STATUS: + case R_TSPINT136_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + case R_TSPINT160_169_STATUS: + aspeed_intc_status_handler_multi_outpins(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } } static uint64_t aspeed_intcio_read(void *opaque, hwaddr offset, @@ -496,15 +667,77 @@ static void aspeed_intcio_write(void *opaque, hwaddr offset, uint64_t data, s->regs[reg] = data; break; } +} - return; +static void aspeed_ssp_intcio_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_SSPINT160_EN: + case R_SSPINT161_EN: + case R_SSPINT162_EN: + case R_SSPINT163_EN: + case R_SSPINT164_EN: + case R_SSPINT165_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_SSPINT160_STATUS: + case R_SSPINT161_STATUS: + case R_SSPINT162_STATUS: + case R_SSPINT163_STATUS: + case R_SSPINT164_STATUS: + case R_SSPINT165_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } } +static void aspeed_tsp_intcio_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + 
trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_TSPINT160_EN: + case R_TSPINT161_EN: + case R_TSPINT162_EN: + case R_TSPINT163_EN: + case R_TSPINT164_EN: + case R_TSPINT165_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_TSPINT160_STATUS: + case R_TSPINT161_STATUS: + case R_TSPINT162_STATUS: + case R_TSPINT163_STATUS: + case R_TSPINT164_STATUS: + case R_TSPINT165_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } +} static const MemoryRegionOps aspeed_intc_ops = { .read = aspeed_intc_read, .write = aspeed_intc_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -515,6 +748,51 @@ static const MemoryRegionOps aspeed_intcio_ops = { .read = aspeed_intcio_read, .write = aspeed_intcio_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_ssp_intc_ops = { + .read = aspeed_intc_read, + .write = aspeed_ssp_intc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_ssp_intcio_ops = { + .read = aspeed_intcio_read, + .write = aspeed_ssp_intcio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_tsp_intc_ops = { + .read = aspeed_intc_read, + .write = aspeed_tsp_intc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_tsp_intcio_ops = { + .read = aspeed_intcio_read, + .write = aspeed_tsp_intcio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -587,7 +865,7 @@ static void aspeed_intc_unrealize(DeviceState *dev) s->regs = NULL; } -static void aspeed_intc_class_init(ObjectClass *klass, void *data) +static void aspeed_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); @@ -624,7 +902,7 @@ static AspeedINTCIRQ aspeed_2700_intc_irqs[ASPEED_INTC_MAX_INPINS] = { {9, 18, 1, R_GICINT136_EN, R_GICINT136_STATUS}, }; -static void aspeed_2700_intc_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); @@ -655,7 +933,7 @@ static AspeedINTCIRQ aspeed_2700_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { {5, 5, 1, R_GICINT197_EN, R_GICINT197_STATUS}, }; -static void aspeed_2700_intcio_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_intcio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); @@ -678,11 +956,153 @@ static const TypeInfo aspeed_2700_intcio_info = { .class_init = aspeed_2700_intcio_class_init, }; +static AspeedINTCIRQ aspeed_2700ssp_intc_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 10, R_SSPINT160_169_EN, R_SSPINT160_169_STATUS}, + {1, 10, 1, R_SSPINT128_EN, R_SSPINT128_STATUS}, + {2, 11, 1, R_SSPINT129_EN, R_SSPINT129_STATUS}, + {3, 12, 1, R_SSPINT130_EN, R_SSPINT130_STATUS}, + {4, 13, 1, 
R_SSPINT131_EN, R_SSPINT131_STATUS}, + {5, 14, 1, R_SSPINT132_EN, R_SSPINT132_STATUS}, + {6, 15, 1, R_SSPINT133_EN, R_SSPINT133_STATUS}, + {7, 16, 1, R_SSPINT134_EN, R_SSPINT134_STATUS}, + {8, 17, 1, R_SSPINT135_EN, R_SSPINT135_STATUS}, + {9, 18, 1, R_SSPINT136_EN, R_SSPINT136_STATUS}, +}; + +static void aspeed_2700ssp_intc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 SSP INTC Controller"; + aic->num_lines = 32; + aic->num_inpins = 10; + aic->num_outpins = 19; + aic->mem_size = 0x4000; + aic->nr_regs = 0x2B08 >> 2; + aic->reg_offset = 0x0; + aic->reg_ops = &aspeed_ssp_intc_ops; + aic->irq_table = aspeed_2700ssp_intc_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700ssp_intc_irqs); +} + +static const TypeInfo aspeed_2700ssp_intc_info = { + .name = TYPE_ASPEED_2700SSP_INTC, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700ssp_intc_class_init, +}; + +static AspeedINTCIRQ aspeed_2700ssp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 1, R_SSPINT160_EN, R_SSPINT160_STATUS}, + {1, 1, 1, R_SSPINT161_EN, R_SSPINT161_STATUS}, + {2, 2, 1, R_SSPINT162_EN, R_SSPINT162_STATUS}, + {3, 3, 1, R_SSPINT163_EN, R_SSPINT163_STATUS}, + {4, 4, 1, R_SSPINT164_EN, R_SSPINT164_STATUS}, + {5, 5, 1, R_SSPINT165_EN, R_SSPINT165_STATUS}, +}; + +static void aspeed_2700ssp_intcio_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 SSP INTC IO Controller"; + aic->num_lines = 32; + aic->num_inpins = 6; + aic->num_outpins = 6; + aic->mem_size = 0x400; + aic->nr_regs = 0x1d8 >> 2; + aic->reg_offset = 0; + aic->reg_ops = &aspeed_ssp_intcio_ops; + aic->irq_table = aspeed_2700ssp_intcio_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700ssp_intcio_irqs); +} + +static const TypeInfo aspeed_2700ssp_intcio_info = { + .name = TYPE_ASPEED_2700SSP_INTCIO, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700ssp_intcio_class_init, +}; + +static AspeedINTCIRQ aspeed_2700tsp_intc_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 10, R_TSPINT160_169_EN, R_TSPINT160_169_STATUS}, + {1, 10, 1, R_TSPINT128_EN, R_TSPINT128_STATUS}, + {2, 11, 1, R_TSPINT129_EN, R_TSPINT129_STATUS}, + {3, 12, 1, R_TSPINT130_EN, R_TSPINT130_STATUS}, + {4, 13, 1, R_TSPINT131_EN, R_TSPINT131_STATUS}, + {5, 14, 1, R_TSPINT132_EN, R_TSPINT132_STATUS}, + {6, 15, 1, R_TSPINT133_EN, R_TSPINT133_STATUS}, + {7, 16, 1, R_TSPINT134_EN, R_TSPINT134_STATUS}, + {8, 17, 1, R_TSPINT135_EN, R_TSPINT135_STATUS}, + {9, 18, 1, R_TSPINT136_EN, R_TSPINT136_STATUS}, +}; + +static void aspeed_2700tsp_intc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 TSP INTC Controller"; + aic->num_lines = 32; + aic->num_inpins = 10; + aic->num_outpins = 19; + aic->mem_size = 0x4000; + aic->nr_regs = 0x3B08 >> 2; + aic->reg_offset = 0; + aic->reg_ops = &aspeed_tsp_intc_ops; + aic->irq_table = aspeed_2700tsp_intc_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700tsp_intc_irqs); +} + +static const TypeInfo aspeed_2700tsp_intc_info = { + .name = TYPE_ASPEED_2700TSP_INTC, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700tsp_intc_class_init, +}; + +static AspeedINTCIRQ aspeed_2700tsp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 1, R_TSPINT160_EN, R_TSPINT160_STATUS}, + {1, 1, 1, R_TSPINT161_EN, 
R_TSPINT161_STATUS}, + {2, 2, 1, R_TSPINT162_EN, R_TSPINT162_STATUS}, + {3, 3, 1, R_TSPINT163_EN, R_TSPINT163_STATUS}, + {4, 4, 1, R_TSPINT164_EN, R_TSPINT164_STATUS}, + {5, 5, 1, R_TSPINT165_EN, R_TSPINT165_STATUS}, +}; + +static void aspeed_2700tsp_intcio_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 TSP INTC IO Controller"; + aic->num_lines = 32; + aic->num_inpins = 6; + aic->num_outpins = 6; + aic->mem_size = 0x400; + aic->nr_regs = 0x258 >> 2; + aic->reg_offset = 0x0; + aic->reg_ops = &aspeed_tsp_intcio_ops; + aic->irq_table = aspeed_2700tsp_intcio_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700tsp_intcio_irqs); +} + +static const TypeInfo aspeed_2700tsp_intcio_info = { + .name = TYPE_ASPEED_2700TSP_INTCIO, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700tsp_intcio_class_init, +}; + static void aspeed_intc_register_types(void) { type_register_static(&aspeed_intc_info); type_register_static(&aspeed_2700_intc_info); type_register_static(&aspeed_2700_intcio_info); + type_register_static(&aspeed_2700ssp_intc_info); + type_register_static(&aspeed_2700ssp_intcio_info); + type_register_static(&aspeed_2700tsp_intc_info); + type_register_static(&aspeed_2700tsp_intcio_info); } type_init(aspeed_intc_register_types); diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c index 55fe51a..7120088 100644 --- a/hw/intc/aspeed_vic.c +++ b/hw/intc/aspeed_vic.c @@ -339,7 +339,7 @@ static const VMStateDescription vmstate_aspeed_vic = { } }; -static void aspeed_vic_class_init(ObjectClass *klass, void *data) +static void aspeed_vic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_vic_realize; diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c index 4a42fcf..55e0a5a 100644 --- a/hw/intc/bcm2835_ic.c +++ b/hw/intc/bcm2835_ic.c @@ -219,7 +219,7 @@ static const VMStateDescription vmstate_bcm2835_ic = { } }; -static void bcm2835_ic_class_init(ObjectClass *klass, void *data) +static void bcm2835_ic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c index 197a0e2..1c02853 100644 --- a/hw/intc/bcm2836_control.c +++ b/hw/intc/bcm2836_control.c @@ -384,7 +384,7 @@ static const VMStateDescription vmstate_bcm2836_control = { } }; -static void bcm2836_control_class_init(ObjectClass *klass, void *data) +static void bcm2836_control_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c index 6ddbcd4..ebbe234 100644 --- a/hw/intc/exynos4210_combiner.c +++ b/hw/intc/exynos4210_combiner.c @@ -329,7 +329,7 @@ static const Property exynos4210_combiner_properties[] = { DEFINE_PROP_UINT32("external", Exynos4210CombinerState, external, 0), }; -static void exynos4210_combiner_class_init(ObjectClass *klass, void *data) +static void exynos4210_combiner_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c index 01a5393..7e2d79d 100644 --- a/hw/intc/exynos4210_gic.c +++ b/hw/intc/exynos4210_gic.c @@ -115,7 +115,7 @@ static const Property exynos4210_gic_properties[] = { DEFINE_PROP_UINT32("num-cpu", Exynos4210GicState, num_cpu, 1), }; -static void exynos4210_gic_class_init(ObjectClass *klass, void *data) +static 
void exynos4210_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index aa5162c..2359861 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -185,7 +185,7 @@ static const Property goldfish_pic_properties[] = { DEFINE_PROP_UINT8("index", GoldfishPICState, idx, 0), }; -static void goldfish_pic_class_init(ObjectClass *oc, void *data) +static void goldfish_pic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc); @@ -204,7 +204,7 @@ static const TypeInfo goldfish_pic_info = { .class_init = goldfish_pic_class_init, .instance_init = goldfish_pic_instance_init, .instance_size = sizeof(GoldfishPICState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c index 95cdb41..e0f2646 100644 --- a/hw/intc/grlib_irqmp.c +++ b/hw/intc/grlib_irqmp.c @@ -380,7 +380,7 @@ static const Property grlib_irqmp_properties[] = { DEFINE_PROP_UINT32("ncpus", IRQMP, ncpus, 1), }; -static void grlib_irqmp_class_init(ObjectClass *klass, void *data) +static void grlib_irqmp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c index 729498f..447e8c2 100644 --- a/hw/intc/heathrow_pic.c +++ b/hw/intc/heathrow_pic.c @@ -184,7 +184,7 @@ static void heathrow_init(Object *obj) sysbus_init_mmio(sbd, &s->mem); } -static void heathrow_class_init(ObjectClass *oc, void *data) +static void heathrow_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c index d88b20f..b6f96bf 100644 --- a/hw/intc/i8259.c +++ b/hw/intc/i8259.c @@ -32,10 +32,7 @@ #include "trace.h" #include "qom/object.h" -/* debug PIC */ -//#define DEBUG_PIC - -//#define DEBUG_IRQ_LATENCY +/*#define DEBUG_IRQ_LATENCY*/ #define TYPE_I8259 "isa-i8259" typedef struct PICClass PICClass; @@ -436,7 +433,7 @@ qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in) return irq_set; } -static void i8259_class_init(ObjectClass *klass, void *data) +static void i8259_class_init(ObjectClass *klass, const void *data) { PICClass *k = PIC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c index c77ff68..602e44c 100644 --- a/hw/intc/i8259_common.c +++ b/hw/intc/i8259_common.c @@ -200,7 +200,7 @@ static const Property pic_properties_common[] = { DEFINE_PROP_BIT("master", PICCommonState, master, 0, false), }; -static void pic_common_class_init(ObjectClass *klass, void *data) +static void pic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); @@ -226,7 +226,7 @@ static const TypeInfo pic_common_type = { .class_size = sizeof(PICCommonClass), .class_init = pic_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c index e1c9ce7..09c3bfa 100644 --- a/hw/intc/imx_avic.c +++ b/hw/intc/imx_avic.c @@ -341,7 +341,7 @@ static void imx_avic_init(Object *obj) } -static void imx_avic_class_init(ObjectClass *klass, void *data) +static void 
imx_avic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c index 9e5cf28..58d286c 100644 --- a/hw/intc/imx_gpcv2.c +++ b/hw/intc/imx_gpcv2.c @@ -102,7 +102,7 @@ static const VMStateDescription vmstate_imx_gpcv2 = { }, }; -static void imx_gpcv2_class_init(ObjectClass *klass, void *data) +static void imx_gpcv2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 8cd1d85..133bef8 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -480,7 +480,7 @@ static const Property ioapic_properties[] = { DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF), }; -static void ioapic_class_init(ObjectClass *klass, void *data) +static void ioapic_class_init(ObjectClass *klass, const void *data) { IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c index 7698963..fce3486 100644 --- a/hw/intc/ioapic_common.c +++ b/hw/intc/ioapic_common.c @@ -197,7 +197,7 @@ static const VMStateDescription vmstate_ioapic_common = { } }; -static void ioapic_common_class_init(ObjectClass *klass, void *data) +static void ioapic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); @@ -215,7 +215,7 @@ static const TypeInfo ioapic_common_type = { .class_size = sizeof(IOAPICCommonClass), .class_init = ioapic_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/ioapic_internal.h b/hw/intc/ioapic_internal.h index 37b8565..5120576 100644 --- a/hw/intc/ioapic_internal.h +++ b/hw/intc/ioapic_internal.h @@ -22,7 +22,7 @@ #ifndef HW_INTC_IOAPIC_INTERNAL_H #define HW_INTC_IOAPIC_INTERNAL_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/intc/ioapic.h" #include "hw/sysbus.h" #include "qemu/notify.h" diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index a51a215..3e9c88d 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -11,7 +11,8 @@ #include "qapi/error.h" #include "hw/irq.h" #include "hw/loongarch/virt.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "system/kvm.h" #include "hw/intc/loongarch_extioi.h" #include "trace.h" @@ -351,37 +352,51 @@ static void loongarch_extioi_realize(DeviceState *dev, Error **errp) return; } - for (i = 0; i < EXTIOI_IRQS; i++) { - sysbus_init_irq(sbd, &s->irq[i]); - } - - qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); - memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, - s, "extioi_system_mem", 0x900); - sysbus_init_mmio(sbd, &s->extioi_system_mem); - if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) { - memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops, - s, "extioi_virt", EXTIOI_VIRT_SIZE); - sysbus_init_mmio(sbd, &s->virt_extend); s->features |= EXTIOI_VIRT_HAS_FEATURES; } else { s->status |= BIT(EXTIOI_ENABLE); } + + if (kvm_irqchip_in_kernel()) { + kvm_extioi_realize(dev, errp); + } else { + for (i = 0; i < EXTIOI_IRQS; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); + memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, + s, "extioi_system_mem", 0x900); + 
sysbus_init_mmio(sbd, &s->extioi_system_mem); + if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) { + memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops, + s, "extioi_virt", EXTIOI_VIRT_SIZE); + sysbus_init_mmio(sbd, &s->virt_extend); + } + } } -static void loongarch_extioi_unrealize(DeviceState *dev) +static void loongarch_extioi_reset_hold(Object *obj, ResetType type) { - LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev); + LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(obj); + + if (lec->parent_phases.hold) { + lec->parent_phases.hold(obj, type); + } - g_free(s->cpu); + if (kvm_irqchip_in_kernel()) { + kvm_extioi_put(obj, 0); + } } -static void loongarch_extioi_reset(DeviceState *d) +static int vmstate_extioi_pre_save(void *opaque) { - LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(d); + if (kvm_irqchip_in_kernel()) { + return kvm_extioi_get(opaque); + } - s->status = 0; + return 0; } static int vmstate_extioi_post_load(void *opaque, int version_id) @@ -389,6 +404,10 @@ static int vmstate_extioi_post_load(void *opaque, int version_id) LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); int i, start_irq; + if (kvm_irqchip_in_kernel()) { + return kvm_extioi_put(opaque, version_id); + } + for (i = 0; i < (EXTIOI_IRQS / 4); i++) { start_irq = i * 4; extioi_update_sw_coremap(s, start_irq, s->coremap[i], false); @@ -401,17 +420,18 @@ static int vmstate_extioi_post_load(void *opaque, int version_id) return 0; } -static void loongarch_extioi_class_init(ObjectClass *klass, void *data) +static void loongarch_extioi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_CLASS(klass); LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_parent_realize(dc, loongarch_extioi_realize, &lec->parent_realize); - device_class_set_parent_unrealize(dc, loongarch_extioi_unrealize, - &lec->parent_unrealize); - device_class_set_legacy_reset(dc, loongarch_extioi_reset); + resettable_class_set_parent_phases(rc, NULL, loongarch_extioi_reset_hold, + NULL, &lec->parent_phases); + lecc->pre_save = vmstate_extioi_pre_save; lecc->post_load = vmstate_extioi_post_load; } diff --git a/hw/intc/loongarch_extioi_common.c b/hw/intc/loongarch_extioi_common.c index ff3974f..ba03383 100644 --- a/hw/intc/loongarch_extioi_common.c +++ b/hw/intc/loongarch_extioi_common.c @@ -108,6 +108,50 @@ static void loongarch_extioi_common_realize(DeviceState *dev, Error **errp) } } +static void loongarch_extioi_common_unrealize(DeviceState *dev) +{ + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev); + + g_free(s->cpu); +} + +static void loongarch_extioi_common_reset_hold(Object *obj, ResetType type) +{ + LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(obj); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(obj); + ExtIOICore *core; + int i; + + if (lecc->parent_phases.hold) { + lecc->parent_phases.hold(obj, type); + } + + /* Clear HW registers for the board */ + memset(s->nodetype, 0, sizeof(s->nodetype)); + memset(s->bounce, 0, sizeof(s->bounce)); + memset(s->isr, 0, sizeof(s->isr)); + memset(s->enable, 0, sizeof(s->enable)); + memset(s->ipmap, 0, sizeof(s->ipmap)); + memset(s->coremap, 0, sizeof(s->coremap)); + memset(s->sw_pending, 0, sizeof(s->sw_pending)); + memset(s->sw_ipmap, 0, sizeof(s->sw_ipmap)); + memset(s->sw_coremap, 0, sizeof(s->sw_coremap)); + + for (i = 0; i < 
s->num_cpu; i++) { + core = s->cpu + i; + /* EXTIOI with targeted CPU available however not present */ + if (!core->cpu) { + continue; + } + + /* Clear HW registers for CPUs */ + memset(core->coreisr, 0, sizeof(core->coreisr)); + memset(core->sw_isr, 0, sizeof(core->sw_isr)); + } + + s->status = 0; +} + static int loongarch_extioi_common_pre_save(void *opaque) { LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)opaque; @@ -174,14 +218,21 @@ static const Property extioi_properties[] = { features, EXTIOI_HAS_VIRT_EXTENSION, 0), }; -static void loongarch_extioi_common_class_init(ObjectClass *klass, void *data) +static void loongarch_extioi_common_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_parent_realize(dc, loongarch_extioi_common_realize, &lecc->parent_realize); + device_class_set_parent_unrealize(dc, loongarch_extioi_common_unrealize, + &lecc->parent_unrealize); + resettable_class_set_parent_phases(rc, NULL, + loongarch_extioi_common_reset_hold, + NULL, &lecc->parent_phases); device_class_set_props(dc, extioi_properties); dc->vmsd = &vmstate_loongarch_extioi; hc->plug = loongarch_extioi_cpu_plug; @@ -195,7 +246,7 @@ static const TypeInfo loongarch_extioi_common_types[] = { .instance_size = sizeof(LoongArchExtIOICommonState), .class_size = sizeof(LoongArchExtIOICommonClass), .class_init = loongarch_extioi_common_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, diff --git a/hw/intc/loongarch_extioi_kvm.c b/hw/intc/loongarch_extioi_kvm.c new file mode 100644 index 0000000..aa2e8c7 --- /dev/null +++ b/hw/intc/loongarch_extioi_kvm.c @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch EXTIOI interrupt kvm support + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/intc/loongarch_extioi.h" +#include "linux/kvm.h" +#include "qapi/error.h" +#include "system/kvm.h" + +static void kvm_extioi_access_reg(int fd, uint64_t addr, void *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS, + addr, val, write, &error_abort); +} + +static void kvm_extioi_access_sw_state(int fd, uint64_t addr, + void *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS, + addr, val, write, &error_abort); +} + +static void kvm_extioi_access_sw_status(void *opaque, bool write) +{ + LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(opaque); + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque); + int addr; + + addr = KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE; + kvm_extioi_access_sw_state(les->dev_fd, addr, &lecs->status, write); +} + +static void kvm_extioi_access_regs(void *opaque, bool write) +{ + LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(opaque); + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque); + int fd = les->dev_fd; + int addr, offset, cpu; + + for (addr = EXTIOI_NODETYPE_START; addr < EXTIOI_NODETYPE_END; addr += 4) { + offset = (addr - EXTIOI_NODETYPE_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->nodetype[offset], write); + } + + for (addr = EXTIOI_IPMAP_START; addr < EXTIOI_IPMAP_END; addr += 4) { + offset = (addr - EXTIOI_IPMAP_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->ipmap[offset], write); + } + + 
for (addr = EXTIOI_ENABLE_START; addr < EXTIOI_ENABLE_END; addr += 4) { + offset = (addr - EXTIOI_ENABLE_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->enable[offset], write); + } + + for (addr = EXTIOI_BOUNCE_START; addr < EXTIOI_BOUNCE_END; addr += 4) { + offset = (addr - EXTIOI_BOUNCE_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->bounce[offset], write); + } + + for (addr = EXTIOI_ISR_START; addr < EXTIOI_ISR_END; addr += 4) { + offset = (addr - EXTIOI_ISR_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->isr[offset], write); + } + + for (addr = EXTIOI_COREMAP_START; addr < EXTIOI_COREMAP_END; addr += 4) { + offset = (addr - EXTIOI_COREMAP_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->coremap[offset], write); + } + + for (cpu = 0; cpu < lecs->num_cpu; cpu++) { + for (addr = EXTIOI_COREISR_START; + addr < EXTIOI_COREISR_END; addr += 4) { + offset = (addr - EXTIOI_COREISR_START) / 4; + kvm_extioi_access_reg(fd, (cpu << 16) | addr, + &lecs->cpu[cpu].coreisr[offset], write); + } + } +} + +int kvm_extioi_get(void *opaque) +{ + kvm_extioi_access_regs(opaque, false); + kvm_extioi_access_sw_status(opaque, false); + return 0; +} + +int kvm_extioi_put(void *opaque, int version_id) +{ + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque); + int fd = les->dev_fd; + + if (fd == 0) { + return 0; + } + + kvm_extioi_access_regs(opaque, true); + kvm_extioi_access_sw_status(opaque, true); + kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, + KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED, + NULL, true, &error_abort); + return 0; +} + +void kvm_extioi_realize(DeviceState *dev, Error **errp) +{ + LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(dev); + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(dev); + int ret; + + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_EIOINTC, false); + if (ret < 0) { + fprintf(stderr, "create KVM_LOONGARCH_EIOINTC failed: %s\n", + strerror(-ret)); + abort(); + } + + les->dev_fd = ret; + ret = kvm_device_access(les->dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, + KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU, + &lecs->num_cpu, true, NULL); + if (ret < 0) { + fprintf(stderr, "KVM_LOONGARCH_EXTIOI_INIT_NUM_CPU failed: %s\n", + strerror(-ret)); + abort(); + } + + ret = kvm_device_access(les->dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, + KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE, + &lecs->features, true, NULL); + if (ret < 0) { + fprintf(stderr, "KVM_LOONGARCH_EXTIOI_INIT_FEATURE failed: %s\n", + strerror(-ret)); + abort(); + } +} diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c index b10641d..fc8005c 100644 --- a/hw/intc/loongarch_ipi.c +++ b/hw/intc/loongarch_ipi.c @@ -11,6 +11,7 @@ #include "qapi/error.h" #include "hw/intc/loongarch_ipi.h" #include "hw/qdev-properties.h" +#include "system/kvm.h" #include "target/loongarch/cpu.h" static AddressSpace *get_iocsr_as(CPUState *cpu) @@ -91,6 +92,40 @@ static void loongarch_ipi_realize(DeviceState *dev, Error **errp) lics->cpu[i].ipi = lics; qdev_init_gpio_out(dev, &lics->cpu[i].irq, 1); } + + if (kvm_irqchip_in_kernel()) { + kvm_ipi_realize(dev, errp); + } +} + +static void loongarch_ipi_reset_hold(Object *obj, ResetType type) +{ + int i; + LoongarchIPIClass *lic = LOONGARCH_IPI_GET_CLASS(obj); + LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(obj); + IPICore *core; + + if (lic->parent_phases.hold) { + lic->parent_phases.hold(obj, type); + } + + for (i = 0; i < lics->num_cpu; i++) { + core = lics->cpu + i; + /* IPI with targeted CPU available however not present */ + if 
(!core->cpu) { + continue; + } + + core->status = 0; + core->en = 0; + core->set = 0; + core->clear = 0; + memset(core->buf, 0, sizeof(core->buf)); + } + + if (kvm_irqchip_in_kernel()) { + kvm_ipi_put(obj, 0); + } } static void loongarch_ipi_cpu_plug(HotplugHandler *hotplug_dev, @@ -140,19 +175,42 @@ static void loongarch_ipi_cpu_unplug(HotplugHandler *hotplug_dev, core->cpu = NULL; } -static void loongarch_ipi_class_init(ObjectClass *klass, void *data) +static int loongarch_ipi_pre_save(void *opaque) +{ + if (kvm_irqchip_in_kernel()) { + return kvm_ipi_get(opaque); + } + + return 0; +} + +static int loongarch_ipi_post_load(void *opaque, int version_id) +{ + if (kvm_irqchip_in_kernel()) { + return kvm_ipi_put(opaque, version_id); + } + + return 0; +} + +static void loongarch_ipi_class_init(ObjectClass *klass, const void *data) { LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); LoongarchIPIClass *lic = LOONGARCH_IPI_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_parent_realize(dc, loongarch_ipi_realize, &lic->parent_realize); + resettable_class_set_parent_phases(rc, NULL, loongarch_ipi_reset_hold, + NULL, &lic->parent_phases); licc->get_iocsr_as = get_iocsr_as; licc->cpu_by_arch_id = loongarch_cpu_by_arch_id; hc->plug = loongarch_ipi_cpu_plug; hc->unplug = loongarch_ipi_cpu_unplug; + licc->pre_save = loongarch_ipi_pre_save; + licc->post_load = loongarch_ipi_post_load; } static const TypeInfo loongarch_ipi_types[] = { @@ -162,7 +220,7 @@ static const TypeInfo loongarch_ipi_types[] = { .instance_size = sizeof(LoongarchIPIState), .class_size = sizeof(LoongarchIPIClass), .class_init = loongarch_ipi_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, diff --git a/hw/intc/loongarch_ipi_kvm.c b/hw/intc/loongarch_ipi_kvm.c new file mode 100644 index 0000000..dd4c367 --- /dev/null +++ b/hw/intc/loongarch_ipi_kvm.c @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch IPI interrupt KVM support + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/intc/loongarch_ipi.h" +#include "system/kvm.h" +#include "target/loongarch/cpu.h" + +static void kvm_ipi_access_reg(int fd, uint64_t addr, uint32_t *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_IPI_GRP_REGS, + addr, val, write, &error_abort); +} + +static void kvm_ipi_access_regs(void *opaque, bool write) +{ + LoongsonIPICommonState *ipi = (LoongsonIPICommonState *)opaque; + LoongarchIPIState *lis = LOONGARCH_IPI(opaque); + IPICore *core; + uint64_t attr; + int i, cpu_index, fd = lis->dev_fd; + + if (fd == 0) { + return; + } + + for (i = 0; i < ipi->num_cpu; i++) { + core = &ipi->cpu[i]; + if (core->cpu == NULL) { + continue; + } + cpu_index = i; + + attr = (cpu_index << 16) | CORE_STATUS_OFF; + kvm_ipi_access_reg(fd, attr, &core->status, write); + + attr = (cpu_index << 16) | CORE_EN_OFF; + kvm_ipi_access_reg(fd, attr, &core->en, write); + + attr = (cpu_index << 16) | CORE_SET_OFF; + kvm_ipi_access_reg(fd, attr, &core->set, write); + + attr = (cpu_index << 16) | CORE_CLEAR_OFF; + kvm_ipi_access_reg(fd, attr, &core->clear, write); + + attr = (cpu_index << 16) | CORE_BUF_20; + kvm_ipi_access_reg(fd, attr, &core->buf[0], write); + + attr = (cpu_index << 16) | CORE_BUF_28; + kvm_ipi_access_reg(fd, attr, &core->buf[2], 
write); + + attr = (cpu_index << 16) | CORE_BUF_30; + kvm_ipi_access_reg(fd, attr, &core->buf[4], write); + + attr = (cpu_index << 16) | CORE_BUF_38; + kvm_ipi_access_reg(fd, attr, &core->buf[6], write); + } +} + +int kvm_ipi_get(void *opaque) +{ + kvm_ipi_access_regs(opaque, false); + return 0; +} + +int kvm_ipi_put(void *opaque, int version_id) +{ + kvm_ipi_access_regs(opaque, true); + return 0; +} + +void kvm_ipi_realize(DeviceState *dev, Error **errp) +{ + LoongarchIPIState *lis = LOONGARCH_IPI(dev); + int ret; + + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_IPI, false); + if (ret < 0) { + fprintf(stderr, "IPI KVM_CREATE_DEVICE failed: %s\n", + strerror(-ret)); + abort(); + } + + lis->dev_fd = ret; +} diff --git a/hw/intc/loongarch_pch_msi.c b/hw/intc/loongarch_pch_msi.c index 66b5c1e..f6d1631 100644 --- a/hw/intc/loongarch_pch_msi.c +++ b/hw/intc/loongarch_pch_msi.c @@ -13,6 +13,7 @@ #include "hw/pci/msi.h" #include "hw/misc/unimp.h" #include "migration/vmstate.h" +#include "system/kvm.h" #include "trace.h" static uint64_t loongarch_msi_mem_read(void *opaque, hwaddr addr, unsigned size) @@ -26,6 +27,15 @@ static void loongarch_msi_mem_write(void *opaque, hwaddr addr, LoongArchPCHMSI *s = (LoongArchPCHMSI *)opaque; int irq_num; + if (kvm_irqchip_in_kernel()) { + MSIMessage msg; + + msg.address = addr; + msg.data = val; + kvm_irqchip_send_msi(kvm_state, msg); + return; + } + /* * vector number is irq number from upper extioi intc * need subtract irq base to get msi vector offset @@ -42,13 +52,6 @@ static const MemoryRegionOps loongarch_pch_msi_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void pch_msi_irq_handler(void *opaque, int irq, int level) -{ - LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(opaque); - - qemu_set_irq(s->pch_msi_irq[irq], level); -} - static void loongarch_pch_msi_realize(DeviceState *dev, Error **errp) { LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(dev); @@ -59,9 +62,7 @@ static void loongarch_pch_msi_realize(DeviceState *dev, Error **errp) } s->pch_msi_irq = g_new(qemu_irq, s->irq_num); - qdev_init_gpio_out(dev, s->pch_msi_irq, s->irq_num); - qdev_init_gpio_in(dev, pch_msi_irq_handler, s->irq_num); } static void loongarch_pch_msi_unrealize(DeviceState *dev) @@ -88,7 +89,7 @@ static const Property loongarch_msi_properties[] = { DEFINE_PROP_UINT32("msi_irq_num", LoongArchPCHMSI, irq_num, 0), }; -static void loongarch_pch_msi_class_init(ObjectClass *klass, void *data) +static void loongarch_pch_msi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c index acd75cc..c4b242d 100644 --- a/hw/intc/loongarch_pch_pic.c +++ b/hw/intc/loongarch_pch_pic.c @@ -7,8 +7,10 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" +#include "qemu/log.h" #include "hw/irq.h" #include "hw/intc/loongarch_pch_pic.h" +#include "system/kvm.h" #include "trace.h" #include "qapi/error.h" @@ -47,6 +49,11 @@ static void pch_pic_irq_handler(void *opaque, int irq, int level) assert(irq < s->irq_num); trace_loongarch_pch_pic_irq_handler(irq, level); + if (kvm_irqchip_in_kernel()) { + kvm_set_irq(kvm_state, irq, !!level); + return; + } + if (s->intedge & mask) { /* Edge triggered */ if (level) { @@ -71,308 +78,196 @@ static void pch_pic_irq_handler(void *opaque, int irq, int level) pch_pic_update_irq(s, mask, level); } -static uint64_t loongarch_pch_pic_low_readw(void *opaque, hwaddr addr, - unsigned size) +static uint64_t pch_pic_read(void *opaque, hwaddr addr, uint64_t field_mask) { 
LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); uint64_t val = 0; - uint32_t offset = addr & 0xfff; + uint32_t offset; - switch (offset) { - case PCH_PIC_INT_ID_LO: - val = PCH_PIC_INT_ID_VAL; + offset = addr & 7; + addr -= offset; + switch (addr) { + case PCH_PIC_INT_ID: + val = cpu_to_le64(s->id.data); break; - case PCH_PIC_INT_ID_HI: - /* - * With 7A1000 manual - * bit 0-15 pch irqchip version - * bit 16-31 irq number supported with pch irqchip - */ - val = deposit32(PCH_PIC_INT_ID_VER, 16, 16, s->irq_num - 1); + case PCH_PIC_INT_MASK: + val = s->int_mask; break; - case PCH_PIC_INT_MASK_LO: - val = (uint32_t)s->int_mask; + case PCH_PIC_INT_EDGE: + val = s->intedge; break; - case PCH_PIC_INT_MASK_HI: - val = s->int_mask >> 32; + case PCH_PIC_HTMSI_EN: + val = s->htmsi_en; break; - case PCH_PIC_INT_EDGE_LO: - val = (uint32_t)s->intedge; + case PCH_PIC_AUTO_CTRL0: + case PCH_PIC_AUTO_CTRL1: + /* PCH PIC connect to EXTIOI always, discard auto_ctrl access */ break; - case PCH_PIC_INT_EDGE_HI: - val = s->intedge >> 32; + case PCH_PIC_INT_STATUS: + val = s->intisr & (~s->int_mask); break; - case PCH_PIC_HTMSI_EN_LO: - val = (uint32_t)s->htmsi_en; + case PCH_PIC_INT_POL: + val = s->int_polarity; break; - case PCH_PIC_HTMSI_EN_HI: - val = s->htmsi_en >> 32; + case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END: + val = *(uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC); break; - case PCH_PIC_AUTO_CTRL0_LO: - case PCH_PIC_AUTO_CTRL0_HI: - case PCH_PIC_AUTO_CTRL1_LO: - case PCH_PIC_AUTO_CTRL1_HI: + case PCH_PIC_ROUTE_ENTRY ... PCH_PIC_ROUTE_ENTRY_END: + val = *(uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY); break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "pch_pic_read: Bad address 0x%"PRIx64"\n", addr); break; } - trace_loongarch_pch_pic_low_readw(size, addr, val); - return val; + return (val >> (offset * 8)) & field_mask; } -static uint64_t get_writew_val(uint64_t value, uint32_t target, bool hi) -{ - uint64_t mask = 0xffffffff00000000; - uint64_t data = target; - - return hi ? 
(value & ~mask) | (data << 32) : (value & mask) | data; -} - -static void loongarch_pch_pic_low_writew(void *opaque, hwaddr addr, - uint64_t value, unsigned size) +static void pch_pic_write(void *opaque, hwaddr addr, uint64_t value, + uint64_t field_mask) { LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); - uint32_t offset, old_valid, data = (uint32_t)value; - uint64_t old, int_mask; - offset = addr & 0xfff; - - trace_loongarch_pch_pic_low_writew(size, addr, data); - - switch (offset) { - case PCH_PIC_INT_MASK_LO: - old = s->int_mask; - s->int_mask = get_writew_val(old, data, 0); - old_valid = (uint32_t)old; - if (old_valid & ~data) { - pch_pic_update_irq(s, (old_valid & ~data), 1); - } - if (~old_valid & data) { - pch_pic_update_irq(s, (~old_valid & data), 0); - } - break; - case PCH_PIC_INT_MASK_HI: + uint32_t offset; + uint64_t old, mask, data, *ptemp; + + offset = addr & 7; + addr -= offset; + mask = field_mask << (offset * 8); + data = (value & field_mask) << (offset * 8); + switch (addr) { + case PCH_PIC_INT_MASK: old = s->int_mask; - s->int_mask = get_writew_val(old, data, 1); - old_valid = (uint32_t)(old >> 32); - int_mask = old_valid & ~data; - if (int_mask) { - pch_pic_update_irq(s, int_mask << 32, 1); + s->int_mask = (old & ~mask) | data; + if (old & ~data) { + pch_pic_update_irq(s, old & ~data, 1); } - int_mask = ~old_valid & data; - if (int_mask) { - pch_pic_update_irq(s, int_mask << 32, 0); + + if (~old & data) { + pch_pic_update_irq(s, ~old & data, 0); } break; - case PCH_PIC_INT_EDGE_LO: - s->intedge = get_writew_val(s->intedge, data, 0); - break; - case PCH_PIC_INT_EDGE_HI: - s->intedge = get_writew_val(s->intedge, data, 1); + case PCH_PIC_INT_EDGE: + s->intedge = (s->intedge & ~mask) | data; break; - case PCH_PIC_INT_CLEAR_LO: + case PCH_PIC_INT_CLEAR: if (s->intedge & data) { - s->intirr &= (~data); + s->intirr &= ~data; pch_pic_update_irq(s, data, 0); - s->intisr &= (~data); + s->intisr &= ~data; } break; - case PCH_PIC_INT_CLEAR_HI: - value <<= 32; - if (s->intedge & value) { - s->intirr &= (~value); - pch_pic_update_irq(s, value, 0); - s->intisr &= (~value); - } + case PCH_PIC_HTMSI_EN: + s->htmsi_en = (s->htmsi_en & ~mask) | data; break; - case PCH_PIC_HTMSI_EN_LO: - s->htmsi_en = get_writew_val(s->htmsi_en, data, 0); + case PCH_PIC_AUTO_CTRL0: + case PCH_PIC_AUTO_CTRL1: + /* Discard auto_ctrl access */ break; - case PCH_PIC_HTMSI_EN_HI: - s->htmsi_en = get_writew_val(s->htmsi_en, data, 1); + case PCH_PIC_INT_POL: + s->int_polarity = (s->int_polarity & ~mask) | data; break; - case PCH_PIC_AUTO_CTRL0_LO: - case PCH_PIC_AUTO_CTRL0_HI: - case PCH_PIC_AUTO_CTRL1_LO: - case PCH_PIC_AUTO_CTRL1_HI: + case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END: + ptemp = (uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC); + *ptemp = (*ptemp & ~mask) | data; + break; + case PCH_PIC_ROUTE_ENTRY ... 
PCH_PIC_ROUTE_ENTRY_END: + ptemp = (uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY); + *ptemp = (*ptemp & ~mask) | data; break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "pch_pic_write: Bad address 0x%"PRIx64"\n", addr); break; } } -static uint64_t loongarch_pch_pic_high_readw(void *opaque, hwaddr addr, - unsigned size) +static uint64_t loongarch_pch_pic_read(void *opaque, hwaddr addr, + unsigned size) { - LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); uint64_t val = 0; - uint32_t offset = addr & 0xfff; - switch (offset) { - case STATUS_LO_START: - val = (uint32_t)(s->intisr & (~s->int_mask)); + switch (size) { + case 1: + val = pch_pic_read(opaque, addr, UCHAR_MAX); break; - case STATUS_HI_START: - val = (s->intisr & (~s->int_mask)) >> 32; + case 2: + val = pch_pic_read(opaque, addr, USHRT_MAX); break; - case POL_LO_START: - val = (uint32_t)s->int_polarity; + case 4: + val = pch_pic_read(opaque, addr, UINT_MAX); break; - case POL_HI_START: - val = s->int_polarity >> 32; + case 8: + val = pch_pic_read(opaque, addr, UINT64_MAX); break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "loongarch_pch_pic_read: Bad size %d\n", size); break; } - trace_loongarch_pch_pic_high_readw(size, addr, val); + trace_loongarch_pch_pic_read(size, addr, val); return val; } -static void loongarch_pch_pic_high_writew(void *opaque, hwaddr addr, - uint64_t value, unsigned size) +static void loongarch_pch_pic_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) { - LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); - uint32_t offset, data = (uint32_t)value; - offset = addr & 0xfff; - - trace_loongarch_pch_pic_high_writew(size, addr, data); + trace_loongarch_pch_pic_write(size, addr, value); - switch (offset) { - case STATUS_LO_START: - s->intisr = get_writew_val(s->intisr, data, 0); - break; - case STATUS_HI_START: - s->intisr = get_writew_val(s->intisr, data, 1); + switch (size) { + case 1: + pch_pic_write(opaque, addr, value, UCHAR_MAX); break; - case POL_LO_START: - s->int_polarity = get_writew_val(s->int_polarity, data, 0); + case 2: + pch_pic_write(opaque, addr, value, USHRT_MAX); break; - case POL_HI_START: - s->int_polarity = get_writew_val(s->int_polarity, data, 1); break; - default: - break; - } -} - -static uint64_t loongarch_pch_pic_readb(void *opaque, hwaddr addr, - unsigned size) -{ - LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); - uint64_t val = 0; - uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET; - int64_t offset_tmp; - - switch (offset) { - case PCH_PIC_HTMSI_VEC_OFFSET ... PCH_PIC_HTMSI_VEC_END: - offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - val = s->htmsi_vector[offset_tmp]; - } - break; - case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END: - offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - val = s->route_entry[offset_tmp]; - } - break; - default: - break; - } - - trace_loongarch_pch_pic_readb(size, addr, val); - return val; -} - -static void loongarch_pch_pic_writeb(void *opaque, hwaddr addr, - uint64_t data, unsigned size) -{ - LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); - int32_t offset_tmp; - uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET; - - trace_loongarch_pch_pic_writeb(size, addr, data); - - switch (offset) { - case PCH_PIC_HTMSI_VEC_OFFSET ... 
PCH_PIC_HTMSI_VEC_END: - offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - s->htmsi_vector[offset_tmp] = (uint8_t)(data & 0xff); - } + case 4: + pch_pic_write(opaque, addr, value, UINT_MAX); break; - case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END: - offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - s->route_entry[offset_tmp] = (uint8_t)(data & 0xff); - } + case 8: + pch_pic_write(opaque, addr, value, UINT64_MAX); break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "loongarch_pch_pic_write: Bad size %d\n", size); break; } } -static const MemoryRegionOps loongarch_pch_pic_reg32_low_ops = { - .read = loongarch_pch_pic_low_readw, - .write = loongarch_pch_pic_low_writew, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 4, - .max_access_size = 4, - }, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -static const MemoryRegionOps loongarch_pch_pic_reg32_high_ops = { - .read = loongarch_pch_pic_high_readw, - .write = loongarch_pch_pic_high_writew, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 4, - .max_access_size = 4, - }, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -static const MemoryRegionOps loongarch_pch_pic_reg8_ops = { - .read = loongarch_pch_pic_readb, - .write = loongarch_pch_pic_writeb, +static const MemoryRegionOps loongarch_pch_pic_ops = { + .read = loongarch_pch_pic_read, + .write = loongarch_pch_pic_write, .valid = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 8, + /* + * PCH PIC device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
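 * Aligned accesses of 1, 2, 4 or 8 bytes are accepted; pch_pic_read()
 * and pch_pic_write() fold a narrow access into the underlying 64-bit
 * register by shifting with the byte offset inside that register.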
+ */ + .unaligned = false, }, .impl = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 8, }, .endianness = DEVICE_LITTLE_ENDIAN, }; -static void loongarch_pch_pic_reset(DeviceState *d) +static void loongarch_pic_reset_hold(Object *obj, ResetType type) { - LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(d); - int i; - - s->int_mask = -1; - s->htmsi_en = 0x0; - s->intedge = 0x0; - s->intclr = 0x0; - s->auto_crtl0 = 0x0; - s->auto_crtl1 = 0x0; - for (i = 0; i < 64; i++) { - s->route_entry[i] = 0x1; - s->htmsi_vector[i] = 0x0; + LoongarchPICClass *lpc = LOONGARCH_PIC_GET_CLASS(obj); + + if (lpc->parent_phases.hold) { + lpc->parent_phases.hold(obj, type); + } + + if (kvm_irqchip_in_kernel()) { + kvm_pic_put(obj, 0); } - s->intirr = 0x0; - s->intisr = 0x0; - s->last_intirr = 0x0; - s->int_polarity = 0x0; } static void loongarch_pic_realize(DeviceState *dev, Error **errp) @@ -390,28 +285,49 @@ static void loongarch_pic_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out(dev, s->parent_irq, s->irq_num); qdev_init_gpio_in(dev, pch_pic_irq_handler, s->irq_num); - memory_region_init_io(&s->iomem32_low, OBJECT(dev), - &loongarch_pch_pic_reg32_low_ops, - s, PCH_PIC_NAME(.reg32_part1), 0x100); - memory_region_init_io(&s->iomem8, OBJECT(dev), &loongarch_pch_pic_reg8_ops, - s, PCH_PIC_NAME(.reg8), 0x2a0); - memory_region_init_io(&s->iomem32_high, OBJECT(dev), - &loongarch_pch_pic_reg32_high_ops, - s, PCH_PIC_NAME(.reg32_part2), 0xc60); - sysbus_init_mmio(sbd, &s->iomem32_low); - sysbus_init_mmio(sbd, &s->iomem8); - sysbus_init_mmio(sbd, &s->iomem32_high); + if (kvm_irqchip_in_kernel()) { + kvm_pic_realize(dev, errp); + } else { + memory_region_init_io(&s->iomem, OBJECT(dev), + &loongarch_pch_pic_ops, + s, TYPE_LOONGARCH_PIC, VIRT_PCH_REG_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + } +} + +static int loongarch_pic_pre_save(LoongArchPICCommonState *opaque) +{ + if (kvm_irqchip_in_kernel()) { + return kvm_pic_get(opaque); + } + + return 0; +} + +static int loongarch_pic_post_load(LoongArchPICCommonState *opaque, + int version_id) +{ + if (kvm_irqchip_in_kernel()) { + return kvm_pic_put(opaque, version_id); + } + + return 0; } -static void loongarch_pic_class_init(ObjectClass *klass, void *data) +static void loongarch_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LoongarchPICClass *lpc = LOONGARCH_PIC_CLASS(klass); + LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); - device_class_set_legacy_reset(dc, loongarch_pch_pic_reset); + resettable_class_set_parent_phases(rc, NULL, loongarch_pic_reset_hold, + NULL, &lpc->parent_phases); device_class_set_parent_realize(dc, loongarch_pic_realize, &lpc->parent_realize); + lpcc->pre_save = loongarch_pic_pre_save; + lpcc->post_load = loongarch_pic_post_load; } static const TypeInfo loongarch_pic_types[] = { diff --git a/hw/intc/loongarch_pic_common.c b/hw/intc/loongarch_pic_common.c index e7f541d..de17050 100644 --- a/hw/intc/loongarch_pic_common.c +++ b/hw/intc/loongarch_pic_common.c @@ -44,6 +44,40 @@ static void loongarch_pic_common_realize(DeviceState *dev, Error **errp) } } +static void loongarch_pic_common_reset_hold(Object *obj, ResetType type) +{ + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(obj); + int i; + + /* + * With Loongson 7A1000 user manual + * Chapter 5.2 "Description of Interrupt-related Registers" + * + * Interrupt controller identification register 1 + * Bit 24-31 Interrupt Controller ID + * Interrupt 
controller identification register 2 + * Bit 0-7 Interrupt Controller version number + * Bit 16-23 The number of interrupt sources supported + */ + s->id.desc.id = PCH_PIC_INT_ID_VAL; + s->id.desc.version = PCH_PIC_INT_ID_VER; + s->id.desc.irq_num = s->irq_num - 1; + s->int_mask = UINT64_MAX; + s->htmsi_en = 0x0; + s->intedge = 0x0; + s->intclr = 0x0; + s->auto_crtl0 = 0x0; + s->auto_crtl1 = 0x0; + for (i = 0; i < 64; i++) { + s->route_entry[i] = 0x1; + s->htmsi_vector[i] = 0x0; + } + s->intirr = 0x0; + s->intisr = 0x0; + s->last_intirr = 0x0; + s->int_polarity = 0x0; +} + static const Property loongarch_pic_common_properties[] = { DEFINE_PROP_UINT32("pch_pic_irq_num", LoongArchPICCommonState, irq_num, 0), }; @@ -71,13 +105,18 @@ static const VMStateDescription vmstate_loongarch_pic_common = { } }; -static void loongarch_pic_common_class_init(ObjectClass *klass, void *data) +static void loongarch_pic_common_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_parent_realize(dc, loongarch_pic_common_realize, &lpcc->parent_realize); + resettable_class_set_parent_phases(rc, NULL, + loongarch_pic_common_reset_hold, + NULL, &lpcc->parent_phases); device_class_set_props(dc, loongarch_pic_common_properties); dc->vmsd = &vmstate_loongarch_pic_common; } diff --git a/hw/intc/loongarch_pic_kvm.c b/hw/intc/loongarch_pic_kvm.c new file mode 100644 index 0000000..dd504ec --- /dev/null +++ b/hw/intc/loongarch_pic_kvm.c @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch kvm pch pic interrupt support + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/intc/loongarch_pch_pic.h" +#include "hw/loongarch/virt.h" +#include "hw/pci-host/ls7a.h" +#include "system/kvm.h" + +static void kvm_pch_pic_access_reg(int fd, uint64_t addr, void *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS, + addr, val, write, &error_abort); +} + +static void kvm_pch_pic_access(void *opaque, bool write) +{ + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); + LoongarchPICState *lps = LOONGARCH_PIC(opaque); + int fd = lps->dev_fd; + int addr, offset; + + if (fd == 0) { + return; + } + + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_MASK, &s->int_mask, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_HTMSI_EN, &s->htmsi_en, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_EDGE, &s->intedge, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_AUTO_CTRL0, &s->auto_crtl0, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_AUTO_CTRL1, &s->auto_crtl1, write); + + for (addr = PCH_PIC_ROUTE_ENTRY; + addr < PCH_PIC_ROUTE_ENTRY_END; addr++) { + offset = addr - PCH_PIC_ROUTE_ENTRY; + kvm_pch_pic_access_reg(fd, addr, &s->route_entry[offset], write); + } + + for (addr = PCH_PIC_HTMSI_VEC; addr < PCH_PIC_HTMSI_VEC_END; addr++) { + offset = addr - PCH_PIC_HTMSI_VEC; + kvm_pch_pic_access_reg(fd, addr, &s->htmsi_vector[offset], write); + } + + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_REQUEST, &s->intirr, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_STATUS, &s->intisr, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_POL, &s->int_polarity, write); +} + +int kvm_pic_get(void *opaque) +{ + kvm_pch_pic_access(opaque, false); + return 0; +} + +int kvm_pic_put(void *opaque, int version_id) +{ + kvm_pch_pic_access(opaque, true); + 
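    /*
     * kvm_pch_pic_access(..., true) pushes the full register set into the
     * in-kernel irqchip, while kvm_pic_get() above uses the read direction.
     * The PIC device model calls kvm_pic_get() from its pre_save hook and
     * kvm_pic_put() from post_load and from the reset hold phase, in both
     * cases only when the in-kernel irqchip is in use.
     */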
return 0; +} + +void kvm_pic_realize(DeviceState *dev, Error **errp) +{ + LoongarchPICState *lps = LOONGARCH_PIC(dev); + uint64_t pch_pic_base = VIRT_PCH_REG_BASE; + int ret; + + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_PCHPIC, false); + if (ret < 0) { + fprintf(stderr, "Create KVM_LOONGARCH_PCHPIC failed: %s\n", + strerror(-ret)); + abort(); + } + + lps->dev_fd = ret; + ret = kvm_device_access(lps->dev_fd, KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL, + KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT, + &pch_pic_base, true, NULL); + if (ret < 0) { + fprintf(stderr, "KVM_LOONGARCH_PCH_PIC_INIT failed: %s\n", + strerror(-ret)); + abort(); + } +} diff --git a/hw/intc/loongson_ipi.c b/hw/intc/loongson_ipi.c index d2268a2..fbc73e8 100644 --- a/hw/intc/loongson_ipi.c +++ b/hw/intc/loongson_ipi.c @@ -101,7 +101,7 @@ static const Property loongson_ipi_properties[] = { DEFINE_PROP_UINT32("num-cpu", LoongsonIPICommonState, num_cpu, 1), }; -static void loongson_ipi_class_init(ObjectClass *klass, void *data) +static void loongson_ipi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LoongsonIPIClass *lic = LOONGSON_IPI_CLASS(klass); diff --git a/hw/intc/loongson_ipi_common.c b/hw/intc/loongson_ipi_common.c index f5ab502..8cd78d4 100644 --- a/hw/intc/loongson_ipi_common.c +++ b/hw/intc/loongson_ipi_common.c @@ -11,6 +11,7 @@ #include "hw/irq.h" #include "qemu/log.h" #include "migration/vmstate.h" +#include "system/kvm.h" #include "trace.h" MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr, uint64_t *data, @@ -255,6 +256,10 @@ static void loongson_ipi_common_realize(DeviceState *dev, Error **errp) LoongsonIPICommonState *s = LOONGSON_IPI_COMMON(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + if (kvm_irqchip_in_kernel()) { + return; + } + memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev), &loongson_ipi_iocsr_ops, s, "loongson_ipi_iocsr", 0x48); @@ -277,10 +282,38 @@ static void loongson_ipi_common_unrealize(DeviceState *dev) g_free(s->cpu); } +static int loongson_ipi_common_pre_save(void *opaque) +{ + IPICore *ipicore = (IPICore *)opaque; + LoongsonIPICommonState *s = ipicore->ipi; + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(s); + + if (licc->pre_save) { + return licc->pre_save(s); + } + + return 0; +} + +static int loongson_ipi_common_post_load(void *opaque, int version_id) +{ + IPICore *ipicore = (IPICore *)opaque; + LoongsonIPICommonState *s = ipicore->ipi; + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(s); + + if (licc->post_load) { + return licc->post_load(s, version_id); + } + + return 0; +} + static const VMStateDescription vmstate_ipi_core = { .name = "ipi-single", .version_id = 2, .minimum_version_id = 2, + .pre_save = loongson_ipi_common_pre_save, + .post_load = loongson_ipi_common_post_load, .fields = (const VMStateField[]) { VMSTATE_UINT32(status, IPICore), VMSTATE_UINT32(en, IPICore), @@ -303,7 +336,7 @@ static const VMStateDescription vmstate_loongson_ipi_common = { } }; -static void loongson_ipi_common_class_init(ObjectClass *klass, void *data) +static void loongson_ipi_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass); diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index a82b80f..2532322 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -90,7 +90,7 @@ static const Property m68k_irqc_properties[] = { TYPE_M68K_CPU, ArchCPU *), }; -static void m68k_irqc_class_init(ObjectClass 
*oc, void *data) +static void m68k_irqc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); @@ -110,7 +110,7 @@ static const TypeInfo m68k_irqc_type_info = { .instance_size = sizeof(M68KIRQCState), .instance_init = m68k_irqc_instance_init, .class_init = m68k_irqc_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { TYPE_INTERRUPT_STATS_PROVIDER }, { } diff --git a/hw/intc/meson.build b/hw/intc/meson.build index 602da30..3137521 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -71,6 +71,12 @@ specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c')) specific_ss.add(when: 'CONFIG_LOONGSON_IPI_COMMON', if_true: files('loongson_ipi_common.c')) specific_ss.add(when: 'CONFIG_LOONGSON_IPI', if_true: files('loongson_ipi.c')) specific_ss.add(when: 'CONFIG_LOONGARCH_IPI', if_true: files('loongarch_ipi.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_IPI'], + if_true: files('loongarch_ipi_kvm.c')) specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_PIC', if_true: files('loongarch_pch_pic.c', 'loongarch_pic_common.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_PCH_PIC'], + if_true: files('loongarch_pic_kvm.c')) specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_MSI', if_true: files('loongarch_pch_msi.c')) specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c', 'loongarch_extioi_common.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_EXTIOI'], + if_true: files('loongarch_extioi_kvm.c')) diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c index 5e3cbea..0c50ba4 100644 --- a/hw/intc/mips_gic.c +++ b/hw/intc/mips_gic.c @@ -14,7 +14,7 @@ #include "qemu/module.h" #include "qapi/error.h" #include "hw/sysbus.h" -#include "exec/memory.h" +#include "system/memory.h" #include "system/kvm.h" #include "system/reset.h" #include "kvm_mips.h" @@ -255,7 +255,6 @@ static void gic_write_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr, return; bad_offset: qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr); - return; } static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) @@ -443,7 +442,7 @@ static const Property mips_gic_properties[] = { DEFINE_PROP_UINT32("num-irq", MIPSGICState, num_irq, 256), }; -static void mips_gic_class_init(ObjectClass *klass, void *data) +static void mips_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c index 095a3d5..c61158b 100644 --- a/hw/intc/omap_intc.c +++ b/hw/intc/omap_intc.c @@ -102,8 +102,8 @@ static inline void omap_inth_update(OMAPIntcState *s, int is_fiq) } } -#define INT_FALLING_EDGE 0 -#define INT_LOW_LEVEL 1 +#define INT_FALLING_EDGE 0 +#define INT_LOW_LEVEL 1 static void omap_set_intr(void *opaque, int irq, int req) { @@ -142,13 +142,13 @@ static uint64_t omap_inth_read(void *opaque, hwaddr addr, offset &= 0xff; switch (offset) { - case 0x00: /* ITR */ + case 0x00: /* ITR */ return bank->irqs; - case 0x04: /* MIR */ + case 0x04: /* MIR */ return bank->mask; - case 0x10: /* SIR_IRQ_CODE */ + case 0x10: /* SIR_IRQ_CODE */ case 0x14: /* SIR_FIQ_CODE */ if (bank_no != 0) break; @@ -159,49 +159,49 @@ static uint64_t omap_inth_read(void *opaque, hwaddr addr, bank->irqs &= ~(1 << i); return line_no; - case 0x18: /* CONTROL_REG */ + case 0x18: /* CONTROL_REG */ if (bank_no != 0) break; return 0; - case 0x1c: /* ILR0 */ - case 0x20: /* ILR1 */ 
- case 0x24: /* ILR2 */ - case 0x28: /* ILR3 */ - case 0x2c: /* ILR4 */ - case 0x30: /* ILR5 */ - case 0x34: /* ILR6 */ - case 0x38: /* ILR7 */ - case 0x3c: /* ILR8 */ - case 0x40: /* ILR9 */ - case 0x44: /* ILR10 */ - case 0x48: /* ILR11 */ - case 0x4c: /* ILR12 */ - case 0x50: /* ILR13 */ - case 0x54: /* ILR14 */ - case 0x58: /* ILR15 */ - case 0x5c: /* ILR16 */ - case 0x60: /* ILR17 */ - case 0x64: /* ILR18 */ - case 0x68: /* ILR19 */ - case 0x6c: /* ILR20 */ - case 0x70: /* ILR21 */ - case 0x74: /* ILR22 */ - case 0x78: /* ILR23 */ - case 0x7c: /* ILR24 */ - case 0x80: /* ILR25 */ - case 0x84: /* ILR26 */ - case 0x88: /* ILR27 */ - case 0x8c: /* ILR28 */ - case 0x90: /* ILR29 */ - case 0x94: /* ILR30 */ - case 0x98: /* ILR31 */ + case 0x1c: /* ILR0 */ + case 0x20: /* ILR1 */ + case 0x24: /* ILR2 */ + case 0x28: /* ILR3 */ + case 0x2c: /* ILR4 */ + case 0x30: /* ILR5 */ + case 0x34: /* ILR6 */ + case 0x38: /* ILR7 */ + case 0x3c: /* ILR8 */ + case 0x40: /* ILR9 */ + case 0x44: /* ILR10 */ + case 0x48: /* ILR11 */ + case 0x4c: /* ILR12 */ + case 0x50: /* ILR13 */ + case 0x54: /* ILR14 */ + case 0x58: /* ILR15 */ + case 0x5c: /* ILR16 */ + case 0x60: /* ILR17 */ + case 0x64: /* ILR18 */ + case 0x68: /* ILR19 */ + case 0x6c: /* ILR20 */ + case 0x70: /* ILR21 */ + case 0x74: /* ILR22 */ + case 0x78: /* ILR23 */ + case 0x7c: /* ILR24 */ + case 0x80: /* ILR25 */ + case 0x84: /* ILR26 */ + case 0x88: /* ILR27 */ + case 0x8c: /* ILR28 */ + case 0x90: /* ILR29 */ + case 0x94: /* ILR30 */ + case 0x98: /* ILR31 */ i = (offset - 0x1c) >> 2; return (bank->priority[i] << 2) | (((bank->sens_edge >> i) & 1) << 1) | ((bank->fiq >> i) & 1); - case 0x9c: /* ISR */ + case 0x9c: /* ISR */ return 0x00000000; } @@ -219,24 +219,24 @@ static void omap_inth_write(void *opaque, hwaddr addr, offset &= 0xff; switch (offset) { - case 0x00: /* ITR */ + case 0x00: /* ITR */ /* Important: ignore the clearing if the IRQ is level-triggered and the input bit is 1 */ bank->irqs &= value | (bank->inputs & bank->sens_edge); return; - case 0x04: /* MIR */ + case 0x04: /* MIR */ bank->mask = value; omap_inth_update(s, 0); omap_inth_update(s, 1); return; - case 0x10: /* SIR_IRQ_CODE */ - case 0x14: /* SIR_FIQ_CODE */ + case 0x10: /* SIR_IRQ_CODE */ + case 0x14: /* SIR_FIQ_CODE */ OMAP_RO_REG(addr); break; - case 0x18: /* CONTROL_REG */ + case 0x18: /* CONTROL_REG */ if (bank_no != 0) break; if (value & 2) { @@ -251,38 +251,38 @@ static void omap_inth_write(void *opaque, hwaddr addr, } return; - case 0x1c: /* ILR0 */ - case 0x20: /* ILR1 */ - case 0x24: /* ILR2 */ - case 0x28: /* ILR3 */ - case 0x2c: /* ILR4 */ - case 0x30: /* ILR5 */ - case 0x34: /* ILR6 */ - case 0x38: /* ILR7 */ - case 0x3c: /* ILR8 */ - case 0x40: /* ILR9 */ - case 0x44: /* ILR10 */ - case 0x48: /* ILR11 */ - case 0x4c: /* ILR12 */ - case 0x50: /* ILR13 */ - case 0x54: /* ILR14 */ - case 0x58: /* ILR15 */ - case 0x5c: /* ILR16 */ - case 0x60: /* ILR17 */ - case 0x64: /* ILR18 */ - case 0x68: /* ILR19 */ - case 0x6c: /* ILR20 */ - case 0x70: /* ILR21 */ - case 0x74: /* ILR22 */ - case 0x78: /* ILR23 */ - case 0x7c: /* ILR24 */ - case 0x80: /* ILR25 */ - case 0x84: /* ILR26 */ - case 0x88: /* ILR27 */ - case 0x8c: /* ILR28 */ - case 0x90: /* ILR29 */ - case 0x94: /* ILR30 */ - case 0x98: /* ILR31 */ + case 0x1c: /* ILR0 */ + case 0x20: /* ILR1 */ + case 0x24: /* ILR2 */ + case 0x28: /* ILR3 */ + case 0x2c: /* ILR4 */ + case 0x30: /* ILR5 */ + case 0x34: /* ILR6 */ + case 0x38: /* ILR7 */ + case 0x3c: /* ILR8 */ + case 0x40: /* ILR9 */ + case 0x44: /* ILR10 */ 
+ case 0x48: /* ILR11 */ + case 0x4c: /* ILR12 */ + case 0x50: /* ILR13 */ + case 0x54: /* ILR14 */ + case 0x58: /* ILR15 */ + case 0x5c: /* ILR16 */ + case 0x60: /* ILR17 */ + case 0x64: /* ILR18 */ + case 0x68: /* ILR19 */ + case 0x6c: /* ILR20 */ + case 0x70: /* ILR21 */ + case 0x74: /* ILR22 */ + case 0x78: /* ILR23 */ + case 0x7c: /* ILR24 */ + case 0x80: /* ILR25 */ + case 0x84: /* ILR26 */ + case 0x88: /* ILR27 */ + case 0x8c: /* ILR28 */ + case 0x90: /* ILR29 */ + case 0x94: /* ILR30 */ + case 0x98: /* ILR31 */ i = (offset - 0x1c) >> 2; bank->priority[i] = (value >> 2) & 0x1f; bank->sens_edge &= ~(1 << i); @@ -291,7 +291,7 @@ static void omap_inth_write(void *opaque, hwaddr addr, bank->fiq |= (value & 1) << i; return; - case 0x9c: /* ISR */ + case 0x9c: /* ISR */ for (i = 0; i < 32; i ++) if (value & (1 << i)) { omap_set_intr(s, 32 * bank_no + i, 1); @@ -379,7 +379,7 @@ static const Property omap_intc_properties[] = { DEFINE_PROP_UINT32("size", OMAPIntcState, size, 0x100), }; -static void omap_intc_class_init(ObjectClass *klass, void *data) +static void omap_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c index 42af456..047c367 100644 --- a/hw/intc/ompic.c +++ b/hw/intc/ompic.c @@ -13,7 +13,7 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #define TYPE_OR1K_OMPIC "or1k-ompic" @@ -155,7 +155,7 @@ static const VMStateDescription vmstate_or1k_ompic = { } }; -static void or1k_ompic_class_init(ObjectClass *klass, void *data) +static void or1k_ompic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index 78a82d0..87733eb 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -1611,7 +1611,7 @@ static const Property openpic_properties[] = { DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1), }; -static void openpic_class_init(ObjectClass *oc, void *data) +static void openpic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c index 9cdaa97..673ea9c 100644 --- a/hw/intc/openpic_kvm.c +++ b/hw/intc/openpic_kvm.c @@ -267,7 +267,7 @@ static const Property kvm_openpic_properties[] = { OPENPIC_MODEL_FSL_MPIC_20), }; -static void kvm_openpic_class_init(ObjectClass *oc, void *data) +static void kvm_openpic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c index a5e2d76..838c21c 100644 --- a/hw/intc/pl190.c +++ b/hw/intc/pl190.c @@ -273,7 +273,7 @@ static const VMStateDescription vmstate_pl190 = { } }; -static void pl190_class_init(ObjectClass *klass, void *data) +static void pl190_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index ccbe95a..c2ca40b 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -470,14 +470,13 @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) return xive->regs[reg >> 3] & PPC_BIT(bit); } -static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, 
uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { PnvXive *xive = PNV_XIVE(xptr); PnvChip *chip = xive->chip; - int count = 0; int i, j; for (i = 0; i < chip->nr_cores; i++) { @@ -510,17 +509,18 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " "thread context NVT %x/%x\n", nvt_blk, nvt_idx); - return -1; + match->count++; + continue; } match->ring = ring; match->tctx = tctx; - count++; + match->count++; } } } - return count; + return !!match->count; } static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr) @@ -2068,7 +2068,7 @@ static const Property pnv_xive_properties[] = { DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), }; -static void pnv_xive_class_init(ObjectClass *klass, void *data) +static void pnv_xive_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -2106,7 +2106,7 @@ static const TypeInfo pnv_xive_info = { .instance_size = sizeof(PnvXive), .class_init = pnv_xive_class_init, .class_size = sizeof(PnvXiveClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c index 0b81dad..e019cad 100644 --- a/hw/intc/pnv_xive2.c +++ b/hw/intc/pnv_xive2.c @@ -101,12 +101,10 @@ static uint32_t pnv_xive2_block_id(PnvXive2 *xive) } /* - * Remote access to controllers. HW uses MMIOs. For now, a simple scan - * of the chips is good enough. - * - * TODO: Block scope support + * Remote access to INT controllers. HW uses MMIOs(?). For now, a simple + * scan of all the chips INT controller is good enough. */ -static PnvXive2 *pnv_xive2_get_remote(uint8_t blk) +static PnvXive2 *pnv_xive2_get_remote(uint32_t vsd_type, hwaddr fwd_addr) { PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); int i; @@ -115,10 +113,23 @@ static PnvXive2 *pnv_xive2_get_remote(uint8_t blk) Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); PnvXive2 *xive = &chip10->xive; - if (pnv_xive2_block_id(xive) == blk) { + /* + * Is this the XIVE matching the forwarded VSD address is for this + * VSD type + */ + if ((vsd_type == VST_ESB && fwd_addr == xive->esb_base) || + (vsd_type == VST_END && fwd_addr == xive->end_base) || + ((vsd_type == VST_NVP || + vsd_type == VST_NVG) && fwd_addr == xive->nvpg_base) || + (vsd_type == VST_NVC && fwd_addr == xive->nvc_base)) { return xive; } } + + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: >>>>> %s vsd_type %u fwd_addr 0x%"HWADDR_PRIx + " NOT FOUND\n", + __func__, vsd_type, fwd_addr); return NULL; } @@ -251,8 +262,7 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk, /* Remote VST access */ if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { - xive = pnv_xive2_get_remote(blk); - + xive = pnv_xive2_get_remote(type, (vsd & VSD_ADDRESS_MASK)); return xive ? 
pnv_xive2_vst_addr(xive, type, blk, idx) : 0; } @@ -595,20 +605,28 @@ static uint32_t pnv_xive2_get_config(Xive2Router *xrtr) { PnvXive2 *xive = PNV_XIVE2(xrtr); uint32_t cfg = 0; + uint64_t reg = xive->cq_regs[CQ_XIVE_CFG >> 3]; - if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) { + if (reg & CQ_XIVE_CFG_GEN1_TIMA_OS) { cfg |= XIVE2_GEN1_TIMA_OS; } - if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { + if (reg & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { cfg |= XIVE2_VP_SAVE_RESTORE; } - if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, - xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) { + if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, reg) == + CQ_XIVE_CFG_THREADID_8BITS) { cfg |= XIVE2_THREADID_8BITS; } + if (reg & CQ_XIVE_CFG_EN_VP_GRP_PRIORITY) { + cfg |= XIVE2_EN_VP_GRP_PRIORITY; + } + + cfg = SETFIELD(XIVE2_VP_INT_PRIO, cfg, + GETFIELD(CQ_XIVE_CFG_VP_INT_PRIO, reg)); + return cfg; } @@ -622,24 +640,28 @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit); } -static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { PnvXive2 *xive = PNV_XIVE2(xptr); PnvChip *chip = xive->chip; - int count = 0; int i, j; bool gen1_tima_os = xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + static int next_start_core; + static int next_start_thread; + int start_core = next_start_core; + int start_thread = next_start_thread; for (i = 0; i < chip->nr_cores; i++) { - PnvCore *pc = chip->cores[i]; + PnvCore *pc = chip->cores[(i + start_core) % chip->nr_cores]; CPUCore *cc = CPU_CORE(pc); for (j = 0; j < cc->nr_threads; j++) { - PowerPCCPU *cpu = pc->threads[j]; + /* Start search for match with different thread each call */ + PowerPCCPU *cpu = pc->threads[(j + start_thread) % cc->nr_threads]; XiveTCTX *tctx; int ring; @@ -669,7 +691,8 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, "thread context NVT %x/%x\n", nvt_blk, nvt_idx); /* Should set a FIR if we ever model it */ - return -1; + match->count++; + continue; } /* * For a group notification, we need to know if the @@ -684,14 +707,23 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, if (!match->tctx) { match->ring = ring; match->tctx = tctx; + + next_start_thread = j + start_thread + 1; + if (next_start_thread >= cc->nr_threads) { + next_start_thread = 0; + next_start_core = i + start_core + 1; + if (next_start_core >= chip->nr_cores) { + next_start_core = 0; + } + } } - count++; + match->count++; } } } } - return count; + return !!match->count; } static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) @@ -1173,7 +1205,8 @@ static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset, case CQ_FIRMASK_OR: /* FIR error reporting */ break; default: - xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset); + xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1304,7 +1337,6 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, case VC_ENDC_WATCH2_SPEC: case VC_ENDC_WATCH3_SPEC: watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6; - xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | 
VC_ENDC_WATCH_CONFLICT); pnv_xive2_endc_cache_watch_release(xive, watch_engine); val = xive->vc_regs[reg]; break; @@ -1315,10 +1347,11 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, case VC_ENDC_WATCH3_DATA0: /* * Load DATA registers from cache with data requested by the - * SPEC register + * SPEC register. Clear gen_flipped bit in word 1. */ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6; pnv_xive2_end_cache_load(xive, watch_engine); + xive->vc_regs[reg] &= ~(uint64_t)END2_W1_GEN_FLIPPED; val = xive->vc_regs[reg]; break; @@ -1386,7 +1419,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* * ESB cache updates (not modeled) */ - /* case VC_ESBC_FLUSH_CTRL: */ + case VC_ESBC_FLUSH_CTRL: + if (val & VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_ESBC_FLUSH_POLL: xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID; /* ESB update */ @@ -1402,7 +1442,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* * EAS cache updates (not modeled) */ - /* case VC_EASC_FLUSH_CTRL: */ + case VC_EASC_FLUSH_CTRL: + if (val & VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_EASC_FLUSH_POLL: xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID; /* EAS update */ @@ -1441,7 +1488,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, break; - /* case VC_ENDC_FLUSH_CTRL: */ + case VC_ENDC_FLUSH_CTRL: + if (val & VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_ENDC_FLUSH_POLL: xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID; break; @@ -1470,7 +1524,8 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "VC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1661,7 +1716,14 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset, pnv_xive2_nxc_update(xive, watch_engine); break; - /* case PC_NXC_FLUSH_CTRL: */ + case PC_NXC_FLUSH_CTRL: + if (val & PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case PC_NXC_FLUSH_POLL: xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID; break; @@ -1678,7 +1740,8 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "PC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1765,7 +1828,8 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, xive->tctxt_regs[reg] = val; break; default: - xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "TCTXT: invalid write @0x%"HWADDR_PRIx + " data 0x%"PRIx64, offset, val); return; } } @@ -1836,7 +1900,8 @@ static void pnv_xive2_xscom_write(void *opaque, hwaddr offset, pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size); break; default: - xive2_error(xive, 
"XSCOM: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx + " value 0x%"PRIx64, offset, val); } } @@ -1904,7 +1969,8 @@ static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx + " value 0x%"PRIx64, offset, val); } } @@ -1946,7 +2012,8 @@ static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset, { PnvXive2 *xive = PNV_XIVE2(opaque); - xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); } static const MemoryRegionOps pnv_xive2_ic_lsi_ops = { @@ -2049,7 +2116,8 @@ static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset, inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI; break; default: - xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -2505,7 +2573,7 @@ static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt, return 0; } -static void pnv_xive2_class_init(ObjectClass *klass, void *data) +static void pnv_xive2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -2547,7 +2615,7 @@ static const TypeInfo pnv_xive2_info = { .instance_size = sizeof(PnvXive2), .class_init = pnv_xive2_class_init, .class_size = sizeof(PnvXive2Class), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h index e8b87b3..d53300f 100644 --- a/hw/intc/pnv_xive2_regs.h +++ b/hw/intc/pnv_xive2_regs.h @@ -66,6 +66,7 @@ #define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */ #define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */ #define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28) +#define CQ_XIVE_CFG_EN_VP_GRP_PRIORITY PPC_BIT(32) /* 0 if bit[25]=1 */ #define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) /* 0 if bit[25]=1 */ #define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */ diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c index 7de4bf9..bc4dc90 100644 --- a/hw/intc/ppc-uic.c +++ b/hw/intc/ppc-uic.c @@ -281,7 +281,7 @@ static const VMStateDescription ppc_uic_vmstate = { }, }; -static void ppc_uic_class_init(ObjectClass *klass, void *data) +static void ppc_uic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/realview_gic.c b/hw/intc/realview_gic.c index 9b12116..63e25c2 100644 --- a/hw/intc/realview_gic.c +++ b/hw/intc/realview_gic.c @@ -63,7 +63,7 @@ static void realview_gic_init(Object *obj) qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", 1); } -static void realview_gic_class_init(ObjectClass *oc, void *data) +static void realview_gic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index db374a7..4623cfa0 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -28,6 +28,7 @@ #include "qemu/module.h" #include "hw/sysbus.h" #include "target/riscv/cpu.h" +#include "target/riscv/time_helper.h" #include "hw/qdev-properties.h" #include "hw/intc/riscv_aclint.h" #include "qemu/timer.h" @@ -240,6 +241,10 @@ static void riscv_aclint_mtimer_write(void *opaque, 
hwaddr addr, riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), mtimer->hartid_base + i, mtimer->timecmp[i]); + riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP); + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, + env->htimedelta, MIP_VSTIP); + } return; } @@ -328,7 +333,7 @@ static const VMStateDescription vmstate_riscv_mtimer = { } }; -static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data) +static void riscv_aclint_mtimer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = riscv_aclint_mtimer_realize; @@ -509,7 +514,7 @@ static void riscv_aclint_swi_reset_enter(Object *obj, ResetType type) } } -static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data) +static void riscv_aclint_swi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = riscv_aclint_swi_realize; diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index 5964cde..a1d9fa5 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -22,7 +22,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qemu/bswap.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/sysbus.h" #include "hw/pci/msi.h" #include "hw/boards.h" @@ -628,7 +628,7 @@ static void riscv_aplic_request(void *opaque, int irq, int level) static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) { - uint32_t irq, word, idc; + uint32_t irq, word, idc, sm; RISCVAPLICState *aplic = opaque; /* Reads must be 4 byte words */ @@ -696,6 +696,10 @@ static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) } else if ((APLIC_TARGET_BASE <= addr) && (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + return 0; + } return aplic->target[irq]; } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { @@ -962,10 +966,18 @@ static const Property riscv_aplic_properties[] = { DEFINE_PROP_BOOL("mmode", RISCVAPLICState, mmode, 0), }; +static bool riscv_aplic_state_needed(void *opaque) +{ + RISCVAPLICState *aplic = opaque; + + return riscv_use_emulated_aplic(aplic->msimode); +} + static const VMStateDescription vmstate_riscv_aplic = { .name = "riscv_aplic", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, + .needed = riscv_aplic_state_needed, .fields = (const VMStateField[]) { VMSTATE_UINT32(domaincfg, RISCVAPLICState), VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState), @@ -997,7 +1009,7 @@ static const VMStateDescription vmstate_riscv_aplic = { } }; -static void riscv_aplic_class_init(ObjectClass *klass, void *data) +static void riscv_aplic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c index 241b12f..6174e1a 100644 --- a/hw/intc/riscv_imsic.c +++ b/hw/intc/riscv_imsic.c @@ -22,7 +22,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qemu/bswap.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/sysbus.h" #include "hw/pci/msi.h" #include "hw/boards.h" @@ -398,10 +398,16 @@ static const Property riscv_imsic_properties[] = { DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0), }; +static bool 
riscv_imsic_state_needed(void *opaque) +{ + return !kvm_irqchip_in_kernel(); +} + static const VMStateDescription vmstate_riscv_imsic = { .name = "riscv_imsic", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, + .needed = riscv_imsic_state_needed, .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState, num_pages, 0, @@ -416,7 +422,7 @@ static const VMStateDescription vmstate_riscv_imsic = { } }; -static void riscv_imsic_class_init(ObjectClass *klass, void *data) +static void riscv_imsic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c index ca13c5f..f861552 100644 --- a/hw/intc/rx_icu.c +++ b/hw/intc/rx_icu.c @@ -368,7 +368,7 @@ static const Property rxicu_properties[] = { qdev_prop_uint8, uint8_t), }; -static void rxicu_class_init(ObjectClass *klass, void *data) +static void rxicu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c index c20f4c1..8f4c9fd 100644 --- a/hw/intc/s390_flic.c +++ b/hw/intc/s390_flic.c @@ -450,7 +450,7 @@ static const Property qemu_s390_flic_properties[] = { migrate_all_state, true), }; -static void qemu_s390_flic_class_init(ObjectClass *oc, void *data) +static void qemu_s390_flic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); @@ -470,11 +470,6 @@ static void qemu_s390_flic_class_init(ObjectClass *oc, void *data) fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk; } -static const Property s390_flic_common_properties[] = { - DEFINE_PROP_BOOL("migration-enabled", S390FLICState, - migration_enabled, true), -}; - static void s390_flic_common_realize(DeviceState *dev, Error **errp) { S390FLICState *fs = S390_FLIC_COMMON(dev); @@ -482,11 +477,10 @@ static void s390_flic_common_realize(DeviceState *dev, Error **errp) fs->ais_supported = s390_has_feat(S390_FEAT_ADAPTER_INT_SUPPRESSION); } -static void s390_flic_class_init(ObjectClass *oc, void *data) +static void s390_flic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - device_class_set_props(dc, s390_flic_common_properties); dc->realize = s390_flic_common_realize; } @@ -515,18 +509,10 @@ static void qemu_s390_flic_register_types(void) type_init(qemu_s390_flic_register_types) -static bool adapter_info_so_needed(void *opaque) -{ - S390FLICState *fs = s390_get_flic(); - - return fs->migration_enabled; -} - const VMStateDescription vmstate_adapter_info_so = { .name = "s390_adapter_info/summary_offset", .version_id = 1, .minimum_version_id = 1, - .needed = adapter_info_so_needed, .fields = (const VMStateField[]) { VMSTATE_UINT32(summary_offset, AdapterInfo), VMSTATE_END_OF_LIST() diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c index 10aaafb..f833a39 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -670,7 +670,7 @@ static void kvm_s390_flic_reset(DeviceState *dev) flic_enable_pfault(flic); } -static void kvm_s390_flic_class_init(ObjectClass *oc, void *data) +static void kvm_s390_flic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index a5b0f6e..3160b21 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -446,7 +446,7 @@ static const Property 
sifive_plic_properties[] = { DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), }; -static void sifive_plic_class_init(ObjectClass *klass, void *data) +static void sifive_plic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c index f83709a..00b80bb 100644 --- a/hw/intc/slavio_intctl.c +++ b/hw/intc/slavio_intctl.c @@ -441,7 +441,7 @@ static void slavio_intctl_init(Object *obj) } } -static void slavio_intctl_class_init(ObjectClass *klass, void *data) +static void slavio_intctl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); @@ -460,7 +460,7 @@ static const TypeInfo slavio_intctl_info = { .instance_size = sizeof(SLAVIO_INTCTLState), .instance_init = slavio_intctl_init, .class_init = slavio_intctl_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index ce734b0..e393f5d 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -428,14 +428,13 @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, g_assert_not_reached(); } -static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, - uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, + uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { CPUState *cs; - int count = 0; CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -463,16 +462,17 @@ static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, if (match->tctx) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " "context NVT %x/%x\n", nvt_blk, nvt_idx); - return -1; + match->count++; + continue; } match->ring = ring; match->tctx = tctx; - count++; + match->count++; } } - return count; + return !!match->count; } static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr) @@ -809,7 +809,7 @@ static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr) return spapr_xive_in_kernel(SPAPR_XIVE(xptr)); } -static void spapr_xive_class_init(ObjectClass *klass, void *data) +static void spapr_xive_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); @@ -856,7 +856,7 @@ static const TypeInfo spapr_xive_info = { .instance_size = sizeof(SpaprXive), .class_init = spapr_xive_class_init, .class_size = sizeof(SpaprXiveClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_SPAPR_INTC }, { } }, diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 0ba9a02..018c609 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -274,11 +274,13 @@ kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device" kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x" # xive.c -xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" -xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" 
-xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" +xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" +xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" +xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 +xive_source_notify(uint32_t srcno) "Processing notification for queued IRQ 0x%x" +xive_source_blocked(uint32_t srcno) "No action needed for IRQ 0x%x currently" xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x" xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 @@ -289,6 +291,10 @@ xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x # xive2.c xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d" xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d" +xive_redistribute(uint32_t index, uint8_t ring, uint8_t end_blk, uint32_t end_idx) "Redistribute from target=%d ring=0x%x NVP 0x%x/0x%x" +xive_end_enqueue(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "Queue event for END 0x%x/0x%x data=0x%x" +xive_escalate_end(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t esc_data) "Escalate from END 0x%x/0x%x to END 0x%x/0x%x data=0x%x" +xive_escalate_esb(uint8_t end_blk, uint32_t end_idx, uint32_t lisn) "Escalate from END 0x%x/0x%x to LISN=0x%x" # pnv_xive.c pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 @@ -314,12 +320,8 @@ loongson_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x loongson_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 # loongarch_pch_pic.c loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d" -loongarch_pch_pic_low_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_low_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_high_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_high_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_readb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_writeb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 
+loongarch_pch_pic_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 # loongarch_pch_msi.c loongarch_msi_set_irq(int irq_num) "set msi irq %d" diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 9c1b7bb..d9a199e 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -350,7 +350,7 @@ static const Property icp_properties[] = { DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *), }; -static void icp_class_init(ObjectClass *klass, void *data) +static void icp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -681,7 +681,7 @@ static const Property ics_properties[] = { XICSFabric *), }; -static void ics_class_init(ObjectClass *klass, void *data) +static void ics_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/xics_pnv.c b/hw/intc/xics_pnv.c index 753c067..ff602d9 100644 --- a/hw/intc/xics_pnv.c +++ b/hw/intc/xics_pnv.c @@ -176,7 +176,7 @@ static void pnv_icp_realize(DeviceState *dev, Error **errp) icp, "icp-thread", 0x1000); } -static void pnv_icp_class_init(ObjectClass *klass, void *data) +static void pnv_icp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ICPStateClass *icpc = ICP_CLASS(klass); diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index a0d97bd..7663596 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -436,7 +436,7 @@ static void xics_spapr_deactivate(SpaprInterruptController *intc) } } -static void ics_spapr_class_init(ObjectClass *klass, void *data) +static void ics_spapr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ICSStateClass *isc = ICS_CLASS(klass); @@ -461,7 +461,7 @@ static const TypeInfo ics_spapr_info = { .name = TYPE_ICS_SPAPR, .parent = TYPE_ICS, .class_init = ics_spapr_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_SPAPR_INTC }, { } }, diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c index ab1c4a3..5257ad5 100644 --- a/hw/intc/xilinx_intc.c +++ b/hw/intc/xilinx_intc.c @@ -214,7 +214,7 @@ static const Property xilinx_intc_properties[] = { DEFINE_PROP_UINT32("kind-of-intr", XpsIntc, c_kind_of_intr, 0), }; -static void xilinx_intc_class_init(ObjectClass *klass, void *data) +static void xilinx_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 3eb28c2..e0ffcf8 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -25,6 +25,58 @@ /* * XIVE Thread Interrupt Management context */ +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t cur_ring; + + for (cur_ring = ring; cur_ring <= TM_QW3_HV_PHYS; + cur_ring += XIVE_TM_RING_SIZE) { + if (!(tctx->regs[cur_ring + TM_WORD2] & 0x80)) { + return false; + } + } + return true; +} + +bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr) +{ + switch (ring) { + case TM_QW1_OS: + return !!(nsr & TM_QW1_NSR_EO); + case TM_QW2_HV_POOL: + case TM_QW3_HV_PHYS: + return !!(nsr & TM_QW3_NSR_HE); + default: + g_assert_not_reached(); + } +} + +bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr) +{ + if ((nsr & TM_NSR_GRP_LVL) > 0) { + g_assert(xive_nsr_indicates_exception(ring, nsr)); + return true; + } + return false; +} + +uint8_t 
xive_nsr_exception_ring(uint8_t ring, uint8_t nsr) +{ + /* NSR determines if pool/phys ring is for phys or pool interrupt */ + if ((ring == TM_QW3_HV_PHYS) || (ring == TM_QW2_HV_POOL)) { + uint8_t he = (nsr & TM_QW3_NSR_HE) >> 6; + + if (he == TM_QW3_NSR_HE_PHYS) { + return TM_QW3_HV_PHYS; + } else if (he == TM_QW3_NSR_HE_POOL) { + return TM_QW2_HV_POOL; + } else { + /* Don't support LSI mode */ + g_assert_not_reached(); + } + } + return ring; +} static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) { @@ -41,74 +93,83 @@ static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) } } -static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) +/* + * interrupt is accepted on the presentation ring, for PHYS ring the NSR + * directs it to the PHYS or POOL rings. + */ +uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t sig_ring) { - uint8_t *regs = &tctx->regs[ring]; - uint8_t nsr = regs[TM_NSR]; + uint8_t *sig_regs = &tctx->regs[sig_ring]; + uint8_t nsr = sig_regs[TM_NSR]; - qemu_irq_lower(xive_tctx_output(tctx, ring)); + g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS); - if (regs[TM_NSR] != 0) { - uint8_t cppr = regs[TM_PIPR]; - uint8_t alt_ring; - uint8_t *alt_regs; + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); - /* POOL interrupt uses IPB in QW2, POOL ring */ - if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) { - alt_ring = TM_QW2_HV_POOL; - } else { - alt_ring = ring; - } - alt_regs = &tctx->regs[alt_ring]; + if (xive_nsr_indicates_exception(sig_ring, nsr)) { + uint8_t cppr = sig_regs[TM_PIPR]; + uint8_t ring; + uint8_t *regs; + + ring = xive_nsr_exception_ring(sig_ring, nsr); + regs = &tctx->regs[ring]; - regs[TM_CPPR] = cppr; + sig_regs[TM_CPPR] = cppr; /* * If the interrupt was for a specific VP, reset the pending * buffer bit, otherwise clear the logical server indicator */ - if (regs[TM_NSR] & TM_NSR_GRP_LVL) { - regs[TM_NSR] &= ~TM_NSR_GRP_LVL; - } else { - alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); + if (!xive_nsr_indicates_group_exception(sig_ring, nsr)) { + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); } - /* Drop the exception bit and any group/crowd */ - regs[TM_NSR] = 0; + /* Clear the exception from NSR */ + sig_regs[TM_NSR] = 0; + qemu_irq_lower(xive_tctx_output(tctx, sig_ring)); - trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, - alt_regs[TM_IPB], regs[TM_PIPR], - regs[TM_CPPR], regs[TM_NSR]); + trace_xive_tctx_accept(tctx->cs->cpu_index, ring, + regs[TM_IPB], sig_regs[TM_PIPR], + sig_regs[TM_CPPR], sig_regs[TM_NSR]); } - return ((uint64_t)nsr << 8) | regs[TM_CPPR]; + return ((uint64_t)nsr << 8) | sig_regs[TM_CPPR]; } -void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level) +/* Change PIPR and calculate NSR and irq based on PIPR, CPPR, group */ +void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t pipr, + uint8_t group_level) { - /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ - uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? 
TM_QW3_HV_PHYS : ring; - uint8_t *alt_regs = &tctx->regs[alt_ring]; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; - if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { + g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR])); + + sig_regs[TM_PIPR] = pipr; + + if (pipr < sig_regs[TM_CPPR]) { switch (ring) { case TM_QW1_OS: - regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); + sig_regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); break; case TM_QW2_HV_POOL: - alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); + sig_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); break; case TM_QW3_HV_PHYS: - regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); + sig_regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); break; default: g_assert_not_reached(); } trace_xive_tctx_notify(tctx->cs->cpu_index, ring, - regs[TM_IPB], alt_regs[TM_PIPR], - alt_regs[TM_CPPR], alt_regs[TM_NSR]); + regs[TM_IPB], pipr, + sig_regs[TM_CPPR], sig_regs[TM_NSR]); qemu_irq_raise(xive_tctx_output(tctx, ring)); + } else { + sig_regs[TM_NSR] = 0; + qemu_irq_lower(xive_tctx_output(tctx, ring)); } } @@ -124,25 +185,32 @@ void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring) static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) { - uint8_t *regs = &tctx->regs[ring]; + uint8_t *sig_regs = &tctx->regs[ring]; uint8_t pipr_min; uint8_t ring_min; + g_assert(ring == TM_QW1_OS || ring == TM_QW3_HV_PHYS); + + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + /* XXX: should show pool IPB for PHYS ring */ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - cppr, regs[TM_NSR]); + sig_regs[TM_IPB], sig_regs[TM_PIPR], + cppr, sig_regs[TM_NSR]); if (cppr > XIVE_PRIORITY_MAX) { cppr = 0xff; } - tctx->regs[ring + TM_CPPR] = cppr; + sig_regs[TM_CPPR] = cppr; /* * Recompute the PIPR based on local pending interrupts. The PHYS * ring must take the minimum of both the PHYS and POOL PIPR values. */ - pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); + pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]); ring_min = ring; /* PHYS updates also depend on POOL values */ @@ -151,7 +219,6 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) /* POOL values only matter if POOL ctx is valid */ if (pool_regs[TM_WORD2] & 0x80) { - uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); /* @@ -165,30 +232,39 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } } - regs[TM_PIPR] = pipr_min; + /* CPPR has changed, this may present or preclude a pending exception */ + xive_tctx_pipr_set(tctx, ring_min, pipr_min, 0); +} + +static void xive_tctx_pipr_recompute_from_ipb(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; - /* CPPR has changed, check if we need to raise a pending exception */ - xive_tctx_notify(tctx, ring_min, 0); + /* Does not support a presented group interrupt */ + g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR])); + + xive_tctx_pipr_set(tctx, ring, xive_ipb_to_pipr(regs[TM_IPB]), 0); } -void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, - uint8_t group_level) - { - /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ - uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? 
TM_QW3_HV_PHYS : ring; - uint8_t *alt_regs = &tctx->regs[alt_ring]; +void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; + uint8_t pipr = xive_priority_to_pipr(priority); if (group_level == 0) { - /* VP-specific */ regs[TM_IPB] |= xive_priority_to_ipb(priority); - alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); - } else { - /* VP-group */ - alt_regs[TM_PIPR] = xive_priority_to_pipr(priority); + if (pipr >= sig_regs[TM_PIPR]) { + /* VP interrupts can come here with lower priority than PIPR */ + return; + } } - xive_tctx_notify(tctx, ring, group_level); - } + g_assert(pipr <= xive_ipb_to_pipr(regs[TM_IPB])); + g_assert(pipr < sig_regs[TM_PIPR]); + xive_tctx_pipr_set(tctx, ring, pipr, group_level); +} /* * XIVE Thread Interrupt Management Area (TIMA) @@ -206,25 +282,78 @@ static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx, return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); } +static void xive_pool_cam_decode(uint32_t cam, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vp) +{ + if (nvt_blk) { + *nvt_blk = xive_nvt_blk(cam); + } + if (nvt_idx) { + *nvt_idx = xive_nvt_idx(cam); + } + if (vp) { + *vp = !!(cam & TM_QW2W2_VP); + } +} + +static uint32_t xive_tctx_get_pool_cam(XiveTCTX *tctx, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vp) +{ + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t cam = be32_to_cpu(qw2w2); + + xive_pool_cam_decode(cam, nvt_blk, nvt_idx, vp); + return qw2w2; +} + +static void xive_tctx_set_pool_cam(XiveTCTX *tctx, uint32_t qw2w2) +{ + memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); +} + static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); uint32_t qw2w2; + uint32_t qw2w2_new; + uint8_t nvt_blk; + uint32_t nvt_idx; + bool vp; + + qw2w2 = xive_tctx_get_pool_cam(tctx, &nvt_blk, &nvt_idx, &vp); + + if (!vp) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid POOL NVT %x/%x !?\n", + nvt_blk, nvt_idx); + } + + /* Invalidate CAM line */ + qw2w2_new = xive_set_field32(TM_QW2W2_VP, qw2w2, 0); + xive_tctx_set_pool_cam(tctx, qw2w2_new); + + xive_tctx_reset_signal(tctx, TM_QW1_OS); + xive_tctx_reset_signal(tctx, TM_QW2_HV_POOL); + /* Re-check phys for interrupts if pool was disabled */ + xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW3_HV_PHYS); - qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); - memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); return qw2w2; } static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; - uint8_t qw3b8; + uint8_t qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + uint8_t qw3b8_new; + + qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + if (!(qw3b8 & TM_QW3B8_VT)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid PHYS thread!?\n"); + } + qw3b8_new = qw3b8 & ~TM_QW3B8_VT; + tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8_new; - qw3b8 = qw3b8_prev & ~TM_QW3B8_VT; - tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8; + xive_tctx_reset_signal(tctx, TM_QW1_OS); + xive_tctx_reset_signal(tctx, TM_QW3_HV_PHYS); return qw3b8; } @@ -255,14 +384,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, static const uint8_t xive_tm_hw_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ - 3, 
3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_hv_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ - 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ }; @@ -326,7 +455,7 @@ static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, */ if (size < 4 || !mask || ring_offset == TM_QW0_USER) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" - HWADDR_PRIx"\n", offset); + HWADDR_PRIx" size %d\n", offset, size); return; } @@ -357,7 +486,7 @@ static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) */ if (size < 4 || !mask || ring_offset == TM_QW0_USER) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" - HWADDR_PRIx"\n", offset); + HWADDR_PRIx" size %d\n", offset, size); return -1; } @@ -403,6 +532,12 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff); } +static void xive_tm_set_pool_lgs(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_lgs(tctx, TM_QW2_HV_POOL, value & 0xff); +} + /* * Adjust the PIPR to allow a CPU to process event queues of other * priorities during one physical interrupt cycle. @@ -410,7 +545,12 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0); + uint8_t ring = TM_QW1_OS; + uint8_t *regs = &tctx->regs[ring]; + + /* XXX: how should this work exactly? */ + regs[TM_IPB] |= xive_priority_to_ipb(value & 0xff); + xive_tctx_pipr_recompute_from_ipb(tctx, ring); } static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, @@ -454,7 +594,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo); if (!vo) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n", + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid OS NVT %x/%x !?\n", nvt_blk, nvt_idx); } @@ -466,7 +606,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, return qw1w2; } -static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, +static void xive_tctx_restore_nvp(XiveRouter *xrtr, XiveTCTX *tctx, uint8_t nvt_blk, uint32_t nvt_idx) { XiveNVT nvt; @@ -492,16 +632,6 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, uint8_t *regs = &tctx->regs[TM_QW1_OS]; regs[TM_IPB] |= ipb; } - - /* - * Always call xive_tctx_pipr_update(). Even if there were no - * escalation triggered, there could be a pending interrupt which - * was saved when the context was pulled and that we need to take - * into account by recalculating the PIPR (which is not - * saved/restored). - * It will also raise the External interrupt signal if needed. 
- */ - xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */ } /* @@ -523,7 +653,17 @@ static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, /* Check the interrupt pending bits */ if (vo) { - xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + xive_tctx_restore_nvp(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + + /* + * Always call xive_tctx_recompute_from_ipb(). Even if there were no + * escalation triggered, there could be a pending interrupt which + * was saved when the context was pulled and that we need to take + * into account by recalculating the PIPR (which is not + * saved/restored). + * It will also raise the External interrupt signal if needed. + */ + xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW1_OS); /* fxb */ } } @@ -542,6 +682,8 @@ typedef struct XiveTmOp { uint8_t page_offset; uint32_t op_offset; unsigned size; + bool hw_ok; + bool sw_ok; void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); @@ -554,34 +696,34 @@ static const XiveTmOp xive_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, - xive_tm_vt_poll }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true, + xive_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true, + xive_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true, + xive_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true, + xive_tm_vt_push, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true, + NULL, xive_tm_vt_poll }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, - xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, - NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, - xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, - xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, - xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, - xive_tm_pull_phys_ctx }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false, + NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false, + xive_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false, + NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false, + NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false, + NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false, + NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false, + NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false, + NULL, xive_tm_pull_phys_ctx }, }; static const XiveTmOp xive2_tm_operations[] = { @@ -589,50 +731,58 @@ static const XiveTmOp xive2_tm_operations[] = { * MMIOs below 2K : raw values and special 
operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx, - NULL }, - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, - xive_tm_vt_poll }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target, - NULL }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true, + xive2_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true, + xive2_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, true, true, + xive2_tm_push_os_ctx, NULL }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, true, true, + xive_tm_set_os_lgs, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_WORD2, 4, true, true, + xive2_tm_push_pool_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_WORD2, 8, true, true, + xive2_tm_push_pool_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_LGS, 1, true, true, + xive_tm_set_pool_lgs, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true, + xive2_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true, + xive2_tm_push_phys_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true, + NULL, xive_tm_vt_poll }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, true, true, + xive2_tm_set_hv_target, NULL }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, - xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, - NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL, - xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, - xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, - xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, - xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol, - NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL, - xive_tm_pull_phys_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, - xive_tm_pull_phys_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol, - NULL }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false, + NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false, + xive2_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false, + NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false, + 
NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, true, false, + xive2_tm_pull_os_ctx_ol, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false, + NULL, xive2_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, true, false, + xive2_tm_pull_phys_ctx_ol, NULL }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, true, false, + xive2_tm_ack_os_el, NULL }, }; static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset, @@ -674,21 +824,31 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { const XiveTmOp *xto; + uint8_t ring = offset & TM_RING_OFFSET; + bool is_valid = xive_ring_valid(tctx, ring); + bool hw_owned = is_valid; trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value); /* - * TODO: check V bit in Q[0-3]W2 - */ - - /* * First, check for special operations in the 2K region */ + xto = xive_tm_find_op(tctx->xptr, offset, size, true); + if (xto) { + if (hw_owned && !xto->hw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + if (!hw_owned && !xto->sw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to SW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + } + if (offset & TM_SPECIAL_OP) { - xto = xive_tm_find_op(tctx->xptr, offset, size, true); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA " - "@%"HWADDR_PRIx"\n", offset); + "@%"HWADDR_PRIx" size %d\n", offset, size); } else { xto->write_handler(xptr, tctx, offset, value, size); } @@ -698,7 +858,6 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Then, for special operations in the region below 2K. 
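The XiveTmOp tables above grow hw_ok/sw_ok flags so that each TIMA operation can be checked against who currently owns the targeted ring (hardware when the ring's valid bit is set, software otherwise); on a mismatch the code logs a guest error and still dispatches the handler. A small self-contained sketch of that lookup-and-check pattern follows; the offsets, sizes and names in the table are hypothetical, only the shape of the check mirrors the code above.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct TmOp {
    uint32_t offset;    /* register offset within the TIMA */
    unsigned size;      /* access size this handler accepts */
    bool hw_ok;         /* defined while the ring is HW-owned (valid) */
    bool sw_ok;         /* defined while the ring is SW-owned */
    const char *name;
} TmOp;

static const TmOp ops[] = {               /* hypothetical entries */
    { 0x010, 1, true,  true,  "set OS CPPR"   },
    { 0x3c0, 1, false, true,  "push PHYS ctx" },
    { 0xc10, 2, true,  false, "ack OS reg"    },
};

static const TmOp *find_op(uint32_t offset, unsigned size)
{
    for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
        if (ops[i].offset == offset && ops[i].size == size) {
            return &ops[i];
        }
    }
    return NULL;
}

static void tm_write(uint32_t offset, unsigned size, bool hw_owned)
{
    const TmOp *op = find_op(offset, size);

    if (!op) {
        printf("invalid access @0x%x size %u\n", offset, size);
        return;
    }
    if (hw_owned && !op->hw_ok) {
        printf("undefined write to HW-owned TIMA: %s\n", op->name);
    }
    if (!hw_owned && !op->sw_ok) {
        printf("undefined write to SW-owned TIMA: %s\n", op->name);
    }
    printf("dispatch: %s\n", op->name);   /* logged, but still handled */
}

int main(void)
{
    tm_write(0x3c0, 1, true);    /* pushing a context on a valid ring: flagged */
    tm_write(0x010, 1, false);   /* CPPR write from software: dispatched */
    return 0;
}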
*/ - xto = xive_tm_find_op(tctx->xptr, offset, size, true); if (xto) { xto->write_handler(xptr, tctx, offset, value, size); return; @@ -707,6 +866,11 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Finish with raw access to the register values */ + if (hw_owned) { + /* Store context operations are dangerous when context is valid */ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } xive_tm_raw_write(tctx, offset, value, size); } @@ -714,20 +878,30 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { const XiveTmOp *xto; + uint8_t ring = offset & TM_RING_OFFSET; + bool is_valid = xive_ring_valid(tctx, ring); + bool hw_owned = is_valid; uint64_t ret; - /* - * TODO: check V bit in Q[0-3]W2 - */ + xto = xive_tm_find_op(tctx->xptr, offset, size, false); + if (xto) { + if (hw_owned && !xto->hw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + if (!hw_owned && !xto->sw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to SW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + } /* * First, check for special operations in the 2K region */ if (offset & TM_SPECIAL_OP) { - xto = xive_tm_find_op(tctx->xptr, offset, size, false); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" - "@%"HWADDR_PRIx"\n", offset); + "@%"HWADDR_PRIx" size %d\n", offset, size); return -1; } ret = xto->read_handler(xptr, tctx, offset, size); @@ -737,7 +911,6 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Then, for special operations in the region below 2K. */ - xto = xive_tm_find_op(tctx->xptr, offset, size, false); if (xto) { ret = xto->read_handler(xptr, tctx, offset, size); goto out; @@ -930,7 +1103,7 @@ static const Property xive_tctx_properties[] = { XivePresenter *), }; -static void xive_tctx_class_init(ObjectClass *klass, void *data) +static void xive_tctx_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1191,6 +1364,7 @@ static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) /* Forward the source event notification for routing */ if (ret) { + trace_xive_source_notify(srcno); xive_source_notify(xsrc, srcno); } break; @@ -1286,6 +1460,8 @@ out: /* Forward the source event notification for routing */ if (notify) { xive_source_notify(xsrc, srcno); + } else { + trace_xive_source_blocked(srcno); } } @@ -1412,7 +1588,7 @@ static const Property xive_source_properties[] = { XiveNotifier *), }; -static void xive_source_class_init(ObjectClass *klass, void *data) +static void xive_source_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1672,8 +1848,8 @@ uint32_t xive_get_vpgroup_size(uint32_t nvp_index) return 1U << (first_zero + 1); } -static uint8_t xive_get_group_level(bool crowd, bool ignore, - uint32_t nvp_blk, uint32_t nvp_index) +uint8_t xive_get_group_level(bool crowd, bool ignore, + uint32_t nvp_blk, uint32_t nvp_index) { int first_zero; uint8_t level; @@ -1791,15 +1967,14 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, * This is our simple Xive Presenter Engine model. It is merged in the * Router as it does not require an extra object. 
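The hunk above that exports xive_get_group_level() sits next to xive_get_vpgroup_size(), whose visible return value 1U << (first_zero + 1) encodes the XIVE group-index convention: a group NVP index carries a run of 1s in its least significant bits, and the position of the first 0 bit gives the group size (and, by the same convention, the group level carried in the NSR). The sketch below illustrates that derivation together with its inverse, the member-index-to-group-index mapping used later by xive2_redistribute(); the example index is made up and the helpers are local, not QEMU's.

#include <stdint.h>
#include <stdio.h>

/* Count trailing ones, i.e. the position of the first 0 bit from the LSB */
static int first_zero_bit(uint32_t v)
{
    int n = 0;

    while (v & 1) {
        v >>= 1;
        n++;
    }
    return n;
}

/* Inverse direction: member NVP index + group level -> group NVP index */
static uint32_t group_index(uint32_t member_idx, uint8_t group_level)
{
    if (group_level == 0) {
        return member_idx;
    }
    return (member_idx & (0xffffffff << group_level)) |
           ((1u << (group_level - 1)) - 1);
}

int main(void)
{
    uint32_t grp = 0x1233;            /* hypothetical group index: low bits 0011 */
    int fz = first_zero_bit(grp);     /* 2 */
    uint32_t size = 1u << (fz + 1);   /* 8, as in xive_get_vpgroup_size() */
    int level = fz + 1;               /* 3, ignoring crowd bits */

    printf("group 0x%x: size %u, level %d, members 0x%x..0x%x\n",
           grp, size, level, grp & ~(size - 1), (grp & ~(size - 1)) + size - 1);

    /* Round-trip: any member of that group maps back to the same group index */
    printf("member 0x1236 -> group 0x%x\n", group_index(0x1236, (uint8_t)level));
    return 0;
}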
*/ -bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, +bool xive_presenter_match(XiveFabric *xfb, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, bool *precluded) + uint32_t logic_serv, XiveTCTXMatch *match) { XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); - XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false }; - uint8_t group_level; - int count; + + memset(match, 0, sizeof(*match)); /* * Ask the machine to scan the interrupt controllers for a match. @@ -1824,22 +1999,8 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, * a new command to the presenters (the equivalent of the "assign" * power bus command in the documented full notify sequence. */ - count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, - priority, logic_serv, &match); - if (count < 0) { - return false; - } - - /* handle CPU exception delivery */ - if (count) { - group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx); - trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); - xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); - } else { - *precluded = match.precluded; - } - - return !!count; + return xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, + priority, logic_serv, match); } /* @@ -1876,7 +2037,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) uint8_t nvt_blk; uint32_t nvt_idx; XiveNVT nvt; - bool found, precluded; + XiveTCTXMatch match; uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); @@ -1956,16 +2117,16 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) return; } - found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, - false /* crowd */, - xive_get_field32(END_W7_F0_IGNORE, end.w7), - priority, - xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), - &precluded); - /* we don't support VP-group notification on P9, so precluded is not used */ /* TODO: Auto EOI. 
*/ - - if (found) { + /* we don't support VP-group notification on P9, so precluded is not used */ + if (xive_presenter_match(xrtr->xfb, format, nvt_blk, nvt_idx, + false /* crowd */, + xive_get_field32(END_W7_F0_IGNORE, end.w7), + priority, + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), + &match)) { + trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, 0); + xive_tctx_pipr_present(match.tctx, match.ring, priority, 0); return; } @@ -2085,7 +2246,7 @@ static const Property xive_router_properties[] = { TYPE_XIVE_FABRIC, XiveFabric *), }; -static void xive_router_class_init(ObjectClass *klass, void *data) +static void xive_router_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); @@ -2108,7 +2269,7 @@ static const TypeInfo xive_router_info = { .instance_size = sizeof(XiveRouter), .class_size = sizeof(XiveRouterClass), .class_init = xive_router_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_NOTIFIER }, { TYPE_XIVE_PRESENTER }, { } @@ -2254,7 +2415,7 @@ static const Property xive_end_source_properties[] = { XiveRouter *), }; -static void xive_end_source_class_init(ObjectClass *klass, void *data) +static void xive_end_source_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c index 7d584df..ee5fa26 100644 --- a/hw/intc/xive2.c +++ b/hw/intc/xive2.c @@ -19,6 +19,13 @@ #include "hw/ppc/xive2_regs.h" #include "trace.h" +static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data, + bool redistribute); + +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, + uint8_t *nvp_blk, uint32_t *nvp_idx); + uint32_t xive2_router_get_config(Xive2Router *xrtr) { Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); @@ -188,12 +195,27 @@ void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); } +#define XIVE2_QSIZE_CHUNK_CL 128 +#define XIVE2_QSIZE_CHUNK_4k 4096 +/* Calculate max number of queue entries for an END */ +static uint32_t xive2_end_get_qentries(Xive2End *end) +{ + uint32_t w3 = end->w3; + uint32_t qsize = xive_get_field32(END2_W3_QSIZE, w3); + if (xive_get_field32(END2_W3_CL, w3)) { + g_assert(qsize <= 4); + return (XIVE2_QSIZE_CHUNK_CL << qsize) / sizeof(uint32_t); + } else { + g_assert(qsize <= 12); + return (XIVE2_QSIZE_CHUNK_4k << qsize) / sizeof(uint32_t); + } +} + void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf) { uint64_t qaddr_base = xive2_end_qaddr(end); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); int i; /* @@ -223,8 +245,7 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf) uint64_t qaddr_base = xive2_end_qaddr(end); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); @@ -341,13 +362,12 @@ void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, 
uint32_t nvgc_idx, GString *buf) static void xive2_end_enqueue(Xive2End *end, uint32_t data) { uint64_t qaddr_base = xive2_end_qaddr(end); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); uint64_t qaddr = qaddr_base + (qindex << 2); uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { @@ -361,8 +381,8 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) qgen ^= 1; end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen); - /* TODO(PowerNV): reset GF bit on a cache watch operation */ - end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen); + /* Set gen flipped to 1, it gets reset on a cache watch operation */ + end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, 1); } end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); } @@ -492,12 +512,13 @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, */ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx, - uint8_t ring) + uint8_t ring, + uint8_t nvp_blk, uint32_t nvp_idx) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; Xive2Nvp nvp; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { @@ -533,7 +554,14 @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, } nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); - nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); + + if ((nvp.w0 & NVP2_W0_P) || ring != TM_QW2_HV_POOL) { + /* + * Non-pool contexts always save CPPR (ignore p bit). XXX: Clarify + * whether that is the correct behaviour. + */ + nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, sig_regs[TM_CPPR]); + } if (nvp.w0 & NVP2_W0_L) { /* * Typically not used. 
If LSMFB is restored with 0, it will @@ -555,6 +583,7 @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); } +/* POOL cam is the same as OS cam encoding */ static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk, uint32_t *nvp_idx, bool *valid, bool *hw) { @@ -584,6 +613,79 @@ static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); } +static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t nsr = sig_regs[TM_NSR]; + uint8_t pipr = sig_regs[TM_PIPR]; + uint8_t crowd = NVx_CROWD_LVL(nsr); + uint8_t group = NVx_GROUP_LVL(nsr); + uint8_t nvgc_blk, end_blk, nvp_blk; + uint32_t nvgc_idx, end_idx, nvp_idx; + Xive2Nvgc nvgc; + uint8_t prio_limit; + uint32_t cfg; + + /* redistribution is only for group/crowd interrupts */ + if (!xive_nsr_indicates_group_exception(ring, nsr)) { + return; + } + + /* Don't check return code since ring is expected to be invalidated */ + xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx); + + trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx); + + trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx); + /* convert crowd/group to blk/idx */ + if (group > 0) { + nvgc_idx = (nvp_idx & (0xffffffff << group)) | + ((1 << (group - 1)) - 1); + } else { + nvgc_idx = nvp_idx; + } + + if (crowd > 0) { + crowd = (crowd == 3) ? 4 : crowd; + nvgc_blk = (nvp_blk & (0xffffffff << crowd)) | + ((1 << (crowd - 1)) - 1); + } else { + nvgc_blk = nvp_blk; + } + + /* Use blk/idx to retrieve the NVGC */ + if (xive2_router_get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", + crowd ? 
"NVC" : "NVG", nvgc_blk, nvgc_idx); + return; + } + + /* retrieve the END blk/idx from the NVGC */ + end_blk = xive_get_field32(NVGC2_W1_END_BLK, nvgc.w1); + end_idx = xive_get_field32(NVGC2_W1_END_IDX, nvgc.w1); + + /* determine number of priorities being used */ + cfg = xive2_router_get_config(xrtr); + if (cfg & XIVE2_EN_VP_GRP_PRIORITY) { + prio_limit = 1 << GETFIELD(NVGC2_W1_PSIZE, nvgc.w1); + } else { + prio_limit = 1 << GETFIELD(XIVE2_VP_INT_PRIO, cfg); + } + + /* add priority offset to end index */ + end_idx += pipr % prio_limit; + + /* trigger the group END */ + xive2_router_end_notify(xrtr, end_blk, end_idx, 0, true); + + /* clear interrupt indication for the context */ + sig_regs[TM_NSR] = 0; + sig_regs[TM_PIPR] = sig_regs[TM_CPPR]; + xive_tctx_reset_signal(tctx, ring); +} + +static void xive2_tctx_process_pending(XiveTCTX *tctx, uint8_t sig_ring); + static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size, uint8_t ring) { @@ -595,10 +697,11 @@ static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, uint8_t cur_ring; bool valid; bool do_save; + uint8_t nsr; xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save); - if (!valid) { + if (xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", nvp_blk, nvp_idx); } @@ -608,21 +711,53 @@ static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, cur_ring += XIVE_TM_RING_SIZE) { uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]); uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0); + bool is_valid = !!(xive_get_field32(TM2_QW1W2_VO, ringw2)); + uint8_t *sig_regs; + memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4); + + /* Skip the rest for USER or invalid contexts */ + if ((cur_ring == TM_QW0_USER) || !is_valid) { + continue; + } + + /* Active group/crowd interrupts need to be redistributed */ + sig_regs = xive_tctx_signal_regs(tctx, ring); + nsr = sig_regs[TM_NSR]; + if (xive_nsr_indicates_group_exception(cur_ring, nsr)) { + /* Ensure ring matches NSR (for HV NSR POOL vs PHYS rings) */ + if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) { + xive2_redistribute(xrtr, tctx, cur_ring); + } + } + + /* + * Lower external interrupt line of requested ring and below except for + * USER, which doesn't exist. + */ + if (xive_nsr_indicates_exception(cur_ring, nsr)) { + if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) { + xive_tctx_reset_signal(tctx, cur_ring); + } + } } - if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { - xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring); + if (ring == TM_QW2_HV_POOL) { + /* Re-check phys for interrupts if pool was disabled */ + nsr = tctx->regs[TM_QW3_HV_PHYS + TM_NSR]; + if (xive_nsr_indicates_exception(TM_QW3_HV_PHYS, nsr)) { + /* Ring must be PHYS because POOL would have been redistributed */ + g_assert(xive_nsr_exception_ring(TM_QW3_HV_PHYS, nsr) == + TM_QW3_HV_PHYS); + } else { + xive2_tctx_process_pending(tctx, TM_QW3_HV_PHYS); + } } - /* - * Lower external interrupt line of requested ring and below except for - * USER, which doesn't exist. 
- */ - for (cur_ring = TM_QW1_OS; cur_ring <= ring; - cur_ring += XIVE_TM_RING_SIZE) { - xive_tctx_reset_signal(tctx, cur_ring); + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { + xive2_tctx_save_ctx(xrtr, tctx, ring, nvp_blk, nvp_idx); } + return target_ringw2; } @@ -632,6 +767,18 @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS); } +uint64_t xive2_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW2_HV_POOL); +} + +uint64_t xive2_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW3_HV_PHYS); +} + #define REPORT_LINE_GEN1_SIZE 16 static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data, @@ -741,12 +888,15 @@ void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); } -static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx, - Xive2Nvp *nvp) +static uint8_t xive2_tctx_restore_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, + uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; uint8_t cppr; if (!xive2_nvp_is_hw(nvp)) { @@ -759,10 +909,10 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0); xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2); - tctx->regs[TM_QW1_OS + TM_CPPR] = cppr; - tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); - tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); - tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); + sig_regs[TM_CPPR] = cppr; + regs[TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); + regs[TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); + regs[TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1); nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1); @@ -771,9 +921,18 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, /* * Checkout privilege: 0:OS, 1:Pool, 2:Hard * - * TODO: we only support OS push/pull + * TODO: we don't support hard push/pull */ - nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + switch (ring) { + case TM_QW1_OS: + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + break; + case TM_QW2_HV_POOL: + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 1); + break; + default: + g_assert_not_reached(); + } xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1); @@ -781,18 +940,14 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, return cppr; } -static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, +/* Restore TIMA VP context from NVP backlog */ +static void xive2_tctx_restore_nvp(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, uint8_t nvp_blk, uint32_t nvp_idx, bool do_restore) { - XivePresenter *xptr = XIVE_PRESENTER(xrtr); + uint8_t *regs = &tctx->regs[ring]; uint8_t ipb; - uint8_t backlog_level; - uint8_t group_level; - uint8_t first_group; - uint8_t backlog_prio; - uint8_t group_prio; - uint8_t *regs = 
&tctx->regs[TM_QW1_OS]; Xive2Nvp nvp; /* @@ -812,9 +967,8 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, } /* Automatically restore thread context registers */ - if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && - do_restore) { - xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp); + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_restore) { + xive2_tctx_restore_ctx(xrtr, tctx, ring, nvp_blk, nvp_idx, &nvp); } ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); @@ -822,143 +976,230 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); } + /* IPB bits in the backlog are merged with the TIMA IPB bits */ regs[TM_IPB] |= ipb; - backlog_prio = xive_ipb_to_pipr(ipb); - backlog_level = 0; - - first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); - if (first_group && regs[TM_LSMFB] < backlog_prio) { - group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx, - first_group, &group_level); - regs[TM_LSMFB] = group_prio; - if (regs[TM_LGS] && group_prio < backlog_prio) { - /* VP can take a group interrupt */ - xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx, - group_prio, group_level); - backlog_prio = group_prio; - backlog_level = group_level; - } - } - - /* - * Compute the PIPR based on the restored state. - * It will raise the External interrupt signal if needed. - */ - xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); } /* - * Updating the OS CAM line can trigger a resend of interrupt + * Updating the ring CAM line can trigger a resend of interrupt */ -void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, - hwaddr offset, uint64_t value, unsigned size) +static void xive2_tm_push_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size, + uint8_t ring) { uint32_t cam; - uint32_t qw1w2; - uint64_t qw1dw1; + uint32_t w2; + uint64_t dw1; uint8_t nvp_blk; uint32_t nvp_idx; - bool vo; + bool v; bool do_restore; + if (xive_ring_valid(tctx, ring)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Attempt to push VP to enabled" + " ring 0x%02x\n", ring); + return; + } + /* First update the thead context */ switch (size) { + case 1: + tctx->regs[ring + TM_WORD2] = value & 0xff; + cam = xive2_tctx_hw_cam_line(xptr, tctx); + cam |= ((value & 0xc0) << 24); /* V and H bits */ + break; case 4: cam = value; - qw1w2 = cpu_to_be32(cam); - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); + w2 = cpu_to_be32(cam); + memcpy(&tctx->regs[ring + TM_WORD2], &w2, 4); break; case 8: cam = value >> 32; - qw1dw1 = cpu_to_be64(value); - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8); + dw1 = cpu_to_be64(value); + memcpy(&tctx->regs[ring + TM_WORD2], &dw1, 8); break; default: g_assert_not_reached(); } - xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); + xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &v, &do_restore); /* Check the interrupt pending bits */ - if (vo) { - xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx, - do_restore); + if (v) { + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t cur_ring; + + xive2_tctx_restore_nvp(xrtr, tctx, ring, + nvp_blk, nvp_idx, do_restore); + + for (cur_ring = TM_QW1_OS; cur_ring <= ring; + cur_ring += XIVE_TM_RING_SIZE) { + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, cur_ring); + uint8_t nsr = sig_regs[TM_NSR]; + + if (!xive_ring_valid(tctx, cur_ring)) { + continue; + } + + if (cur_ring == 
TM_QW2_HV_POOL) { + if (xive_nsr_indicates_exception(cur_ring, nsr)) { + g_assert(xive_nsr_exception_ring(cur_ring, nsr) == + TM_QW3_HV_PHYS); + xive2_redistribute(xrtr, tctx, + xive_nsr_exception_ring(ring, nsr)); + } + xive2_tctx_process_pending(tctx, TM_QW3_HV_PHYS); + break; + } + xive2_tctx_process_pending(tctx, cur_ring); + } } } +void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW1_OS); +} + +void xive2_tm_push_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW2_HV_POOL); +} + +void xive2_tm_push_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); +} + +/* returns -1 if ring is invalid, but still populates block and index */ static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, - uint32_t *nvp_blk, uint32_t *nvp_idx) + uint8_t *nvp_blk, uint32_t *nvp_idx) { - uint32_t w2, cam; + uint32_t w2; + uint32_t cam = 0; + int rc = 0; w2 = xive_tctx_word2(&tctx->regs[ring]); switch (ring) { case TM_QW1_OS: if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { - return -1; + rc = -1; } cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); break; case TM_QW2_HV_POOL: if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { - return -1; + rc = -1; } cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); break; case TM_QW3_HV_PHYS: if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { - return -1; + rc = -1; } cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); break; default: - return -1; + rc = -1; } *nvp_blk = xive2_nvp_blk(cam); *nvp_idx = xive2_nvp_idx(cam); - return 0; + return rc; } -static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) +static void xive2_tctx_accept_el(XivePresenter *xptr, XiveTCTX *tctx, + uint8_t ring, uint8_t cl_ring) { - uint8_t *regs = &tctx->regs[ring]; - Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); - uint8_t old_cppr, backlog_prio, first_group, group_level = 0; - uint8_t pipr_min, lsmfb_min, ring_min; - bool group_enabled; - uint32_t nvp_blk, nvp_idx; + uint64_t rd; + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t nvp_idx, xive2_cfg; + uint8_t nvp_blk; Xive2Nvp nvp; - int rc; + uint64_t phys_addr; + uint8_t OGen = 0; - trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - cppr, regs[TM_NSR]); + xive2_tctx_get_nvp_indexes(tctx, cl_ring, &nvp_blk, &nvp_idx); - if (cppr > XIVE_PRIORITY_MAX) { - cppr = 0xff; + if (xive2_router_get_nvp(xrtr, (uint8_t)nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + + rd = xive_tctx_accept(tctx, ring); + + if (ring == TM_QW1_OS) { + OGen = tctx->regs[ring + TM_OGEN]; + } + xive2_cfg = xive2_router_get_config(xrtr); + phys_addr = xive2_nvp_reporting_addr(&nvp); + uint8_t report_data[REPORT_LINE_GEN1_SIZE]; + memset(report_data, 0xff, sizeof(report_data)); + if ((OGen == 1) || (xive2_cfg & XIVE2_GEN1_TIMA_OS)) { + report_data[8] = (rd >> 8) & 0xff; + report_data[9] = rd & 0xff; + } else { + report_data[0] = (rd >> 8) & 0xff; + report_data[1] = rd & 0xff; } + cpu_physical_memory_write(phys_addr, report_data, REPORT_LINE_GEN1_SIZE); +} + +void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX 
*tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_accept_el(xptr, tctx, TM_QW1_OS, TM_QW1_OS); +} + +/* Re-calculate and present pending interrupts */ +static void xive2_tctx_process_pending(XiveTCTX *tctx, uint8_t sig_ring) +{ + uint8_t *sig_regs = &tctx->regs[sig_ring]; + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); + uint8_t backlog_prio; + uint8_t first_group; + uint8_t group_level; + uint8_t pipr_min; + uint8_t lsmfb_min; + uint8_t ring_min; + uint8_t cppr = sig_regs[TM_CPPR]; + bool group_enabled; + Xive2Nvp nvp; + int rc; - old_cppr = regs[TM_CPPR]; - regs[TM_CPPR] = cppr; + g_assert(sig_ring == TM_QW3_HV_PHYS || sig_ring == TM_QW1_OS); + g_assert(sig_regs[TM_WORD2] & 0x80); + g_assert(!xive_nsr_indicates_group_exception(sig_ring, sig_regs[TM_NSR])); /* * Recompute the PIPR based on local pending interrupts. It will * be adjusted below if needed in case of pending group interrupts. */ - pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); - group_enabled = !!regs[TM_LGS]; - lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; - ring_min = ring; +again: + pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]); + group_enabled = !!sig_regs[TM_LGS]; + lsmfb_min = group_enabled ? sig_regs[TM_LSMFB] : 0xff; + ring_min = sig_ring; + group_level = 0; /* PHYS updates also depend on POOL values */ - if (ring == TM_QW3_HV_PHYS) { - uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; + if (sig_ring == TM_QW3_HV_PHYS) { + uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; /* POOL values only matter if POOL ctx is valid */ - if (pregs[TM_WORD2] & 0x80) { - - uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); - uint8_t pool_lsmfb = pregs[TM_LSMFB]; + if (pool_regs[TM_WORD2] & 0x80) { + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); + uint8_t pool_lsmfb = pool_regs[TM_LSMFB]; /* * Determine highest priority interrupt and @@ -972,7 +1213,7 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } /* Values needed for group priority calculation */ - if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { + if (pool_regs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { group_enabled = true; lsmfb_min = pool_lsmfb; if (lsmfb_min < pipr_min) { @@ -981,32 +1222,26 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } } } - regs[TM_PIPR] = pipr_min; - - rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); - if (rc) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); - return; - } - - if (cppr < old_cppr) { - /* - * FIXME: check if there's a group interrupt being presented - * and if the new cppr prevents it. If so, then the group - * interrupt needs to be re-added to the backlog and - * re-triggered (see re-trigger END info in the NVGC - * structure) - */ - } if (group_enabled && lsmfb_min < cppr && - lsmfb_min < regs[TM_PIPR]) { + lsmfb_min < pipr_min) { + + uint8_t nvp_blk; + uint32_t nvp_idx; + /* * Thread has seen a group interrupt with a higher priority * than the new cppr or pending local interrupt. 
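xive2_tctx_accept_el() above acknowledges the exception through xive_tctx_accept() and then writes a 16-byte reporting line to the address taken from the NVP: the line is filled with 0xff and the acknowledged NSR/CPPR pair lands at byte offset 8 when OGen is set or the Gen1 TIMA layout is configured, at offset 0 otherwise. A self-contained sketch of just that buffer layout; the NSR/CPPR values below are made up, and the boolean flag is a local stand-in for the OGen/Gen1 test.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REPORT_LINE_SIZE 16

/* rd is the value returned by the accept: (NSR << 8) | CPPR */
static void build_report_line(uint8_t line[REPORT_LINE_SIZE], uint16_t rd,
                              bool gen1_layout)
{
    size_t off = gen1_layout ? 8 : 0;

    memset(line, 0xff, REPORT_LINE_SIZE);
    line[off]     = (rd >> 8) & 0xff;   /* NSR */
    line[off + 1] = rd & 0xff;          /* CPPR */
}

int main(void)
{
    uint8_t line[REPORT_LINE_SIZE];

    build_report_line(line, 0x4105, true);   /* hypothetical NSR=0x41, CPPR=0x05 */
    for (int i = 0; i < REPORT_LINE_SIZE; i++) {
        printf("%02x ", line[i]);
    }
    printf("\n");
    return 0;
}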
Check the * backlog */ + rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); + if (rc) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid " + "context\n"); + return; + } + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", nvp_blk, nvp_idx); @@ -1030,14 +1265,85 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) nvp_blk, nvp_idx, first_group, &group_level); tctx->regs[ring_min + TM_LSMFB] = backlog_prio; - if (backlog_prio != 0xFF) { - xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, - backlog_prio, group_level); - regs[TM_PIPR] = backlog_prio; + if (backlog_prio != lsmfb_min) { + /* + * If the group backlog scan finds a less favored or no interrupt, + * then re-do the processing which may turn up a more favored + * interrupt from IPB or the other pool. Backlog should not + * find a priority < LSMFB. + */ + g_assert(backlog_prio >= lsmfb_min); + goto again; + } + + xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, + backlog_prio, group_level); + pipr_min = backlog_prio; + } + + if (pipr_min > cppr) { + pipr_min = cppr; + } + xive_tctx_pipr_set(tctx, ring_min, pipr_min, group_level); +} + +/* NOTE: CPPR only exists for TM_QW1_OS and TM_QW3_HV_PHYS */ +static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t sig_ring, uint8_t cppr) +{ + uint8_t *sig_regs = &tctx->regs[sig_ring]; + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); + uint8_t old_cppr; + uint8_t nsr = sig_regs[TM_NSR]; + + g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS); + + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + /* XXX: should show pool IPB for PHYS ring */ + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, sig_ring, + sig_regs[TM_IPB], sig_regs[TM_PIPR], + cppr, nsr); + + if (cppr > XIVE_PRIORITY_MAX) { + cppr = 0xff; + } + + old_cppr = sig_regs[TM_CPPR]; + sig_regs[TM_CPPR] = cppr; + + /* Handle increased CPPR priority (lower value) */ + if (cppr < old_cppr) { + if (cppr <= sig_regs[TM_PIPR]) { + /* CPPR lowered below PIPR, must un-present interrupt */ + if (xive_nsr_indicates_exception(sig_ring, nsr)) { + if (xive_nsr_indicates_group_exception(sig_ring, nsr)) { + /* redistribute precluded active grp interrupt */ + xive2_redistribute(xrtr, tctx, + xive_nsr_exception_ring(sig_ring, nsr)); + return; + } + } + + /* interrupt is VP directed, pending in IPB */ + xive_tctx_pipr_set(tctx, sig_ring, cppr, 0); + return; + } else { + /* CPPR was lowered, but still above PIPR. No action needed. */ + return; } } - /* CPPR has changed, check if we need to raise a pending exception */ - xive_tctx_notify(tctx, ring_min, group_level); + + /* CPPR didn't change, nothing needs to be done */ + if (cppr == old_cppr) { + return; + } + + /* CPPR priority decreased (higher value) */ + if (!xive_nsr_indicates_exception(sig_ring, nsr)) { + xive2_tctx_process_pending(tctx, sig_ring); + } } void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, @@ -1052,6 +1358,34 @@ void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); } +/* + * Adjust the IPB to allow a CPU to process event queues of other + * priorities during one physical interrupt cycle. 
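Both xive2_tctx_process_pending() and the reworked xive2_tctx_set_cppr() above reduce to one signalling rule, the same one xive_tctx_pipr_set() applies in xive.c: an exception is raised only while PIPR < CPPR, so moving the CPPR to a value at or below the PIPR must un-present the current interrupt (redistributing it if it was a group interrupt, otherwise leaving it latched in the IPB), while relaxing the CPPR may newly present one. A minimal model of that rule, with simplified names and no NSR or group handling:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
    uint8_t cppr;   /* current priority: only more favored irqs may signal */
    uint8_t pipr;   /* post-interrupt priority: best pending irq */
    bool    signal; /* models the NSR exception bit / output line */
};

/* Mirror of the PIPR update rule: signal iff PIPR is more favored than CPPR */
static void pipr_set(struct ring *r, uint8_t pipr)
{
    r->pipr = pipr;
    r->signal = pipr < r->cppr;
}

static void cppr_set(struct ring *r, uint8_t cppr)
{
    r->cppr = cppr;
    /* re-evaluate with the same PIPR; the real code may also rescan backlogs */
    pipr_set(r, r->pipr);
}

int main(void)
{
    struct ring r = { .cppr = 0xff, .pipr = 0xff };

    pipr_set(&r, 5);            /* pending interrupt at priority 5 */
    printf("signal=%d\n", r.signal);   /* 1: 5 < 0xff */

    cppr_set(&r, 3);            /* CPPR made more favored (lower value) */
    printf("signal=%d\n", r.signal);   /* 0: precluded, stays pending */

    cppr_set(&r, 6);            /* CPPR relaxed again */
    printf("signal=%d\n", r.signal);   /* 1: 5 < 6, presented again */
    return 0;
}

In the patched code the re-evaluation on a relaxed CPPR goes through xive2_tctx_process_pending(), which additionally rescans the group backlog before settling on a PIPR.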
+ */ +void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t ring = TM_QW1_OS; + uint8_t *regs = &tctx->regs[ring]; + uint8_t priority = value & 0xff; + + /* + * XXX: should this simply set a bit in IPB and wait for it to be picked + * up next cycle, or is it supposed to present it now? We implement the + * latter here. + */ + regs[TM_IPB] |= xive_priority_to_ipb(priority); + if (xive_ipb_to_pipr(regs[TM_IPB]) >= regs[TM_PIPR]) { + return; + } + if (xive_nsr_indicates_group_exception(ring, regs[TM_NSR])) { + xive2_redistribute(xrtr, tctx, ring); + } + + xive_tctx_pipr_present(tctx, ring, priority, 0); +} + static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) { uint8_t *regs = &tctx->regs[ring]; @@ -1259,9 +1593,7 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) { - /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ - uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; - uint8_t *alt_regs = &tctx->regs[alt_ring]; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); /* * The xive2_presenter_tctx_match() above tells if there's a match @@ -1269,7 +1601,7 @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) * priority to know if the thread can take the interrupt now or if * it is precluded. */ - if (priority < alt_regs[TM_CPPR]) { + if (priority < sig_regs[TM_PIPR]) { return false; } return true; @@ -1322,12 +1654,14 @@ static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk, * message has the same parameters than in the function below. */ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, - uint32_t end_idx, uint32_t end_data) + uint32_t end_idx, uint32_t end_data, + bool redistribute) { Xive2End end; uint8_t priority; uint8_t format; - bool found, precluded; + XiveTCTXMatch match; + bool crowd, cam_ignore; uint8_t nvx_blk; uint32_t nvx_idx; @@ -1350,7 +1684,8 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, return; } - if (xive2_end_is_enqueue(&end)) { + if (!redistribute && xive2_end_is_enqueue(&end)) { + trace_xive_end_enqueue(end_blk, end_idx, end_data); xive2_end_enqueue(&end, end_data); /* Enqueuing event data modifies the EQ toggle and index */ xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1); @@ -1396,16 +1731,28 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, */ nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6); nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6); - - found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx, - xive2_end_is_crowd(&end), xive2_end_is_ignore(&end), - priority, - xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), - &precluded); + crowd = xive2_end_is_crowd(&end); + cam_ignore = xive2_end_is_ignore(&end); /* TODO: Auto EOI. 
*/ + if (xive_presenter_match(xrtr->xfb, format, nvx_blk, nvx_idx, + crowd, cam_ignore, priority, + xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), + &match)) { + XiveTCTX *tctx = match.tctx; + uint8_t ring = match.ring; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t nsr = sig_regs[TM_NSR]; + uint8_t group_level; + + if (priority < sig_regs[TM_PIPR] && + xive_nsr_indicates_group_exception(ring, nsr)) { + xive2_redistribute(xrtr, tctx, xive_nsr_exception_ring(ring, nsr)); + } - if (found) { + group_level = xive_get_group_level(crowd, cam_ignore, nvx_blk, nvx_idx); + trace_xive_presenter_notify(nvx_blk, nvx_idx, ring, group_level); + xive_tctx_pipr_present(tctx, ring, priority, group_level); return; } @@ -1423,7 +1770,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, return; } - if (!xive2_end_is_ignore(&end)) { + if (!cam_ignore) { uint8_t ipb; Xive2Nvp nvp; @@ -1452,9 +1799,6 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, } else { Xive2Nvgc nvgc; uint32_t backlog; - bool crowd; - - crowd = xive2_end_is_crowd(&end); /* * For groups and crowds, the per-priority backlog @@ -1486,9 +1830,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, if (backlog == 1) { XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx, - xive2_end_is_crowd(&end), - xive2_end_is_ignore(&end), - priority); + crowd, cam_ignore, priority); if (!xive2_end_is_precluded_escalation(&end)) { /* @@ -1522,18 +1864,41 @@ do_escalation: } } - /* - * The END trigger becomes an Escalation trigger - */ - xive2_router_end_notify(xrtr, - xive_get_field32(END2_W4_END_BLOCK, end.w4), - xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), - xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); + if (xive2_end_is_escalate_end(&end)) { + /* + * Perform END Adaptive escalation processing + * The END trigger becomes an Escalation trigger + */ + uint8_t esc_blk = xive_get_field32(END2_W4_END_BLOCK, end.w4); + uint32_t esc_idx = xive_get_field32(END2_W4_ESC_END_INDEX, end.w4); + uint32_t esc_data = xive_get_field32(END2_W5_ESC_END_DATA, end.w5); + trace_xive_escalate_end(end_blk, end_idx, esc_blk, esc_idx, esc_data); + xive2_router_end_notify(xrtr, esc_blk, esc_idx, esc_data, false); + } /* end END adaptive escalation */ + + else { + uint32_t lisn; /* Logical Interrupt Source Number */ + + /* + * Perform ESB escalation processing + * E[N] == 1 --> N + * Req[Block] <- E[ESB_Block] + * Req[Index] <- E[ESB_Index] + * Req[Offset] <- 0x000 + * Execute <ESB Store> Req command + */ + lisn = XIVE_EAS(xive_get_field32(END2_W4_END_BLOCK, end.w4), + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4)); + + trace_xive_escalate_esb(end_blk, end_idx, lisn); + xive2_notify(xrtr, lisn, true /* pq_checked */); + } + + return; } -void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +void xive2_notify(Xive2Router *xrtr , uint32_t lisn, bool pq_checked) { - Xive2Router *xrtr = XIVE2_ROUTER(xn); uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); uint32_t eas_idx = XIVE_EAS_INDEX(lisn); Xive2Eas eas; @@ -1576,13 +1941,31 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) return; } + /* TODO: add support for EAS resume */ + if (xive2_eas_is_resume(&eas)) { + qemu_log_mask(LOG_UNIMP, + "XIVE: EAS resume processing unimplemented - LISN %x\n", + lisn); + return; + } + /* * The event trigger becomes an END trigger */ xive2_router_end_notify(xrtr, - xive_get_field64(EAS2_END_BLOCK, eas.w), - 
xive_get_field64(EAS2_END_INDEX, eas.w), - xive_get_field64(EAS2_END_DATA, eas.w)); + xive_get_field64(EAS2_END_BLOCK, eas.w), + xive_get_field64(EAS2_END_INDEX, eas.w), + xive_get_field64(EAS2_END_DATA, eas.w), + false); + return; +} + +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xn); + + xive2_notify(xrtr, lisn, pq_checked); + return; } static const Property xive2_router_properties[] = { @@ -1590,7 +1973,7 @@ static const Property xive2_router_properties[] = { TYPE_XIVE_FABRIC, XiveFabric *), }; -static void xive2_router_class_init(ObjectClass *klass, void *data) +static void xive2_router_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); @@ -1609,7 +1992,7 @@ static const TypeInfo xive2_router_info = { .instance_size = sizeof(Xive2Router), .class_size = sizeof(Xive2RouterClass), .class_init = xive2_router_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_NOTIFIER }, { TYPE_XIVE_PRESENTER }, { } @@ -1805,7 +2188,7 @@ static const Property xive2_end_source_properties[] = { Xive2Router *), }; -static void xive2_end_source_class_init(ObjectClass *klass, void *data) +static void xive2_end_source_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c index ccdab24..9200585 100644 --- a/hw/intc/xlnx-pmu-iomod-intc.c +++ b/hw/intc/xlnx-pmu-iomod-intc.c @@ -531,7 +531,7 @@ static const VMStateDescription vmstate_xlnx_pmu_io_intc = { } }; -static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, void *data) +static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c index 7241377..610cd0e 100644 --- a/hw/intc/xlnx-zynqmp-ipi.c +++ b/hw/intc/xlnx-zynqmp-ipi.c @@ -355,7 +355,7 @@ static const VMStateDescription vmstate_zynqmp_pmu_ipi = { } }; -static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, void *data) +static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); |
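
A standalone sketch, not part of the patch above: the xive2.c hunks lean on XIVE's inverted priority scale (a numerically lower value is more favored), and xive2_tm_irq_precluded() now compares the offered priority against the pending PIPR instead of the CPPR. The C program below mirrors only that comparison so the convention is easy to check in isolation; demo_irq_precluded(), DEMO_TM_PIPR and the one-byte register array are invented stand-ins for illustration, not QEMU APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Index of the fake "PIPR" byte in our one-byte signal-register array
 * (a stand-in for the TM_PIPR offset used by the patch). */
enum { DEMO_TM_PIPR = 0 };

/* Same shape as the patched check: the interrupt is precluded unless its
 * priority is strictly more favored (numerically lower) than the PIPR. */
static bool demo_irq_precluded(const uint8_t *sig_regs, uint8_t priority)
{
    return !(priority < sig_regs[DEMO_TM_PIPR]);
}

int main(void)
{
    uint8_t sig_regs[1] = { [DEMO_TM_PIPR] = 5 };   /* pending priority 5 */

    printf("prio 3 precluded? %d\n", demo_irq_precluded(sig_regs, 3)); /* 0: more favored */
    printf("prio 5 precluded? %d\n", demo_irq_precluded(sig_regs, 5)); /* 1: equal, not taken */
    printf("prio 7 precluded? %d\n", demo_irq_precluded(sig_regs, 7)); /* 1: less favored */
    return 0;
}

Compiled with any C99 compiler, the three lines print 0, 1, 1, which matches the "lower value wins, ties lose" rule that the PIPR-based test in the diff encodes.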