author    Peter Maydell <peter.maydell@linaro.org>    2018-08-15 12:00:53 +0100
committer Peter Maydell <peter.maydell@linaro.org>    2018-08-15 12:00:53 +0100
commit    48a539df4a25b009c5a1239039349d54185fc0df
tree      12e4a30c7e80a9c496474ade2c348ebb17142dc0
parent    c7fb81a53cacbdac7430d7c4b326f8ad04a3461f
parent    054e7adf4e64e4acb3b033348ebf7cc871baa34f
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180814' into staging
target-arm queue:
 * Implement more of ARMv6-M support
 * Support direct execution from non-RAM regions;
   use this to implement execution from small (<1K) MPU regions
 * GICv2: implement the virtualization extensions
 * support a virtualization-capable GICv2 in the virt and
   xlnx-zynqmp boards
 * arm: Fix return code of arm_load_elf() so we can detect failure
   to load the file correctly
 * Implement HCR_EL2.TGE ("trap general exceptions") bit
 * Implement tailchaining for M profile cores
 * Fix bugs in SVE compare, saturating add/sub, WHILE, MOVZ

# gpg: Signature made Tue 14 Aug 2018 17:23:38 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20180814: (45 commits)
  target/arm: Fix typo in helper_sve_movz_d
  target/arm: Reorganize SVE WHILE
  target/arm: Fix typo in do_sat_addsub_64
  target/arm: Fix sign of sve_cmpeq_ppzw/sve_cmpne_ppzw
  target/arm: Implement tailchaining for M profile cores
  target/arm: Restore M-profile CONTROL.SPSEL before any tailchaining
  target/arm: Initialize exc_secure correctly in do_v7m_exception_exit()
  target/arm: Improve exception-taken logging
  target/arm: Treat SCTLR_EL1.M as if it were zero when HCR_EL2.TGE is set
  target/arm: Provide accessor functions for HCR_EL2.{IMO, FMO, AMO}
  target/arm: Honour HCR_EL2.TGE when raising synchronous exceptions
  target/arm: Honour HCR_EL2.TGE and MDCR_EL2.TDE in debug register access checks
  target/arm: Mask virtual interrupts if HCR_EL2.TGE is set
  arm: Fix return code of arm_load_elf
  arm/virt: Add support for GICv2 virtualization extensions
  xlnx-zynqmp: Improve GIC wiring and MMIO mapping
  intc/arm_gic: Improve traces
  intc/arm_gic: Implement maintenance interrupt generation
  intc/arm_gic: Implement gic_update_virt() function
  intc/arm_gic: Implement the virtual interface registers
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
 accel/tcg/cpu-exec.c             |   3
 accel/tcg/cputlb.c               | 111
 accel/tcg/softmmu_template.h     |  11
 accel/tcg/translate-all.c        |  23
 exec.c                           |   6
 hw/arm/boot.c                    |   8
 hw/arm/virt-acpi-build.c         |   6
 hw/arm/virt.c                    |  52
 hw/arm/xlnx-zynqmp.c             |  92
 hw/intc/arm_gic.c                | 987
 hw/intc/arm_gic_common.c         | 154
 hw/intc/arm_gic_kvm.c            |  31
 hw/intc/arm_gicv3_cpuif.c        |  19
 hw/intc/armv7m_nvic.c            |  82
 hw/intc/gic_internal.h           | 282
 hw/intc/trace-events             |  12
 include/exec/exec-all.h          |   2
 include/hw/arm/virt.h            |   4
 include/hw/arm/xlnx-zynqmp.h     |   4
 include/hw/intc/arm_gic_common.h |  43
 include/hw/intc/armv7m_nvic.h    |   1
 include/migration/vmstate.h      |   3
 include/qom/cpu.h                |   6
 memory.c                         |   3
 target/arm/cpu.c                 |   4
 target/arm/cpu.h                 |  62
 target/arm/helper.c              | 129
 target/arm/op_helper.c           |  14
 target/arm/sve_helper.c          |  19
 target/arm/translate-sve.c       |  51
 30 files changed, 1725 insertions(+), 499 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index c738b7f..6bcb6d9 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -332,6 +332,9 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
+ if (phys_pc == -1) {
+ return NULL;
+ }
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 563fa30..f4702ce 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -741,39 +741,6 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
prot, mmu_idx, size);
}
-static void report_bad_exec(CPUState *cpu, target_ulong addr)
-{
- /* Accidentally executing outside RAM or ROM is quite common for
- * several user-error situations, so report it in a way that
- * makes it clear that this isn't a QEMU bug and provide suggestions
- * about what a user could do to fix things.
- */
- error_report("Trying to execute code outside RAM or ROM at 0x"
- TARGET_FMT_lx, addr);
- error_printf("This usually means one of the following happened:\n\n"
- "(1) You told QEMU to execute a kernel for the wrong machine "
- "type, and it crashed on startup (eg trying to run a "
- "raspberry pi kernel on a versatilepb QEMU machine)\n"
- "(2) You didn't give QEMU a kernel or BIOS filename at all, "
- "and QEMU executed a ROM full of no-op instructions until "
- "it fell off the end\n"
- "(3) Your guest kernel has a bug and crashed by jumping "
- "off into nowhere\n\n"
- "This is almost always one of the first two, so check your "
- "command line and that you are using the right type of kernel "
- "for this machine.\n"
- "If you think option (3) is likely then you can try debugging "
- "your guest with the -d debug options; in particular "
- "-d guest_errors will cause the log to include a dump of the "
- "guest register state at this point.\n\n"
- "Execution cannot continue; stopping here.\n\n");
-
- /* Report also to the logs, with more detail including register dump */
- qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
- "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
- log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
-}
-
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
ram_addr_t ram_addr;
@@ -789,7 +756,7 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx,
target_ulong addr, uintptr_t retaddr,
- bool recheck, int size)
+ bool recheck, MMUAccessType access_type, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr mr_offset;
@@ -831,6 +798,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
cpu->mem_io_vaddr = addr;
+ cpu->mem_io_access_type = access_type;
if (mr->global_locking && !qemu_mutex_iothread_locked()) {
qemu_mutex_lock_iothread();
@@ -843,7 +811,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
section->offset_within_address_space -
section->offset_within_region;
- cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
+ cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
mmu_idx, iotlbentry->attrs, r, retaddr);
}
if (locked) {
@@ -958,11 +926,6 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
int mmu_idx, index;
void *p;
- MemoryRegion *mr;
- MemoryRegionSection *section;
- CPUState *cpu = ENV_GET_CPU(env);
- CPUIOTLBEntry *iotlbentry;
- hwaddr physaddr, mr_offset;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = cpu_mmu_index(env, true);
@@ -973,69 +936,19 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
}
- if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
- /*
- * This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page, and we must
- * repeat the MMU check here. This tlb_fill() call might
- * longjump out if this access should cause a guest exception.
- */
- int index;
- target_ulong tlb_addr;
-
- tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);
-
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
- if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
- /* RAM access. We can't handle this, so for now just stop */
- cpu_abort(cpu, "Unable to handle guest executing from RAM within "
- "a small MPU region at 0x" TARGET_FMT_lx, addr);
- }
+ if (unlikely(env->tlb_table[mmu_idx][index].addr_code &
+ (TLB_RECHECK | TLB_MMIO))) {
/*
- * Fall through to handle IO accesses (which will almost certainly
- * also result in failure)
+ * Return -1 if we can't translate and execute from an entire
+ * page of RAM here, which will cause us to execute by loading
+ * and translating one insn at a time, without caching:
+ * - TLB_RECHECK: means the MMU protection covers a smaller range
+ * than a target page, so we must redo the MMU check every insn
+ * - TLB_MMIO: region is not backed by RAM
*/
+ return -1;
}
- iotlbentry = &env->iotlb[mmu_idx][index];
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
- mr = section->mr;
- if (memory_region_is_unassigned(mr)) {
- qemu_mutex_lock_iothread();
- if (memory_region_request_mmio_ptr(mr, addr)) {
- qemu_mutex_unlock_iothread();
- /* A MemoryRegion is potentially added so re-run the
- * get_page_addr_code.
- */
- return get_page_addr_code(env, addr);
- }
- qemu_mutex_unlock_iothread();
-
- /* Give the new-style cpu_transaction_failed() hook first chance
- * to handle this.
- * This is not the ideal place to detect and generate CPU
- * exceptions for instruction fetch failure (for instance
- * we don't know the length of the access that the CPU would
- * use, and it would be better to go ahead and try the access
- * and use the MemTXResult it produced). However it is the
- * simplest place we have currently available for the check.
- */
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
- physaddr = mr_offset +
- section->offset_within_address_space -
- section->offset_within_region;
- cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
- iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
-
- cpu_unassigned_access(cpu, addr, false, true, 0, 4);
- /* The CPU's unassigned access hook might have longjumped out
- * with an exception. If it didn't (or there was no hook) then
- * we can't proceed further.
- */
- report_bad_exec(cpu, addr);
- exit(1);
- }
p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
return qemu_ram_addr_from_host_nofail(p);
}
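
Taken together with the cpu-exec.c guard above, the effect of this cputlb.c change is that get_page_addr_code() no longer kills the guest when the fetch address is not backed by a full page of RAM: it returns -1 and execution falls back to translating one instruction at a time. A minimal standalone model of the new decision logic (toy flag values and helper name, not QEMU's actual definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for QEMU's TLB flag bits (values illustrative only). */
    #define TLB_RECHECK  (1 << 0)   /* MMU region smaller than a target page */
    #define TLB_MMIO     (1 << 1)   /* region is not backed by RAM           */

    typedef uint64_t tb_addr_t;

    /* Model of the new get_page_addr_code() contract: return the physical
     * page address when the whole page is directly executable RAM, or -1
     * when the caller must translate and execute one insn at a time. */
    static tb_addr_t lookup_code_page(uint64_t tlb_addr_code, uint64_t phys_page)
    {
        if (tlb_addr_code & (TLB_RECHECK | TLB_MMIO)) {
            return (tb_addr_t)-1;   /* no cacheable translation possible */
        }
        return phys_page;
    }

    int main(void)
    {
        printf("plain RAM page  : %#llx\n",
               (unsigned long long)lookup_code_page(0, 0x40000000));
        printf("small MPU region: %#llx\n",
               (unsigned long long)lookup_code_page(TLB_RECHECK, 0x40000000));
        return 0;
    }
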
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index badbf14..f060a69 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -99,11 +99,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
size_t mmu_idx, size_t index,
target_ulong addr,
uintptr_t retaddr,
- bool recheck)
+ bool recheck,
+ MMUAccessType access_type)
{
CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
- DATA_SIZE);
+ access_type, DATA_SIZE);
}
#endif
@@ -140,7 +141,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
- tlb_addr & TLB_RECHECK);
+ tlb_addr & TLB_RECHECK,
+ READ_ACCESS_TYPE);
res = TGT_LE(res);
return res;
}
@@ -207,7 +209,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
- tlb_addr & TLB_RECHECK);
+ tlb_addr & TLB_RECHECK,
+ READ_ACCESS_TYPE);
res = TGT_BE(res);
return res;
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 1571987..898c3bb 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1493,7 +1493,7 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
*/
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
- if (page_addr == -1) {
+ if (page_addr == -1 && tb->page_addr[0] != -1) {
page_lock_tb(tb);
do_tb_phys_invalidate(tb, true);
page_unlock_tb(tb);
@@ -1608,6 +1608,17 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
assert_memory_lock();
+ if (phys_pc == -1) {
+ /*
+ * If the TB is not associated with a physical RAM page then
+ * it must be a temporary one-insn TB, and we have nothing to do
+ * except fill in the page_addr[] fields.
+ */
+ assert(tb->cflags & CF_NOCACHE);
+ tb->page_addr[0] = tb->page_addr[1] = -1;
+ return tb;
+ }
+
/*
* Add the TB to the page list, acquiring first the pages' locks.
* We keep the locks held until after inserting the TB in the hash table,
@@ -1677,6 +1688,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
phys_pc = get_page_addr_code(env, pc);
+ if (phys_pc == -1) {
+ /* Generate a temporary TB with 1 insn in it */
+ cflags &= ~CF_COUNT_MASK;
+ cflags |= CF_NOCACHE | 1;
+ }
+
buffer_overflow:
tb = tb_alloc(pc);
if (unlikely(!tb)) {
@@ -2121,7 +2138,9 @@ void tb_check_watchpoint(CPUState *cpu)
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
- tb_invalidate_phys_range(addr, addr + 1);
+ if (addr != -1) {
+ tb_invalidate_phys_range(addr, addr + 1);
+ }
}
}
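
On the translation side, a -1 from get_page_addr_code() makes tb_gen_code() clamp the block to a single instruction and mark it uncacheable, so no stale translation survives for a region whose mapping must be rechecked on every fetch. A sketch of the cflags manipulation, using illustrative constant values (the real definitions live in include/exec/exec-all.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real definitions are in exec-all.h. */
    #define CF_COUNT_MASK 0x00007fff  /* low bits: max insn count for the TB */
    #define CF_NOCACHE    0x00010000  /* TB is temporary, never cached       */

    int main(void)
    {
        uint32_t cflags = 0x00000200;     /* some pre-existing insn budget  */

        /* Generate a temporary TB with exactly 1 insn in it. */
        cflags &= ~CF_COUNT_MASK;         /* drop the old insn budget       */
        cflags |= CF_NOCACHE | 1;         /* one insn, do not cache         */

        printf("count=%u nocache=%d\n",
               (unsigned)(cflags & CF_COUNT_MASK), !!(cflags & CF_NOCACHE));
        return 0;
    }
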
diff --git a/exec.c b/exec.c
index 4f5df07..e7be076 100644
--- a/exec.c
+++ b/exec.c
@@ -402,12 +402,6 @@ static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
}
}
-bool memory_region_is_unassigned(MemoryRegion *mr)
-{
- return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
- && mr != &io_mem_watch;
-}
-
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
hwaddr addr,
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index e09201c..ca9467e 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -818,9 +818,9 @@ static int do_arm_linux_init(Object *obj, void *opaque)
return 0;
}
-static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr,
- int elf_machine, AddressSpace *as)
+static int64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr,
+ int elf_machine, AddressSpace *as)
{
bool elf_is64;
union {
@@ -829,7 +829,7 @@ static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
} elf_header;
int data_swab = 0;
bool big_endian;
- uint64_t ret = -1;
+ int64_t ret = -1;
Error *err = NULL;
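
The signature change above is the whole bug fix: arm_load_elf() signals failure by returning -1, and with a uint64_t return type that -1 silently becomes UINT64_MAX, so a `ret < 0` check in the caller can never fire. A two-function demonstration of the pitfall:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t load_unsigned(void) { return -1; } /* failure, wrong type */
    static int64_t  load_signed(void)   { return -1; } /* failure, fixed type */

    int main(void)
    {
        /* With uint64_t, -1 converts to UINT64_MAX and the test is dead code. */
        printf("unsigned: detected=%d\n", load_unsigned() < 0);  /* prints 0 */
        printf("signed  : detected=%d\n", load_signed() < 0);    /* prints 1 */
        return 0;
    }
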
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 6ea47e2..ce31abd 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -659,6 +659,8 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
gicc->length = sizeof(*gicc);
if (vms->gic_version == 2) {
gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
+ gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
+ gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
}
gicc->cpu_interface_number = cpu_to_le32(i);
gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
@@ -668,8 +670,8 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
}
- if (vms->virt && vms->gic_version == 3) {
- gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ));
+ if (vms->virt) {
+ gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
}
}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 281ddcd..0807be9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -131,6 +131,8 @@ static const MemMapEntry a15memmap[] = {
[VIRT_GIC_DIST] = { 0x08000000, 0x00010000 },
[VIRT_GIC_CPU] = { 0x08010000, 0x00010000 },
[VIRT_GIC_V2M] = { 0x08020000, 0x00001000 },
+ [VIRT_GIC_HYP] = { 0x08030000, 0x00010000 },
+ [VIRT_GIC_VCPU] = { 0x08040000, 0x00010000 },
/* The space in between here is reserved for GICv3 CPU/vCPU/HYP */
[VIRT_GIC_ITS] = { 0x08080000, 0x00020000 },
/* This redistributor space allows up to 2*64kB*123 CPUs */
@@ -440,18 +442,33 @@ static void fdt_add_gic_node(VirtMachineState *vms)
if (vms->virt) {
qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
- GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ,
+ GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
}
} else {
/* 'cortex-a15-gic' means 'GIC v2' */
qemu_fdt_setprop_string(vms->fdt, nodename, "compatible",
"arm,cortex-a15-gic");
- qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
- 2, vms->memmap[VIRT_GIC_DIST].base,
- 2, vms->memmap[VIRT_GIC_DIST].size,
- 2, vms->memmap[VIRT_GIC_CPU].base,
- 2, vms->memmap[VIRT_GIC_CPU].size);
+ if (!vms->virt) {
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_CPU].base,
+ 2, vms->memmap[VIRT_GIC_CPU].size);
+ } else {
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_CPU].base,
+ 2, vms->memmap[VIRT_GIC_CPU].size,
+ 2, vms->memmap[VIRT_GIC_HYP].base,
+ 2, vms->memmap[VIRT_GIC_HYP].size,
+ 2, vms->memmap[VIRT_GIC_VCPU].base,
+ 2, vms->memmap[VIRT_GIC_VCPU].size);
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
+ GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+ }
}
qemu_fdt_setprop_cell(vms->fdt, nodename, "phandle", vms->gic_phandle);
@@ -573,6 +590,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
qdev_prop_set_uint32(gicdev, "redist-region-count[1]",
MIN(smp_cpus - redist0_count, redist1_capacity));
}
+ } else {
+ if (!kvm_irqchip_in_kernel()) {
+ qdev_prop_set_bit(gicdev, "has-virtualization-extensions",
+ vms->virt);
+ }
}
qdev_init_nofail(gicdev);
gicbusdev = SYS_BUS_DEVICE(gicdev);
@@ -584,6 +606,10 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
}
} else {
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
+ if (vms->virt) {
+ sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_HYP].base);
+ sysbus_mmio_map(gicbusdev, 3, vms->memmap[VIRT_GIC_VCPU].base);
+ }
}
/* Wire the outputs from each CPU's generic timer and the GICv3
@@ -610,9 +636,17 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
ppibase + timer_irq[irq]));
}
- qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
- qdev_get_gpio_in(gicdev, ppibase
- + ARCH_GICV3_MAINT_IRQ));
+ if (type == 3) {
+ qemu_irq irq = qdev_get_gpio_in(gicdev,
+ ppibase + ARCH_GIC_MAINT_IRQ);
+ qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
+ 0, irq);
+ } else if (vms->virt) {
+ qemu_irq irq = qdev_get_gpio_in(gicdev,
+ ppibase + ARCH_GIC_MAINT_IRQ);
+ sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq);
+ }
+
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
qdev_get_gpio_in(gicdev, ppibase
+ VIRTUAL_PMU_IRQ));
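
With the virtualization extensions enabled, the TCG GICv2 exposes two extra sysbus MMIO regions, mapped above in order: 0 distributor, 1 CPU interface, 2 virtual interface control (GICH), 3 virtual CPU interface (GICV). Its per-CPU output lines are grouped in banks of smp_cpus (IRQ, FIQ, VIRQ, VFIQ, then maintenance), which is why the maintenance interrupt for CPU i is connected at sysbus index i + 4 * smp_cpus. A sketch of that indexing, with the bank order inferred from the sysbus_connect_irq() calls in this patch and the xlnx-zynqmp hunk below:

    /* Per-CPU output-line banks of the GICv2 model, in sysbus IRQ order
     * (a sketch inferred from the connections made in this patch). */
    enum gic_output_bank { BANK_IRQ, BANK_FIQ, BANK_VIRQ, BANK_VFIQ, BANK_MAINT };

    static inline int gic_output_index(enum gic_output_bank bank,
                                       int cpu, int num_cpus)
    {
        return cpu + (int)bank * num_cpus;
    }

    /* e.g. gic_output_index(BANK_MAINT, i, smp_cpus) == i + 4 * smp_cpus */
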
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 8de4868..c195040 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -29,12 +29,17 @@
#define ARM_PHYS_TIMER_PPI 30
#define ARM_VIRT_TIMER_PPI 27
+#define ARM_HYP_TIMER_PPI 26
+#define ARM_SEC_TIMER_PPI 29
+#define GIC_MAINTENANCE_PPI 25
#define GEM_REVISION 0x40070106
#define GIC_BASE_ADDR 0xf9000000
#define GIC_DIST_ADDR 0xf9010000
#define GIC_CPU_ADDR 0xf9020000
+#define GIC_VIFACE_ADDR 0xf9040000
+#define GIC_VCPU_ADDR 0xf9060000
#define SATA_INTR 133
#define SATA_ADDR 0xFD0C0000
@@ -111,11 +116,54 @@ static const int adma_ch_intr[XLNX_ZYNQMP_NUM_ADMA_CH] = {
typedef struct XlnxZynqMPGICRegion {
int region_index;
uint32_t address;
+ uint32_t offset;
+ bool virt;
} XlnxZynqMPGICRegion;
static const XlnxZynqMPGICRegion xlnx_zynqmp_gic_regions[] = {
- { .region_index = 0, .address = GIC_DIST_ADDR, },
- { .region_index = 1, .address = GIC_CPU_ADDR, },
+ /* Distributor */
+ {
+ .region_index = 0,
+ .address = GIC_DIST_ADDR,
+ .offset = 0,
+ .virt = false
+ },
+
+ /* CPU interface */
+ {
+ .region_index = 1,
+ .address = GIC_CPU_ADDR,
+ .offset = 0,
+ .virt = false
+ },
+ {
+ .region_index = 1,
+ .address = GIC_CPU_ADDR + 0x10000,
+ .offset = 0x1000,
+ .virt = false
+ },
+
+ /* Virtual interface */
+ {
+ .region_index = 2,
+ .address = GIC_VIFACE_ADDR,
+ .offset = 0,
+ .virt = true
+ },
+
+ /* Virtual CPU interface */
+ {
+ .region_index = 3,
+ .address = GIC_VCPU_ADDR,
+ .offset = 0,
+ .virt = true
+ },
+ {
+ .region_index = 3,
+ .address = GIC_VCPU_ADDR + 0x10000,
+ .offset = 0x1000,
+ .virt = true
+ },
};
static inline int arm_gic_ppi_index(int cpu_nr, int ppi_index)
@@ -281,6 +329,9 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32);
qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2);
qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", num_apus);
+ qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", s->secure);
+ qdev_prop_set_bit(DEVICE(&s->gic),
+ "has-virtualization-extensions", s->virt);
/* Realize APUs before realizing the GIC. KVM requires this. */
for (i = 0; i < num_apus; i++) {
@@ -325,19 +376,23 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
for (i = 0; i < XLNX_ZYNQMP_GIC_REGIONS; i++) {
SysBusDevice *gic = SYS_BUS_DEVICE(&s->gic);
const XlnxZynqMPGICRegion *r = &xlnx_zynqmp_gic_regions[i];
- MemoryRegion *mr = sysbus_mmio_get_region(gic, r->region_index);
+ MemoryRegion *mr;
uint32_t addr = r->address;
int j;
- sysbus_mmio_map(gic, r->region_index, addr);
+ if (r->virt && !s->virt) {
+ continue;
+ }
+ mr = sysbus_mmio_get_region(gic, r->region_index);
for (j = 0; j < XLNX_ZYNQMP_GIC_ALIASES; j++) {
MemoryRegion *alias = &s->gic_mr[i][j];
- addr += XLNX_ZYNQMP_GIC_REGION_SIZE;
memory_region_init_alias(alias, OBJECT(s), "zynqmp-gic-alias", mr,
- 0, XLNX_ZYNQMP_GIC_REGION_SIZE);
+ r->offset, XLNX_ZYNQMP_GIC_REGION_SIZE);
memory_region_add_subregion(system_memory, addr, alias);
+
+ addr += XLNX_ZYNQMP_GIC_REGION_SIZE;
}
}
@@ -347,12 +402,33 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i,
qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
ARM_CPU_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus,
+ qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
+ ARM_CPU_FIQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 2,
+ qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
+ ARM_CPU_VIRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 3,
+ qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
+ ARM_CPU_VFIQ));
irq = qdev_get_gpio_in(DEVICE(&s->gic),
arm_gic_ppi_index(i, ARM_PHYS_TIMER_PPI));
- qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 0, irq);
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_PHYS, irq);
irq = qdev_get_gpio_in(DEVICE(&s->gic),
arm_gic_ppi_index(i, ARM_VIRT_TIMER_PPI));
- qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 1, irq);
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_VIRT, irq);
+ irq = qdev_get_gpio_in(DEVICE(&s->gic),
+ arm_gic_ppi_index(i, ARM_HYP_TIMER_PPI));
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_HYP, irq);
+ irq = qdev_get_gpio_in(DEVICE(&s->gic),
+ arm_gic_ppi_index(i, ARM_SEC_TIMER_PPI));
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_SEC, irq);
+
+ if (s->virt) {
+ irq = qdev_get_gpio_in(DEVICE(&s->gic),
+ arm_gic_ppi_index(i, GIC_MAINTENANCE_PPI));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 4, irq);
+ }
}
if (s->has_rpu) {
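
The region table above intentionally maps the CPU interface (and the virtual CPU interface) twice: once at its base address and again 64KB higher, with the second copy aliasing offset 0x1000 of the register block. This follows the GICv2 layout for 64KB-page systems, where the upper part of the interface (e.g. GICC_DIR) sits on its own page. A sketch of how such an alias resolves a guest address to a register offset (constants assumed from the table above; the helper itself is hypothetical, not QEMU code):

    #include <stdint.h>

    #define GIC_CPU_ADDR                0xf9020000u
    #define XLNX_ZYNQMP_GIC_REGION_SIZE 0x1000u  /* assumed 4KB, per the alias */

    /* Hypothetical resolver: map a guest-physical address in either of the
     * two CPU-interface windows to an offset within the GICC block. */
    static inline uint32_t gicc_reg_offset(uint32_t gpa)
    {
        uint32_t window   = (gpa - GIC_CPU_ADDR) >> 16;            /* 0 or 1 */
        uint32_t page_off = gpa & (XLNX_ZYNQMP_GIC_REGION_SIZE - 1);
        return window * 0x1000u + page_off;          /* second window: +0x1000 */
    }
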
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 34dc84a..c1b35fc 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -61,6 +61,11 @@ static inline int gic_get_current_cpu(GICState *s)
return 0;
}
+static inline int gic_get_current_vcpu(GICState *s)
+{
+ return gic_get_current_cpu(s) + GIC_NCPU;
+}
+
/* Return true if this GIC config has interrupt groups, which is
* true if we're a GICv2, or a GICv1 with the security extensions.
*/
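
Throughout this patch, virtual CPU interfaces reuse the existing per-CPU state arrays by indexing them at cpu + GIC_NCPU, as introduced by gic_get_current_vcpu() above. The inverse helpers live in gic_internal.h, which is not part of this hunk; a sketch of the presumed convention:

    #define GIC_NCPU 8   /* illustrative; the real value is in gic_internal.h */

    /* CPU ids [0, GIC_NCPU) are physical interfaces; [GIC_NCPU, 2*GIC_NCPU)
     * are the corresponding virtual ones (sketch of the convention only). */
    static inline int gic_is_vcpu(int cpu)          { return cpu >= GIC_NCPU; }
    static inline int gic_get_vcpu_real_id(int cpu) { return cpu % GIC_NCPU;  }
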
@@ -69,97 +74,288 @@ static inline bool gic_has_groups(GICState *s)
return s->revision == 2 || s->security_extn;
}
+static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure;
+}
+
+static inline void gic_get_best_irq(GICState *s, int cpu,
+ int *best_irq, int *best_prio, int *group)
+{
+ int irq;
+ int cm = 1 << cpu;
+
+ *best_irq = 1023;
+ *best_prio = 0x100;
+
+ for (irq = 0; irq < s->num_irq; irq++) {
+ if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
+ (!GIC_DIST_TEST_ACTIVE(irq, cm)) &&
+ (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) {
+ if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) {
+ *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
+ *best_irq = irq;
+ }
+ }
+ }
+
+ if (*best_irq < 1023) {
+ *group = GIC_DIST_TEST_GROUP(*best_irq, cm);
+ }
+}
+
+static inline void gic_get_best_virq(GICState *s, int cpu,
+ int *best_irq, int *best_prio, int *group)
+{
+ int lr_idx = 0;
+
+ *best_irq = 1023;
+ *best_prio = 0x100;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t lr_entry = s->h_lr[lr_idx][cpu];
+ int state = GICH_LR_STATE(lr_entry);
+
+ if (state == GICH_LR_STATE_PENDING) {
+ int prio = GICH_LR_PRIORITY(lr_entry);
+
+ if (prio < *best_prio) {
+ *best_prio = prio;
+ *best_irq = GICH_LR_VIRT_ID(lr_entry);
+ *group = GICH_LR_GROUP(lr_entry);
+ }
+ }
+ }
+}
+
+/* Return true if IRQ signaling is enabled for the given cpu and at least one
+ * of the given groups:
+ * - in the non-virt case, the distributor must be enabled for one of the
+ * given groups
+ * - in the virt case, the virtual interface must be enabled.
+ * - in all cases, the (v)CPU interface must be enabled for one of the given
+ * groups.
+ */
+static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt,
+ int group_mask)
+{
+ if (!virt && !(s->ctlr & group_mask)) {
+ return false;
+ }
+
+ if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) {
+ return false;
+ }
+
+ if (!(s->cpu_ctlr[cpu] & group_mask)) {
+ return false;
+ }
+
+ return true;
+}
+
/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed. */
-void gic_update(GICState *s)
+static inline void gic_update_internal(GICState *s, bool virt)
{
int best_irq;
int best_prio;
- int irq;
int irq_level, fiq_level;
- int cpu;
- int cm;
+ int cpu, cpu_iface;
+ int group = 0;
+ qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq;
+ qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq;
for (cpu = 0; cpu < s->num_cpu; cpu++) {
- cm = 1 << cpu;
- s->current_pending[cpu] = 1023;
- if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
- || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
- qemu_irq_lower(s->parent_irq[cpu]);
- qemu_irq_lower(s->parent_fiq[cpu]);
+ cpu_iface = virt ? (cpu + GIC_NCPU) : cpu;
+
+ s->current_pending[cpu_iface] = 1023;
+ if (!gic_irq_signaling_enabled(s, cpu, virt,
+ GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) {
+ qemu_irq_lower(irq_lines[cpu]);
+ qemu_irq_lower(fiq_lines[cpu]);
continue;
}
- best_prio = 0x100;
- best_irq = 1023;
- for (irq = 0; irq < s->num_irq; irq++) {
- if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
- (!GIC_TEST_ACTIVE(irq, cm)) &&
- (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
- if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
- best_prio = GIC_GET_PRIORITY(irq, cpu);
- best_irq = irq;
- }
- }
+
+ if (virt) {
+ gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group);
+ } else {
+ gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group);
}
if (best_irq != 1023) {
- trace_gic_update_bestirq(cpu, best_irq, best_prio,
- s->priority_mask[cpu], s->running_priority[cpu]);
+ trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu,
+ best_irq, best_prio,
+ s->priority_mask[cpu_iface],
+ s->running_priority[cpu_iface]);
}
irq_level = fiq_level = 0;
- if (best_prio < s->priority_mask[cpu]) {
- s->current_pending[cpu] = best_irq;
- if (best_prio < s->running_priority[cpu]) {
- int group = GIC_TEST_GROUP(best_irq, cm);
-
- if (extract32(s->ctlr, group, 1) &&
- extract32(s->cpu_ctlr[cpu], group, 1)) {
- if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) {
+ if (best_prio < s->priority_mask[cpu_iface]) {
+ s->current_pending[cpu_iface] = best_irq;
+ if (best_prio < s->running_priority[cpu_iface]) {
+ if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) {
+ if (group == 0 &&
+ s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) {
DPRINTF("Raised pending FIQ %d (cpu %d)\n",
- best_irq, cpu);
+ best_irq, cpu_iface);
fiq_level = 1;
- trace_gic_update_set_irq(cpu, "fiq", fiq_level);
+ trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq",
+ fiq_level);
} else {
DPRINTF("Raised pending IRQ %d (cpu %d)\n",
- best_irq, cpu);
+ best_irq, cpu_iface);
irq_level = 1;
- trace_gic_update_set_irq(cpu, "irq", irq_level);
+ trace_gic_update_set_irq(cpu, virt ? "virq" : "irq",
+ irq_level);
}
}
}
}
- qemu_set_irq(s->parent_irq[cpu], irq_level);
- qemu_set_irq(s->parent_fiq[cpu], fiq_level);
+ qemu_set_irq(irq_lines[cpu], irq_level);
+ qemu_set_irq(fiq_lines[cpu], fiq_level);
}
}
-void gic_set_pending_private(GICState *s, int cpu, int irq)
+static void gic_update(GICState *s)
{
- int cm = 1 << cpu;
+ gic_update_internal(s, false);
+}
- if (gic_test_pending(s, irq, cm)) {
- return;
+/* Return true if this LR is empty, i.e. the corresponding bit
+ * in ELRSR is set.
+ */
+static inline bool gic_lr_entry_is_free(uint32_t entry)
+{
+ return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+ && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry));
+}
+
+/* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the
+ * corresponding bit in EISR is set.
+ */
+static inline bool gic_lr_entry_is_eoi(uint32_t entry)
+{
+ return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+ && !GICH_LR_HW(entry) && GICH_LR_EOI(entry);
+}
+
+static inline void gic_extract_lr_info(GICState *s, int cpu,
+ int *num_eoi, int *num_valid, int *num_pending)
+{
+ int lr_idx;
+
+ *num_eoi = 0;
+ *num_valid = 0;
+ *num_pending = 0;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if (gic_lr_entry_is_eoi(*entry)) {
+ (*num_eoi)++;
+ }
+
+ if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) {
+ (*num_valid)++;
+ }
+
+ if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) {
+ (*num_pending)++;
+ }
}
+}
- DPRINTF("Set %d pending cpu %d\n", irq, cpu);
- GIC_SET_PENDING(irq, cm);
- gic_update(s);
+static void gic_compute_misr(GICState *s, int cpu)
+{
+ uint32_t value = 0;
+ int vcpu = cpu + GIC_NCPU;
+
+ int num_eoi, num_valid, num_pending;
+
+ gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending);
+
+ /* EOI */
+ if (num_eoi) {
+ value |= R_GICH_MISR_EOI_MASK;
+ }
+
+ /* U: true if only 0 or 1 LR entry is valid */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) {
+ value |= R_GICH_MISR_U_MASK;
+ }
+
+ /* LRENP: EOICount is not 0 */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) &&
+ ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) {
+ value |= R_GICH_MISR_LRENP_MASK;
+ }
+
+ /* NP: no pending interrupts */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) {
+ value |= R_GICH_MISR_NP_MASK;
+ }
+
+ /* VGrp0E: group0 virq signaling enabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) &&
+ (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+ value |= R_GICH_MISR_VGrp0E_MASK;
+ }
+
+ /* VGrp0D: group0 virq signaling disabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) &&
+ !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+ value |= R_GICH_MISR_VGrp0D_MASK;
+ }
+
+ /* VGrp1E: group1 virq signaling enabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) &&
+ (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+ value |= R_GICH_MISR_VGrp1E_MASK;
+ }
+
+ /* VGrp1D: group1 virq signaling disabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) &&
+ !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+ value |= R_GICH_MISR_VGrp1D_MASK;
+ }
+
+ s->h_misr[cpu] = value;
+}
+
+static void gic_update_maintenance(GICState *s)
+{
+ int cpu = 0;
+ int maint_level;
+
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
+ gic_compute_misr(s, cpu);
+ maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu];
+
+ trace_gic_update_maintenance_irq(cpu, maint_level);
+ qemu_set_irq(s->maintenance_irq[cpu], maint_level);
+ }
+}
+
+static void gic_update_virt(GICState *s)
+{
+ gic_update_internal(s, true);
+ gic_update_maintenance(s);
}
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
int cm, int target)
{
if (level) {
- GIC_SET_LEVEL(irq, cm);
- if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
+ GIC_DIST_SET_LEVEL(irq, cm);
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) {
DPRINTF("Set %d pending mask %x\n", irq, target);
- GIC_SET_PENDING(irq, target);
+ GIC_DIST_SET_PENDING(irq, target);
}
} else {
- GIC_CLEAR_LEVEL(irq, cm);
+ GIC_DIST_CLEAR_LEVEL(irq, cm);
}
}
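
The gic_get_best_virq() and LR helpers above scan the list registers through GICH_LR_* accessors defined in hw/intc/gic_internal.h, which is not shown in this excerpt. For reference, a sketch of the field extraction per the GICv2 architecture's GICH_LR layout (names and exact shifts below are illustrative, not QEMU's macros):

    #include <stdint.h>

    /* GICH_LR<n> fields per the GICv2 architecture (sketch):
     *   [9:0]   VirtualID
     *   [19:10] PhysicalID when HW=1; for HW=0, [12:10] is the requesting
     *           CPUID for SGIs and [19] is the EOI maintenance-request bit
     *   [27:23] Priority (top 5 bits of an 8-bit priority)
     *   [29:28] State: 0 invalid, 1 pending, 2 active, 3 pending and active
     *   [30]    Grp1
     *   [31]    HW
     */
    static inline uint32_t lr_virt_id(uint32_t lr)  { return lr & 0x3ff; }
    static inline uint32_t lr_cpu_id(uint32_t lr)   { return (lr >> 10) & 0x7; }
    static inline uint32_t lr_eoi(uint32_t lr)      { return (lr >> 19) & 0x1; }
    static inline uint32_t lr_priority(uint32_t lr) { return (lr >> 23) & 0x1f; }
    static inline uint32_t lr_state(uint32_t lr)    { return (lr >> 28) & 0x3; }
    static inline uint32_t lr_grp1(uint32_t lr)     { return (lr >> 30) & 0x1; }
    static inline uint32_t lr_hw(uint32_t lr)       { return (lr >> 31) & 0x1; }
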
@@ -167,13 +363,13 @@ static void gic_set_irq_generic(GICState *s, int irq, int level,
int cm, int target)
{
if (level) {
- GIC_SET_LEVEL(irq, cm);
+ GIC_DIST_SET_LEVEL(irq, cm);
DPRINTF("Set %d pending mask %x\n", irq, target);
- if (GIC_TEST_EDGE_TRIGGER(irq)) {
- GIC_SET_PENDING(irq, target);
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) {
+ GIC_DIST_SET_PENDING(irq, target);
}
} else {
- GIC_CLEAR_LEVEL(irq, cm);
+ GIC_DIST_CLEAR_LEVEL(irq, cm);
}
}
@@ -192,7 +388,7 @@ static void gic_set_irq(void *opaque, int irq, int level)
/* The first external input line is internal interrupt 32. */
cm = ALL_CPU_MASK;
irq += GIC_INTERNAL;
- target = GIC_TARGET(irq);
+ target = GIC_DIST_TARGET(irq);
} else {
int cpu;
irq -= (s->num_irq - GIC_INTERNAL);
@@ -204,7 +400,7 @@ static void gic_set_irq(void *opaque, int irq, int level)
assert(irq >= GIC_NR_SGIS);
- if (level == GIC_TEST_LEVEL(irq, cm)) {
+ if (level == GIC_DIST_TEST_LEVEL(irq, cm)) {
return;
}
@@ -224,11 +420,12 @@ static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
uint16_t pending_irq = s->current_pending[cpu];
if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
- int group = GIC_TEST_GROUP(pending_irq, (1 << cpu));
+ int group = gic_test_group(s, pending_irq, cpu);
+
/* On a GIC without the security extensions, reading this register
* behaves in the same way as a secure access to a GIC with them.
*/
- bool secure = !s->security_extn || attrs.secure;
+ bool secure = !gic_cpu_ns_access(s, cpu, attrs);
if (group == 0 && !secure) {
/* Group0 interrupts hidden from Non-secure access */
@@ -255,7 +452,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq)
if (gic_has_groups(s) &&
!(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
- GIC_TEST_GROUP(irq, (1 << cpu))) {
+ gic_test_group(s, irq, cpu)) {
bpr = s->abpr[cpu] - 1;
assert(bpr >= 0);
} else {
@@ -268,7 +465,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq)
*/
mask = ~0U << ((bpr & 7) + 1);
- return GIC_GET_PRIORITY(irq, cpu) & mask;
+ return gic_get_priority(s, irq, cpu) & mask;
}
static void gic_activate_irq(GICState *s, int cpu, int irq)
@@ -277,18 +474,25 @@ static void gic_activate_irq(GICState *s, int cpu, int irq)
* and update the running priority.
*/
int prio = gic_get_group_priority(s, cpu, irq);
- int preemption_level = prio >> (GIC_MIN_BPR + 1);
+ int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ int preemption_level = prio >> (min_bpr + 1);
int regno = preemption_level / 32;
int bitno = preemption_level % 32;
+ uint32_t *papr = NULL;
- if (gic_has_groups(s) && GIC_TEST_GROUP(irq, (1 << cpu))) {
- s->nsapr[regno][cpu] |= (1 << bitno);
+ if (gic_is_vcpu(cpu)) {
+ assert(regno == 0);
+ papr = &s->h_apr[gic_get_vcpu_real_id(cpu)];
+ } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) {
+ papr = &s->nsapr[regno][cpu];
} else {
- s->apr[regno][cpu] |= (1 << bitno);
+ papr = &s->apr[regno][cpu];
}
+ *papr |= (1 << bitno);
+
s->running_priority[cpu] = prio;
- GIC_SET_ACTIVE(irq, 1 << cpu);
+ gic_set_active(s, irq, cpu);
}
static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
@@ -297,6 +501,16 @@ static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
* on the set bits in the Active Priority Registers.
*/
int i;
+
+ if (gic_is_vcpu(cpu)) {
+ uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)];
+ if (apr) {
+ return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1);
+ } else {
+ return 0x100;
+ }
+ }
+
for (i = 0; i < GIC_NR_APRS; i++) {
uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu];
if (!apr) {
@@ -325,83 +539,111 @@ static void gic_drop_prio(GICState *s, int cpu, int group)
* running priority will be wrong, so interrupts that should preempt
* might not do so, and interrupts that should not preempt might do so.
*/
- int i;
+ if (gic_is_vcpu(cpu)) {
+ int rcpu = gic_get_vcpu_real_id(cpu);
- for (i = 0; i < GIC_NR_APRS; i++) {
- uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
- if (!*papr) {
- continue;
+ if (s->h_apr[rcpu]) {
+ /* Clear lowest set bit */
+ s->h_apr[rcpu] &= s->h_apr[rcpu] - 1;
+ }
+ } else {
+ int i;
+
+ for (i = 0; i < GIC_NR_APRS; i++) {
+ uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
+ if (!*papr) {
+ continue;
+ }
+ /* Clear lowest set bit */
+ *papr &= *papr - 1;
+ break;
}
- /* Clear lowest set bit */
- *papr &= *papr - 1;
- break;
}
s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
}
+static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu)
+{
+ int src;
+ uint32_t ret;
+
+ if (!gic_is_vcpu(cpu)) {
+ /* Lookup the source CPU for the SGI and clear this in the
+ * sgi_pending map. Return the src and clear the overall pending
+ * state on this CPU if the SGI is not pending from any CPUs.
+ */
+ assert(s->sgi_pending[irq][cpu] != 0);
+ src = ctz32(s->sgi_pending[irq][cpu]);
+ s->sgi_pending[irq][cpu] &= ~(1 << src);
+ if (s->sgi_pending[irq][cpu] == 0) {
+ gic_clear_pending(s, irq, cpu);
+ }
+ ret = irq | ((src & 0x7) << 10);
+ } else {
+ uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu);
+ src = GICH_LR_CPUID(*lr_entry);
+
+ gic_clear_pending(s, irq, cpu);
+ ret = irq | (src << 10);
+ }
+
+ return ret;
+}
+
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
{
- int ret, irq, src;
- int cm = 1 << cpu;
+ int ret, irq;
/* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
* for the case where this GIC supports grouping and the pending interrupt
* is in the wrong group.
*/
irq = gic_get_current_pending_irq(s, cpu, attrs);
- trace_gic_acknowledge_irq(cpu, irq);
+ trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), irq);
if (irq >= GIC_MAXIRQ) {
DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
return irq;
}
- if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
+ if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) {
DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
return 1023;
}
+ gic_activate_irq(s, cpu, irq);
+
if (s->revision == REV_11MPCORE) {
/* Clear pending flags for both level and edge triggered interrupts.
* Level triggered IRQs will be reasserted once they become inactive.
*/
- GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ gic_clear_pending(s, irq, cpu);
ret = irq;
} else {
if (irq < GIC_NR_SGIS) {
- /* Lookup the source CPU for the SGI and clear this in the
- * sgi_pending map. Return the src and clear the overall pending
- * state on this CPU if the SGI is not pending from any CPUs.
- */
- assert(s->sgi_pending[irq][cpu] != 0);
- src = ctz32(s->sgi_pending[irq][cpu]);
- s->sgi_pending[irq][cpu] &= ~(1 << src);
- if (s->sgi_pending[irq][cpu] == 0) {
- GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
- }
- ret = irq | ((src & 0x7) << 10);
+ ret = gic_clear_pending_sgi(s, irq, cpu);
} else {
- /* Clear pending state for both level and edge triggered
- * interrupts. (level triggered interrupts with an active line
- * remain pending, see gic_test_pending)
- */
- GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ gic_clear_pending(s, irq, cpu);
ret = irq;
}
}
- gic_activate_irq(s, cpu, irq);
- gic_update(s);
+ if (gic_is_vcpu(cpu)) {
+ gic_update_virt(s);
+ } else {
+ gic_update(s);
+ }
DPRINTF("ACK %d\n", irq);
return ret;
}
-void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
MemTxAttrs attrs)
{
if (s->security_extn && !attrs.secure) {
- if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
+ if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
return; /* Ignore Non-secure access of Group0 IRQ */
}
val = 0x80 | (val >> 1); /* Non-secure view */
@@ -414,13 +656,13 @@ void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
}
}
-static uint32_t gic_get_priority(GICState *s, int cpu, int irq,
+static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq,
MemTxAttrs attrs)
{
- uint32_t prio = GIC_GET_PRIORITY(irq, cpu);
+ uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu);
if (s->security_extn && !attrs.secure) {
- if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
+ if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
return 0; /* Non-secure access cannot read priority of Group0 IRQ */
}
prio = (prio << 1) & 0xff; /* Non-secure view */
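
These two transformations are each other's inverses on the Non-secure half of the priority space: a Non-secure write of val is stored as 0x80 | (val >> 1), confining NS software to the lower-priority half of the range, and a read shifts it back with (prio << 1) & 0xff. A worked round-trip of the code above:

    #include <assert.h>
    #include <stdint.h>

    /* Non-secure view of an 8-bit GIC priority (from the code above). */
    static uint8_t ns_write(uint8_t val)  { return 0x80 | (val >> 1); }
    static uint8_t ns_read(uint8_t prio)  { return (prio << 1) & 0xff; }

    int main(void)
    {
        /* NS writes land in the low-priority half [0x80, 0xff] ... */
        assert(ns_write(0x00) == 0x80 && ns_write(0xff) == 0xff);
        /* ... and reading back recovers the NS value minus its lost low bit. */
        assert(ns_read(ns_write(0xa4)) == 0xa4);
        return 0;
    }
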
@@ -431,7 +673,7 @@ static uint32_t gic_get_priority(GICState *s, int cpu, int irq,
static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask,
MemTxAttrs attrs)
{
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->priority_mask[cpu] & 0x80) {
/* Priority Mask in upper half */
pmask = 0x80 | (pmask >> 1);
@@ -447,7 +689,7 @@ static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs)
{
uint32_t pmask = s->priority_mask[cpu];
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (pmask & 0x80) {
/* Priority Mask in upper half, return Non-secure view */
pmask = (pmask << 1) & 0xff;
@@ -463,7 +705,7 @@ static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
{
uint32_t ret = s->cpu_ctlr[cpu];
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
/* Construct the NS banked view of GICC_CTLR from the correct
* bits of the S banked view. We don't need to move the bypass
* control bits because we don't implement that (IMPDEF) part
@@ -479,7 +721,7 @@ static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
{
uint32_t mask;
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
/* The NS view can only write certain bits in the register;
* the rest are unchanged
*/
@@ -510,7 +752,7 @@ static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
return 0xff;
}
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->running_priority[cpu] & 0x80) {
/* Running priority in upper half of range: return the Non-secure
* view of the priority.
@@ -534,7 +776,7 @@ static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
/* Before GICv2 prio-drop and deactivate are not separable */
return false;
}
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS;
}
return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE;
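
gic_eoi_split() reports whether the guest has enabled GICv2's split completion model: with EOIMODE clear, a write to GICC_EOIR both drops the running priority and deactivates the interrupt; with EOIMODE set, EOIR only drops priority and software must deactivate separately via GICC_DIR, which is what gic_deactivate_irq() below services. A condensed sketch of the two flows (stub helpers, not QEMU's actual functions):

    /* Sketch of GICv2 interrupt completion under the two EOIMODE settings. */
    static void drop_priority(int cpu)       { (void)cpu; /* pop APR bit   */ }
    static void deactivate(int cpu, int irq) { (void)cpu; (void)irq;
                                               /* clear active state       */ }

    static void eoir_write(int cpu, int irq, int eoimode)
    {
        drop_priority(cpu);          /* always: priority drop               */
        if (!eoimode) {
            deactivate(cpu, irq);    /* combined model: EOIR does both      */
        }
    }

    static void dir_write(int cpu, int irq, int eoimode)
    {
        if (eoimode) {
            deactivate(cpu, irq);    /* split model: GICC_DIR deactivates   */
        }                            /* else UNPREDICTABLE; QEMU ignores it */
    }
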
@@ -542,23 +784,21 @@ static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
- int cm = 1 << cpu;
int group;
- if (irq >= s->num_irq) {
+ if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) {
/*
* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1023]
* to the GICC_DIR, the GIC ignores that write.
* 2. If software writes the number of a non-existent interrupt
* this must be a subcase of "value written is not an active interrupt"
- * and so this is UNPREDICTABLE. We choose to ignore it.
+ * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs,
+ * all IRQs potentially exist, so this limit does not apply.
*/
return;
}
- group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);
-
if (!gic_eoi_split(s, cpu, attrs)) {
/* This is UNPREDICTABLE; we choose to ignore it */
qemu_log_mask(LOG_GUEST_ERROR,
@@ -566,20 +806,70 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
- if (s->security_extn && !attrs.secure && !group) {
+ if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) {
+ /* This vIRQ does not have an LR entry which is either active or
+ * pending and active. Increment EOICount and ignore the write.
+ */
+ int rcpu = gic_get_vcpu_real_id(cpu);
+ s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+
+ /* Update the virtual interface in case a maintenance interrupt should
+ * be raised.
+ */
+ gic_update_virt(s);
+ return;
+ }
+
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
+
+ if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq);
return;
}
- GIC_CLEAR_ACTIVE(irq, cm);
+ gic_clear_active(s, irq, cpu);
}
-void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
+static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
int cm = 1 << cpu;
int group;
DPRINTF("EOI %d\n", irq);
+ if (gic_is_vcpu(cpu)) {
+ /* The call to gic_drop_prio() will clear a bit in GICH_APR iff the
+ * running prio is < 0x100.
+ */
+ bool prio_drop = s->running_priority[cpu] < 0x100;
+
+ if (irq >= GIC_MAXIRQ) {
+ /* Ignore spurious interrupt */
+ return;
+ }
+
+ gic_drop_prio(s, cpu, 0);
+
+ if (!gic_eoi_split(s, cpu, attrs)) {
+ bool valid = gic_virq_is_valid(s, irq, cpu);
+ if (prio_drop && !valid) {
+ /* We are in a situation where:
+ * - V_CTRL.EOIMode is false (no EOI split),
+ * - The call to gic_drop_prio() cleared a bit in GICH_APR,
+ * - This vIRQ does not have an LR entry which is either
+ * active or pending and active.
+ * In that case, we must increment EOICount.
+ */
+ int rcpu = gic_get_vcpu_real_id(cpu);
+ s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+ } else if (valid) {
+ gic_clear_active(s, irq, cpu);
+ }
+ }
+
+ gic_update_virt(s);
+ return;
+ }
+
if (irq >= s->num_irq) {
/* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1023]
@@ -598,16 +888,17 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
if (s->revision == REV_11MPCORE) {
/* Mark level triggered interrupts as pending if they are still
raised. */
- if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
- && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
+ if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm)
+ && GIC_DIST_TEST_LEVEL(irq, cm)
+ && (GIC_DIST_TARGET(irq) & cm) != 0) {
DPRINTF("Set %d pending mask %x\n", irq, cm);
- GIC_SET_PENDING(irq, cm);
+ GIC_DIST_SET_PENDING(irq, cm);
}
}
- group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
- if (s->security_extn && !attrs.secure && !group) {
+ if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
return;
}
@@ -621,7 +912,7 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
/* In GICv2 the guest can choose to split priority-drop and deactivate */
if (!gic_eoi_split(s, cpu, attrs)) {
- GIC_CLEAR_ACTIVE(irq, cm);
+ gic_clear_active(s, irq, cpu);
}
gic_update(s);
}
@@ -669,7 +960,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
goto bad_reg;
}
for (i = 0; i < 8; i++) {
- if (GIC_TEST_GROUP(irq + i, cm)) {
+ if (GIC_DIST_TEST_GROUP(irq + i, cm)) {
res |= (1 << i);
}
}
@@ -689,11 +980,11 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
res = 0;
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_ENABLED(irq + i, cm)) {
+ if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
res |= (1 << i);
}
}
@@ -710,7 +1001,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
@@ -719,19 +1010,27 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
}
}
} else if (offset < 0x400) {
- /* Interrupt Active. */
- irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
+ /* Interrupt Set/Clear Active. */
+ if (offset < 0x380) {
+ irq = (offset - 0x300) * 8;
+ } else if (s->revision == 2) {
+ irq = (offset - 0x380) * 8;
+ } else {
+ goto bad_reg;
+ }
+
+ irq += GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
res = 0;
mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_ACTIVE(irq + i, mask)) {
+ if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) {
res |= (1 << i);
}
}
@@ -740,7 +1039,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
irq = (offset - 0x400) + GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
- res = gic_get_priority(s, cpu, irq, attrs);
+ res = gic_dist_get_priority(s, cpu, irq, attrs);
} else if (offset < 0xc00) {
/* Interrupt CPU Target. */
if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
@@ -756,7 +1055,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
} else if (irq < GIC_INTERNAL) {
res = cm;
} else {
- res = GIC_TARGET(irq);
+ res = GIC_DIST_TARGET(irq);
}
}
} else if (offset < 0xf00) {
@@ -767,14 +1066,16 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
res = 0;
for (i = 0; i < 4; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_MODEL(irq + i))
+ if (GIC_DIST_TEST_MODEL(irq + i)) {
res |= (1 << (i * 2));
- if (GIC_TEST_EDGE_TRIGGER(irq + i))
+ }
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
res |= (2 << (i * 2));
+ }
}
} else if (offset < 0xf10) {
goto bad_reg;
@@ -792,7 +1093,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
}
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
res = 0; /* Ignore Non-secure access of Group0 IRQ */
} else {
res = s->sgi_pending[irq][cpu];
@@ -833,20 +1134,23 @@ static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
switch (size) {
case 1:
*data = gic_dist_readb(opaque, offset, attrs);
- return MEMTX_OK;
+ break;
case 2:
*data = gic_dist_readb(opaque, offset, attrs);
*data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
- return MEMTX_OK;
+ break;
case 4:
*data = gic_dist_readb(opaque, offset, attrs);
*data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
*data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
*data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
- return MEMTX_OK;
+ break;
default:
return MEMTX_ERROR;
}
+
+ trace_gic_dist_read(offset, size, *data);
+ return MEMTX_OK;
}
static void gic_dist_writeb(void *opaque, hwaddr offset,
@@ -888,10 +1192,10 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (value & (1 << i)) {
/* Group1 (Non-secure) */
- GIC_SET_GROUP(irq + i, cm);
+ GIC_DIST_SET_GROUP(irq + i, cm);
} else {
/* Group0 (Secure) */
- GIC_CLEAR_GROUP(irq + i, cm);
+ GIC_DIST_CLEAR_GROUP(irq + i, cm);
}
}
}
@@ -910,25 +1214,26 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (value & (1 << i)) {
int mask =
- (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
+ (irq < GIC_INTERNAL) ? (1 << cpu)
+ : GIC_DIST_TARGET(irq + i);
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (!GIC_TEST_ENABLED(irq + i, cm)) {
+ if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) {
DPRINTF("Enabled IRQ %d\n", irq + i);
trace_gic_enable_irq(irq + i);
}
- GIC_SET_ENABLED(irq + i, cm);
+ GIC_DIST_SET_ENABLED(irq + i, cm);
/* If a raised level triggered IRQ is enabled then mark
it as pending. */
- if (GIC_TEST_LEVEL(irq + i, mask)
- && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
+ if (GIC_DIST_TEST_LEVEL(irq + i, mask)
+ && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
- GIC_SET_PENDING(irq + i, mask);
+ GIC_DIST_SET_PENDING(irq + i, mask);
}
}
}
@@ -946,15 +1251,15 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_ENABLED(irq + i, cm)) {
+ if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
DPRINTF("Disabled IRQ %d\n", irq + i);
trace_gic_disable_irq(irq + i);
}
- GIC_CLEAR_ENABLED(irq + i, cm);
+ GIC_DIST_CLEAR_ENABLED(irq + i, cm);
}
}
} else if (offset < 0x280) {
@@ -969,11 +1274,11 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (value & (1 << i)) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
+ GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i));
}
}
} else if (offset < 0x300) {
@@ -987,7 +1292,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
@@ -995,18 +1300,63 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for per-CPU interrupts. It's unclear whether this is the
correct behavior. */
if (value & (1 << i)) {
- GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
+ GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
+ }
+ }
+ } else if (offset < 0x380) {
+ /* Interrupt Set Active. */
+ if (s->revision != 2) {
+ goto bad_reg;
+ }
+
+ irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+
+ /* This register is banked per-cpu for PPIs */
+ int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (value & (1 << i)) {
+ GIC_DIST_SET_ACTIVE(irq + i, cm);
}
}
} else if (offset < 0x400) {
- /* Interrupt Active. */
- goto bad_reg;
+ /* Interrupt Clear Active. */
+ if (s->revision != 2) {
+ goto bad_reg;
+ }
+
+ irq = (offset - 0x380) * 8 + GIC_BASE_IRQ;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+
+ /* This register is banked per-cpu for PPIs */
+ int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (value & (1 << i)) {
+ GIC_DIST_CLEAR_ACTIVE(irq + i, cm);
+ }
+ }
} else if (offset < 0x800) {
/* Interrupt Priority. */
irq = (offset - 0x400) + GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
- gic_set_priority(s, cpu, irq, value, attrs);
+ gic_dist_set_priority(s, cpu, irq, value, attrs);
} else if (offset < 0xc00) {
/* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
* annoying exception of the 11MPCore's GIC.
@@ -1032,21 +1382,21 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
value |= 0xaa;
for (i = 0; i < 4; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
if (s->revision == REV_11MPCORE) {
if (value & (1 << (i * 2))) {
- GIC_SET_MODEL(irq + i);
+ GIC_DIST_SET_MODEL(irq + i);
} else {
- GIC_CLEAR_MODEL(irq + i);
+ GIC_DIST_CLEAR_MODEL(irq + i);
}
}
if (value & (2 << (i * 2))) {
- GIC_SET_EDGE_TRIGGER(irq + i);
+ GIC_DIST_SET_EDGE_TRIGGER(irq + i);
} else {
- GIC_CLEAR_EDGE_TRIGGER(irq + i);
+ GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i);
}
}
} else if (offset < 0xf10) {
@@ -1060,10 +1410,10 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0xf10);
if (!s->security_extn || attrs.secure ||
- GIC_TEST_GROUP(irq, 1 << cpu)) {
+ GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
s->sgi_pending[irq][cpu] &= ~value;
if (s->sgi_pending[irq][cpu] == 0) {
- GIC_CLEAR_PENDING(irq, 1 << cpu);
+ GIC_DIST_CLEAR_PENDING(irq, 1 << cpu);
}
}
} else if (offset < 0xf30) {
@@ -1074,8 +1424,8 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0xf20);
if (!s->security_extn || attrs.secure ||
- GIC_TEST_GROUP(irq, 1 << cpu)) {
- GIC_SET_PENDING(irq, 1 << cpu);
+ GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+ GIC_DIST_SET_PENDING(irq, 1 << cpu);
s->sgi_pending[irq][cpu] |= value;
}
} else {
@@ -1122,7 +1472,7 @@ static void gic_dist_writel(void *opaque, hwaddr offset,
mask = ALL_CPU_MASK;
break;
}
- GIC_SET_PENDING(irq, mask);
+ GIC_DIST_SET_PENDING(irq, mask);
target_cpu = ctz32(mask);
while (target_cpu < GIC_NCPU) {
s->sgi_pending[irq][target_cpu] |= (1 << cpu);
@@ -1139,6 +1489,8 @@ static void gic_dist_writel(void *opaque, hwaddr offset,
static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
unsigned size, MemTxAttrs attrs)
{
+ trace_gic_dist_write(offset, size, data);
+
switch (size) {
case 1:
gic_dist_writeb(opaque, offset, data, attrs);
@@ -1227,7 +1579,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
*data = gic_get_priority_mask(s, cpu, attrs);
break;
case 0x08: /* Binary Point */
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
/* NS view of BPR when CBPR is 1 */
*data = MIN(s->bpr[cpu] + 1, 7);
@@ -1254,7 +1606,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
* With security extensions, secure access: ABPR (alias of NS BPR)
* With security extensions, nonsecure access: RAZ/WI
*/
- if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
*data = 0;
} else {
*data = s->abpr[cpu];
@@ -1263,10 +1615,13 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
case 0xd0: case 0xd4: case 0xd8: case 0xdc:
{
int regno = (offset - 0xd0) / 4;
+ int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS;
- if (regno >= GIC_NR_APRS || s->revision != 2) {
+ if (regno >= nr_aprs || s->revision != 2) {
*data = 0;
- } else if (s->security_extn && !attrs.secure) {
+ } else if (gic_is_vcpu(cpu)) {
+ *data = s->h_apr[gic_get_vcpu_real_id(cpu)];
+ } else if (gic_cpu_ns_access(s, cpu, attrs)) {
/* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
*data = gic_apr_ns_view(s, regno, cpu);
} else {
@@ -1279,7 +1634,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
int regno = (offset - 0xe0) / 4;
if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) ||
- (s->security_extn && !attrs.secure)) {
+ gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) {
*data = 0;
} else {
*data = s->nsapr[regno][cpu];
@@ -1292,12 +1647,18 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
*data = 0;
break;
}
+
+ trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), offset, *data);
return MEMTX_OK;
}
static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
uint32_t value, MemTxAttrs attrs)
{
+ trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), offset, value);
+
switch (offset) {
case 0x00: /* Control */
gic_set_cpu_control(s, cpu, value, attrs);
@@ -1306,7 +1667,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
gic_set_priority_mask(s, cpu, value, attrs);
break;
case 0x08: /* Binary Point */
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
/* WI when CBPR is 1 */
return MEMTX_OK;
@@ -1314,14 +1675,15 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
}
} else {
- s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR);
+ int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ s->bpr[cpu] = MAX(value & 0x7, min_bpr);
}
break;
case 0x10: /* End Of Interrupt */
gic_complete_irq(s, cpu, value & 0x3ff, attrs);
return MEMTX_OK;
case 0x1c: /* Aliased Binary Point */
- if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
/* unimplemented, or NS access: RAZ/WI */
return MEMTX_OK;
} else {
@@ -1331,11 +1693,14 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
case 0xd0: case 0xd4: case 0xd8: case 0xdc:
{
int regno = (offset - 0xd0) / 4;
+ int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS;
- if (regno >= GIC_NR_APRS || s->revision != 2) {
+ if (regno >= nr_aprs || s->revision != 2) {
return MEMTX_OK;
}
- if (s->security_extn && !attrs.secure) {
+ if (gic_is_vcpu(cpu)) {
+ s->h_apr[gic_get_vcpu_real_id(cpu)] = value;
+ } else if (gic_cpu_ns_access(s, cpu, attrs)) {
/* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
gic_apr_write_ns_view(s, regno, cpu, value);
} else {
@@ -1350,7 +1715,10 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
if (regno >= GIC_NR_APRS || s->revision != 2) {
return MEMTX_OK;
}
- if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
+ if (gic_is_vcpu(cpu)) {
+ return MEMTX_OK;
+ }
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
return MEMTX_OK;
}
s->nsapr[regno][cpu] = value;
@@ -1365,7 +1733,13 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
"gic_cpu_write: Bad offset %x\n", (int)offset);
return MEMTX_OK;
}
- gic_update(s);
+
+ if (gic_is_vcpu(cpu)) {
+ gic_update_virt(s);
+ } else {
+ gic_update(s);
+ }
+
return MEMTX_OK;
}
@@ -1407,6 +1781,222 @@ static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
return gic_cpu_write(s, id, addr, value, attrs);
}
+static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs);
+}
+
+static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start)
+{
+ int lr_idx;
+ uint32_t ret = 0;
+
+ for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+ ret = deposit32(ret, lr_idx - lr_start, 1,
+ gic_lr_entry_is_eoi(*entry));
+ }
+
+ return ret;
+}
+
+static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start)
+{
+ int lr_idx;
+ uint32_t ret = 0;
+
+ for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+ ret = deposit32(ret, lr_idx - lr_start, 1,
+ gic_lr_entry_is_free(*entry));
+ }
+
+ return ret;
+}
+
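The two helpers above pack one status bit per implemented list register; the
callers in gic_hyp_read() turn a GICH_EISR1/GICH_ELRSR1 access into
lr_start = 32 via (addr - A_GICH_EISR0) * 8. A minimal standalone sketch of
the same packing, with a hypothetical pack_lr_flags() standing in for the
deposit32()-based loop:

    #include <stdint.h>

    /* Bit (i - lr_start) of the result reflects list register i, which is
     * what deposit32(ret, lr_idx - lr_start, 1, flag) computes above. */
    static uint32_t pack_lr_flags(const int *flags, int num_lrs, int lr_start)
    {
        uint32_t ret = 0;

        for (int i = lr_start; i < num_lrs; i++) {
            ret |= (uint32_t)(flags[i] & 1) << (i - lr_start);
        }
        return ret;
    }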
+static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs)
+{
+ int vcpu = gic_get_current_vcpu(s);
+ uint32_t ctlr;
+ uint32_t abpr;
+ uint32_t bpr;
+ uint32_t prio_mask;
+
+ ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr);
+ abpr = FIELD_EX32(value, GICH_VMCR, VMABP);
+ bpr = FIELD_EX32(value, GICH_VMCR, VMBP);
+ prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3;
+
+ gic_set_cpu_control(s, vcpu, ctlr, attrs);
+ s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR);
+ s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR);
+ gic_set_priority_mask(s, vcpu, prio_mask, attrs);
+}
+
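A worked example of the unpacking above, using an illustrative (not
authoritative) guest value of 0xf8000001 for GICH_VMCR:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t vmcr = 0xf8000001;  /* illustrative value only */

        assert(((vmcr >>  0) & 0x3ff) == 0x001);      /* VMCCtlr: grp 0 en  */
        assert(((vmcr >> 18) & 0x7) == 0);            /* VMABP -> MAX(0, 3) */
        assert(((vmcr >> 21) & 0x7) == 0);            /* VMBP  -> MAX(0, 2) */
        assert((((vmcr >> 27) & 0x1f) << 3) == 0xf8); /* VMPriMask -> 0xf8  */
        return 0;
    }

The MAX() clamps in gic_vmcr_write() mirror the vCPU interface limits: the
binary points cannot go below GIC_VIRT_MIN_BPR/GIC_VIRT_MIN_ABPR.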
+static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ GICState *s = ARM_GIC(opaque);
+ int vcpu = cpu + GIC_NCPU;
+
+ switch (addr) {
+ case A_GICH_HCR: /* Hypervisor Control */
+ *data = s->h_hcr[cpu];
+ break;
+
+ case A_GICH_VTR: /* VGIC Type */
+ *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1);
+ *data = FIELD_DP32(*data, GICH_VTR, PREbits,
+ GIC_VIRT_MAX_GROUP_PRIO_BITS - 1);
+ *data = FIELD_DP32(*data, GICH_VTR, PRIbits,
+ (7 - GIC_VIRT_MIN_BPR) - 1);
+ break;
+
+ case A_GICH_VMCR: /* Virtual Machine Control */
+ *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr,
+ extract32(s->cpu_ctlr[vcpu], 0, 10));
+ *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]);
+ *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]);
+ *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask,
+ extract32(s->priority_mask[vcpu], 3, 5));
+ break;
+
+ case A_GICH_MISR: /* Maintenance Interrupt Status */
+ *data = s->h_misr[cpu];
+ break;
+
+ case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */
+ case A_GICH_EISR1:
+ *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8);
+ break;
+
+ case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */
+ case A_GICH_ELRSR1:
+ *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8);
+ break;
+
+ case A_GICH_APR: /* Active Priorities */
+ *data = s->h_apr[cpu];
+ break;
+
+ case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */
+ {
+ int lr_idx = (addr - A_GICH_LR0) / 4;
+
+        if (lr_idx >= s->num_lrs) {
+ *data = 0;
+ } else {
+ *data = s->h_lr[lr_idx][cpu];
+ }
+ break;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr);
+ return MEMTX_OK;
+ }
+
+ trace_gic_hyp_read(addr, *data);
+ return MEMTX_OK;
+}
+
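For the GICH_VTR case above, assuming num_lrs = 4 (the value fixed in
arm_gic_common_realize() later in this patch), the register reads back as
0x90000003: ListRegs = 3 in bits [5:0], PREbits = 5 - 1 = 4 in bits [28:26],
and PRIbits = (7 - 2) - 1 = 4 in bits [31:29]. A quick check of the
arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* ListRegs | PREbits << 26 | PRIbits << 29 */
        uint32_t vtr = 3u | (4u << 26) | (4u << 29);

        assert(vtr == 0x90000003); /* 4 LRs, 5 preemption/5 priority bits */
        return 0;
    }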
+static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr,
+ uint64_t value, MemTxAttrs attrs)
+{
+ GICState *s = ARM_GIC(opaque);
+ int vcpu = cpu + GIC_NCPU;
+
+ trace_gic_hyp_write(addr, value);
+
+ switch (addr) {
+ case A_GICH_HCR: /* Hypervisor Control */
+ s->h_hcr[cpu] = value & GICH_HCR_MASK;
+ break;
+
+ case A_GICH_VMCR: /* Virtual Machine Control */
+ gic_vmcr_write(s, value, attrs);
+ break;
+
+ case A_GICH_APR: /* Active Priorities */
+ s->h_apr[cpu] = value;
+ s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu);
+ break;
+
+ case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */
+ {
+ int lr_idx = (addr - A_GICH_LR0) / 4;
+
+        if (lr_idx >= s->num_lrs) {
+ return MEMTX_OK;
+ }
+
+ s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK;
+ trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]);
+ break;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr);
+ return MEMTX_OK;
+ }
+
+ gic_update_virt(s);
+ return MEMTX_OK;
+}
+
+static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs);
+}
+
+static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+
+ return gic_hyp_read(s, id, addr, data, attrs);
+}
+
+static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+
+    return gic_hyp_write(s, id, addr, value, attrs);
+
+}
+
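The backref indirection above recovers the CPU index by pointer arithmetic:
each per-CPU region is created with its opaque pointer set to
&s->backref[i] (see arm_gic_realize() below), and every backref element
stores the device pointer itself. A minimal sketch of the idea, with
hypothetical names:

    #include <assert.h>

    #define NCPU 8

    struct gic {
        struct gic *backref[NCPU]; /* each element points back at the device */
    };

    static int cpu_index(void *opaque)
    {
        struct gic **backref = opaque;
        struct gic *s = *backref;

        return backref - s->backref; /* element offset == CPU index */
    }

    int main(void)
    {
        struct gic g;

        for (int i = 0; i < NCPU; i++) {
            g.backref[i] = &g;
        }
        assert(cpu_index(&g.backref[5]) == 5);
        return 0;
    }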
static const MemoryRegionOps gic_ops[2] = {
{
.read_with_attrs = gic_dist_read,
@@ -1426,11 +2016,24 @@ static const MemoryRegionOps gic_cpu_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-/* This function is used by nvic model */
-void gic_init_irqs_and_distributor(GICState *s)
-{
- gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops);
-}
+static const MemoryRegionOps gic_virt_ops[2] = {
+ {
+ .read_with_attrs = gic_thiscpu_hyp_read,
+ .write_with_attrs = gic_thiscpu_hyp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ },
+ {
+ .read_with_attrs = gic_thisvcpu_read,
+ .write_with_attrs = gic_thisvcpu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ }
+};
+
+static const MemoryRegionOps gic_viface_ops = {
+ .read_with_attrs = gic_do_hyp_read,
+ .write_with_attrs = gic_do_hyp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
static void arm_gic_realize(DeviceState *dev, Error **errp)
{
@@ -1453,8 +2056,11 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
- /* This creates distributor and main CPU interface (s->cpuiomem[0]) */
- gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops);
+    /* This creates the distributor, the main CPU interface (s->cpuiomem[0])
+     * and, if enabled, the virtualization extension interfaces: the main
+     * virtual interface (s->vifaceiomem[0]) and the virtual CPU interface.
+ */
+ gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops);
/* Extra core-specific regions for the CPU interfaces. This is
* necessary for "franken-GIC" implementations, for example on
@@ -1470,6 +2076,19 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
&s->backref[i], "gic_cpu", 0x100);
sysbus_init_mmio(sbd, &s->cpuiomem[i+1]);
}
+
+ /* Extra core-specific regions for virtual interfaces. This is required by
+ * the GICv2 specification.
+ */
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_cpu; i++) {
+ memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s),
+ &gic_viface_ops, &s->backref[i],
+ "gic_viface", 0x1000);
+ sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]);
+ }
+ }
+
}
static void arm_gic_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index aee50a2..547dc41 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -46,6 +46,13 @@ static int gic_post_load(void *opaque, int version_id)
return 0;
}
+static bool gic_virt_state_needed(void *opaque)
+{
+ GICState *s = (GICState *)opaque;
+
+ return s->virt_extn;
+}
+
static const VMStateDescription vmstate_gic_irq_state = {
.name = "arm_gic_irq_state",
.version_id = 1,
@@ -62,6 +69,30 @@ static const VMStateDescription vmstate_gic_irq_state = {
}
};
+static const VMStateDescription vmstate_gic_virt_state = {
+ .name = "arm_gic_virt_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = gic_virt_state_needed,
+ .fields = (VMStateField[]) {
+ /* Virtual interface */
+ VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_2DARRAY(h_lr, GICState, GIC_MAX_LR, GIC_NCPU),
+ VMSTATE_UINT32_ARRAY(h_apr, GICState, GIC_NCPU),
+
+ /* Virtual CPU interfaces */
+ VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, GIC_NCPU, GIC_NCPU),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_gic = {
.name = "arm_gic",
.version_id = 12,
@@ -70,26 +101,31 @@ static const VMStateDescription vmstate_gic = {
.post_load = gic_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(ctlr, GICState),
- VMSTATE_UINT32_ARRAY(cpu_ctlr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, 0, GIC_NCPU),
VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1,
vmstate_gic_irq_state, gic_irq_state),
VMSTATE_UINT8_ARRAY(irq_target, GICState, GIC_MAXIRQ),
VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU),
VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL),
VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU),
- VMSTATE_UINT16_ARRAY(priority_mask, GICState, GIC_NCPU),
- VMSTATE_UINT16_ARRAY(running_priority, GICState, GIC_NCPU),
- VMSTATE_UINT16_ARRAY(current_pending, GICState, GIC_NCPU),
- VMSTATE_UINT8_ARRAY(bpr, GICState, GIC_NCPU),
- VMSTATE_UINT8_ARRAY(abpr, GICState, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, 0, GIC_NCPU),
VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU),
VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gic_virt_state,
+ NULL
}
};
void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops)
+ const MemoryRegionOps *ops,
+ const MemoryRegionOps *virt_ops)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(s);
int i = s->num_irq - GIC_INTERNAL;
@@ -116,6 +152,11 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
for (i = 0; i < s->num_cpu; i++) {
sysbus_init_irq(sbd, &s->parent_vfiq[i]);
}
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->maintenance_irq[i]);
+ }
+ }
/* Distributor */
memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000);
@@ -127,6 +168,17 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL,
s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100);
sysbus_init_mmio(sbd, &s->cpuiomem[0]);
+
+ if (s->virt_extn) {
+ memory_region_init_io(&s->vifaceiomem[0], OBJECT(s), virt_ops,
+ s, "gic_viface", 0x1000);
+ sysbus_init_mmio(sbd, &s->vifaceiomem[0]);
+
+ memory_region_init_io(&s->vcpuiomem, OBJECT(s),
+ virt_ops ? &virt_ops[1] : NULL,
+ s, "gic_vcpu", 0x2000);
+ sysbus_init_mmio(sbd, &s->vcpuiomem);
+ }
}
static void arm_gic_common_realize(DeviceState *dev, Error **errp)
@@ -163,6 +215,48 @@ static void arm_gic_common_realize(DeviceState *dev, Error **errp)
"the security extensions");
return;
}
+
+ if (s->virt_extn) {
+ if (s->revision != 2) {
+ error_setg(errp, "GIC virtualization extensions are only "
+ "supported by revision 2");
+ return;
+ }
+
+        /* For now, set the number of implemented LRs to 4, as found in most
+         * real GICv2 implementations. This could be promoted to a QOM
+         * property if we need to emulate a variant with another num_lrs.
+ */
+ s->num_lrs = 4;
+ }
+}
+
+static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu,
+ int resetprio)
+{
+ int i, j;
+
+ for (i = first_cpu; i < first_cpu + s->num_cpu; i++) {
+ if (s->revision == REV_11MPCORE) {
+ s->priority_mask[i] = 0xf0;
+ } else {
+ s->priority_mask[i] = resetprio;
+ }
+ s->current_pending[i] = 1023;
+ s->running_priority[i] = 0x100;
+ s->cpu_ctlr[i] = 0;
+ s->bpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ s->abpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_ABPR : GIC_MIN_ABPR;
+
+ if (!gic_is_vcpu(i)) {
+ for (j = 0; j < GIC_INTERNAL; j++) {
+ s->priority1[j][i] = resetprio;
+ }
+ for (j = 0; j < GIC_NR_SGIS; j++) {
+ s->sgi_pending[j][i] = 0;
+ }
+ }
+ }
}
static void arm_gic_common_reset(DeviceState *dev)
@@ -185,27 +279,18 @@ static void arm_gic_common_reset(DeviceState *dev)
}
memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
- for (i = 0 ; i < s->num_cpu; i++) {
- if (s->revision == REV_11MPCORE) {
- s->priority_mask[i] = 0xf0;
- } else {
- s->priority_mask[i] = resetprio;
- }
- s->current_pending[i] = 1023;
- s->running_priority[i] = 0x100;
- s->cpu_ctlr[i] = 0;
- s->bpr[i] = GIC_MIN_BPR;
- s->abpr[i] = GIC_MIN_ABPR;
- for (j = 0; j < GIC_INTERNAL; j++) {
- s->priority1[j][i] = resetprio;
- }
- for (j = 0; j < GIC_NR_SGIS; j++) {
- s->sgi_pending[j][i] = 0;
- }
+ arm_gic_common_reset_irq_state(s, 0, resetprio);
+
+ if (s->virt_extn) {
+        /* vCPU state is stored at indexes GIC_NCPU .. GIC_NCPU + num_cpu - 1.
+         * The exposed vCPU interface does not have security extensions.
+ */
+ arm_gic_common_reset_irq_state(s, GIC_NCPU, 0);
}
+
for (i = 0; i < GIC_NR_SGIS; i++) {
- GIC_SET_ENABLED(i, ALL_CPU_MASK);
- GIC_SET_EDGE_TRIGGER(i);
+ GIC_DIST_SET_ENABLED(i, ALL_CPU_MASK);
+ GIC_DIST_SET_EDGE_TRIGGER(i);
}
for (i = 0; i < ARRAY_SIZE(s->priority2); i++) {
@@ -222,7 +307,20 @@ static void arm_gic_common_reset(DeviceState *dev)
}
if (s->security_extn && s->irq_reset_nonsecure) {
for (i = 0; i < GIC_MAXIRQ; i++) {
- GIC_SET_GROUP(i, ALL_CPU_MASK);
+ GIC_DIST_SET_GROUP(i, ALL_CPU_MASK);
+ }
+ }
+
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_lrs; i++) {
+ for (j = 0; j < s->num_cpu; j++) {
+ s->h_lr[i][j] = 0;
+ }
+ }
+
+ for (i = 0; i < s->num_cpu; i++) {
+ s->h_hcr[i] = 0;
+ s->h_misr[i] = 0;
}
}
@@ -255,6 +353,8 @@ static Property arm_gic_common_properties[] = {
DEFINE_PROP_UINT32("revision", GICState, revision, 1),
/* True if the GIC should implement the security extensions */
DEFINE_PROP_BOOL("has-security-extensions", GICState, security_extn, 0),
+ /* True if the GIC should implement the virtualization extensions */
+ DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 8666508..a611e8e 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -140,10 +140,10 @@ static void translate_group(GICState *s, int irq, int cpu,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (to_kernel) {
- *field = GIC_TEST_GROUP(irq, cm);
+ *field = GIC_DIST_TEST_GROUP(irq, cm);
} else {
if (*field & 1) {
- GIC_SET_GROUP(irq, cm);
+ GIC_DIST_SET_GROUP(irq, cm);
}
}
}
@@ -154,10 +154,10 @@ static void translate_enabled(GICState *s, int irq, int cpu,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (to_kernel) {
- *field = GIC_TEST_ENABLED(irq, cm);
+ *field = GIC_DIST_TEST_ENABLED(irq, cm);
} else {
if (*field & 1) {
- GIC_SET_ENABLED(irq, cm);
+ GIC_DIST_SET_ENABLED(irq, cm);
}
}
}
@@ -171,7 +171,7 @@ static void translate_pending(GICState *s, int irq, int cpu,
*field = gic_test_pending(s, irq, cm);
} else {
if (*field & 1) {
- GIC_SET_PENDING(irq, cm);
+ GIC_DIST_SET_PENDING(irq, cm);
            /* TODO: Capture if the level line is held high in the kernel */
}
}
@@ -183,10 +183,10 @@ static void translate_active(GICState *s, int irq, int cpu,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (to_kernel) {
- *field = GIC_TEST_ACTIVE(irq, cm);
+ *field = GIC_DIST_TEST_ACTIVE(irq, cm);
} else {
if (*field & 1) {
- GIC_SET_ACTIVE(irq, cm);
+ GIC_DIST_SET_ACTIVE(irq, cm);
}
}
}
@@ -195,10 +195,10 @@ static void translate_trigger(GICState *s, int irq, int cpu,
uint32_t *field, bool to_kernel)
{
if (to_kernel) {
- *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
+ *field = (GIC_DIST_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
} else {
if (*field & 0x2) {
- GIC_SET_EDGE_TRIGGER(irq);
+ GIC_DIST_SET_EDGE_TRIGGER(irq);
}
}
}
@@ -207,9 +207,10 @@ static void translate_priority(GICState *s, int irq, int cpu,
uint32_t *field, bool to_kernel)
{
if (to_kernel) {
- *field = GIC_GET_PRIORITY(irq, cpu) & 0xff;
+ *field = GIC_DIST_GET_PRIORITY(irq, cpu) & 0xff;
} else {
- gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED);
+ gic_dist_set_priority(s, cpu, irq,
+ *field & 0xff, MEMTXATTRS_UNSPECIFIED);
}
}
@@ -510,6 +511,12 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
+ if (s->virt_extn) {
+ error_setg(errp, "the in-kernel VGIC does not implement the "
+ "virtualization extensions");
+ return;
+ }
+
if (!kvm_arm_gic_can_save_restore(s)) {
error_setg(&s->migration_blocker, "This operating system kernel does "
"not support vGICv2 migration");
@@ -521,7 +528,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
}
}
- gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL);
+ gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL, NULL);
for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
qemu_irq irq = qdev_get_gpio_in(dev, i);
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 2a60568..068a8e8 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -85,7 +85,10 @@ static bool icv_access(CPUARMState *env, int hcr_flags)
* * access if NS EL1 and either IMO or FMO == 1:
* CTLR, DIR, PMR, RPR
*/
- return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
+ bool flagmatch = ((hcr_flags & HCR_IMO) && arm_hcr_el2_imo(env)) ||
+ ((hcr_flags & HCR_FMO) && arm_hcr_el2_fmo(env));
+
+ return flagmatch && arm_current_el(env) == 1
&& !arm_is_secure_below_el3(env);
}
@@ -1549,8 +1552,8 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* No need to include !IsSecure in route_*_to_el2 as it's only
* tested in cases where we know !IsSecure is true.
*/
- route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO;
- route_irq_to_el2 = env->cp15.hcr_el2 & HCR_IMO;
+ route_fiq_to_el2 = arm_hcr_el2_fmo(env);
+ route_irq_to_el2 = arm_hcr_el2_imo(env);
switch (arm_current_el(env)) {
case 3:
@@ -1893,7 +1896,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
- ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
+ (arm_hcr_el2_imo(env) == 0 && arm_hcr_el2_fmo(env) == 0)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -1933,7 +1936,7 @@ static CPAccessResult gicv3_dir_access(CPUARMState *env,
static CPAccessResult gicv3_sgi_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
- if ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) &&
+ if ((arm_hcr_el2_imo(env) || arm_hcr_el2_fmo(env)) &&
arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
/* Takes priority over a possible EL3 trap */
return CP_ACCESS_TRAP_EL2;
@@ -1958,8 +1961,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
if (env->cp15.scr_el3 & SCR_FIQ) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) ||
- ((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
+ if (arm_is_secure_below_el3(env) || !arm_hcr_el2_fmo(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -1998,8 +2000,7 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
if (env->cp15.scr_el3 & SCR_IRQ) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) ||
- ((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
+ if (arm_is_secure_below_el3(env) || !arm_hcr_el2_imo(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index cd1e7f1..351b69a 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -420,6 +420,8 @@ static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
assert(irq < s->num_irq);
+ prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
+
if (secure) {
assert(exc_is_banked(irq));
s->sec_vectors[irq].prio = prio;
@@ -779,6 +781,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
switch (offset) {
case 4: /* Interrupt Control Type. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
case 0xc: /* CPPWR */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
@@ -867,6 +872,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return val;
case 0xd10: /* System Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.scr[attrs.secure];
case 0xd14: /* Configuration Control. */
/* The BFHFNMIGN bit is the only non-banked bit; we
@@ -876,6 +884,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
return val;
case 0xd24: /* System Handler Control and State (SHCSR) */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
val = 0;
if (attrs.secure) {
if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
@@ -988,12 +999,21 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return val;
case 0xd2c: /* Hard Fault Status. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.hfsr;
case 0xd30: /* Debug Fault Status. */
return cpu->env.v7m.dfsr;
case 0xd34: /* MMFAR MemManage Fault Address */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.mmfar[attrs.secure];
case 0xd38: /* Bus Fault Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.bfar;
case 0xd3c: /* Aux Fault Status. */
/* TODO: Implement fault status registers. */
@@ -1263,9 +1283,12 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
"Setting VECTRESET when not in DEBUG mode "
"is UNPREDICTABLE\n");
}
- s->prigroup[attrs.secure] = extract32(value,
- R_V7M_AIRCR_PRIGROUP_SHIFT,
- R_V7M_AIRCR_PRIGROUP_LENGTH);
+ if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ s->prigroup[attrs.secure] =
+ extract32(value,
+ R_V7M_AIRCR_PRIGROUP_SHIFT,
+ R_V7M_AIRCR_PRIGROUP_LENGTH);
+ }
if (attrs.secure) {
/* These bits are only writable by secure */
cpu->env.v7m.aircr = value &
@@ -1288,6 +1311,9 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
}
break;
case 0xd10: /* System Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
/* We don't implement deep-sleep so these bits are RAZ/WI.
* The other bits in the register are banked.
* QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
@@ -1297,6 +1323,10 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
cpu->env.v7m.scr[attrs.secure] = value;
break;
case 0xd14: /* Configuration Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+
/* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
value &= (R_V7M_CCR_STKALIGN_MASK |
R_V7M_CCR_BFHFNMIGN_MASK |
@@ -1321,6 +1351,9 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
cpu->env.v7m.ccr[attrs.secure] = value;
break;
case 0xd24: /* System Handler Control and State (SHCSR) */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
if (attrs.secure) {
s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
/* Secure HardFault active bit cannot be written */
@@ -1389,15 +1422,24 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
nvic_irq_update(s);
break;
case 0xd2c: /* Hard Fault Status. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
cpu->env.v7m.hfsr &= ~value; /* W1C */
break;
case 0xd30: /* Debug Fault Status. */
cpu->env.v7m.dfsr &= ~value; /* W1C */
break;
case 0xd34: /* Mem Manage Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
cpu->env.v7m.mmfar[attrs.secure] = value;
return;
case 0xd38: /* Bus Fault Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
cpu->env.v7m.bfar = value;
return;
case 0xd3c: /* Aux Fault Status. */
@@ -1627,6 +1669,11 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
case 0xf00: /* Software Triggered Interrupt Register */
{
int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+
if (excnum < s->num_irq) {
armv7m_nvic_set_pending(s, excnum, false);
}
@@ -1752,6 +1799,11 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
break;
case 0x300 ... 0x33f: /* NVIC Active */
val = 0;
+
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
+ break;
+ }
+
startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
@@ -1771,7 +1823,13 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
}
}
break;
- case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
+ case 0xd18: /* System Handler Priority (SHPR1) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ val = 0;
+ break;
+ }
+ /* fall through */
+ case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
val = 0;
for (i = 0; i < size; i++) {
unsigned hdlidx = (offset - 0xd14) + i;
@@ -1784,6 +1842,10 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
}
break;
case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ val = 0;
+ break;
+            }
/* The BFSR bits [15:8] are shared between security states
* and we store them in the NS copy
*/
@@ -1876,7 +1938,12 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
}
nvic_irq_update(s);
return MEMTX_OK;
- case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
+ case 0xd18: /* System Handler Priority (SHPR1) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ return MEMTX_OK;
+ }
+ /* fall through */
+ case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
for (i = 0; i < size; i++) {
unsigned hdlidx = (offset - 0xd14) + i;
int newprio = extract32(value, i * 8, 8);
@@ -1890,6 +1957,9 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
nvic_irq_update(s);
return MEMTX_OK;
case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ return MEMTX_OK;
+ }
/* All bits are W1C, so construct 32 bit value with 0s in
* the parts not written by the access size
*/
@@ -2203,6 +2273,8 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
/* include space for internal exception vectors */
s->num_irq += NVIC_FIRST_IRQ;
+ s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;
+
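With only two priority bits (the !ARM_FEATURE_V7 case), the masking added to
set_prio() above snaps priority writes to multiples of 0x40; a worked
example:

    /* prio &= MAKE_64BIT_MASK(8 - 2, 2), i.e. prio &= 0xc0, so a write of
     * 0x45 is stored as 0x40; with 8 bits (ARMv7-M) the mask is 0xff. */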
object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
"realized", &err);
if (err != NULL) {
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index 7fe87b1..45c2af0 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -21,36 +21,38 @@
#ifndef QEMU_ARM_GIC_INTERNAL_H
#define QEMU_ARM_GIC_INTERNAL_H
+#include "hw/registerfields.h"
#include "hw/intc/arm_gic.h"
#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))
#define GIC_BASE_IRQ 0
-#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
-#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
-#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
-#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
-#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
-#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
-#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
-#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
-#define GIC_SET_MODEL(irq) s->irq_state[irq].model = true
-#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = false
-#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
-#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level |= (cm)
-#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
-#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
-#define GIC_SET_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = true
-#define GIC_CLEAR_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = false
-#define GIC_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
-#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
+#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
+#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm))
+#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
+#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm))
+#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm))
+#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm))
+#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm))
+#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
+#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true)
+#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false)
+#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model)
+#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm))
+#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm))
+#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
+#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true)
+#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \
+ (s->irq_state[irq].edge_trigger = false)
+#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
+#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
s->priority1[irq][cpu] : \
s->priority2[(irq) - GIC_INTERNAL])
-#define GIC_TARGET(irq) s->irq_target[irq]
-#define GIC_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
-#define GIC_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
-#define GIC_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
+#define GIC_DIST_TARGET(irq) (s->irq_target[irq])
+#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
+#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
+#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
#define GICD_CTLR_EN_GRP0 (1U << 0)
#define GICD_CTLR_EN_GRP1 (1U << 1)
@@ -63,6 +65,91 @@
#define GICC_CTLR_EOIMODE (1U << 9)
#define GICC_CTLR_EOIMODE_NS (1U << 10)
+REG32(GICH_HCR, 0x0)
+ FIELD(GICH_HCR, EN, 0, 1)
+ FIELD(GICH_HCR, UIE, 1, 1)
+ FIELD(GICH_HCR, LRENPIE, 2, 1)
+ FIELD(GICH_HCR, NPIE, 3, 1)
+ FIELD(GICH_HCR, VGRP0EIE, 4, 1)
+ FIELD(GICH_HCR, VGRP0DIE, 5, 1)
+ FIELD(GICH_HCR, VGRP1EIE, 6, 1)
+ FIELD(GICH_HCR, VGRP1DIE, 7, 1)
+ FIELD(GICH_HCR, EOICount, 27, 5)
+
+#define GICH_HCR_MASK \
+ (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \
+ R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \
+ R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \
+ R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \
+ R_GICH_HCR_EOICount_MASK)
+
+REG32(GICH_VTR, 0x4)
+ FIELD(GICH_VTR, ListRegs, 0, 6)
+ FIELD(GICH_VTR, PREbits, 26, 3)
+ FIELD(GICH_VTR, PRIbits, 29, 3)
+
+REG32(GICH_VMCR, 0x8)
+ FIELD(GICH_VMCR, VMCCtlr, 0, 10)
+ FIELD(GICH_VMCR, VMABP, 18, 3)
+ FIELD(GICH_VMCR, VMBP, 21, 3)
+ FIELD(GICH_VMCR, VMPriMask, 27, 5)
+
+REG32(GICH_MISR, 0x10)
+ FIELD(GICH_MISR, EOI, 0, 1)
+ FIELD(GICH_MISR, U, 1, 1)
+ FIELD(GICH_MISR, LRENP, 2, 1)
+ FIELD(GICH_MISR, NP, 3, 1)
+ FIELD(GICH_MISR, VGrp0E, 4, 1)
+ FIELD(GICH_MISR, VGrp0D, 5, 1)
+ FIELD(GICH_MISR, VGrp1E, 6, 1)
+ FIELD(GICH_MISR, VGrp1D, 7, 1)
+
+REG32(GICH_EISR0, 0x20)
+REG32(GICH_EISR1, 0x24)
+REG32(GICH_ELRSR0, 0x30)
+REG32(GICH_ELRSR1, 0x34)
+REG32(GICH_APR, 0xf0)
+
+REG32(GICH_LR0, 0x100)
+ FIELD(GICH_LR0, VirtualID, 0, 10)
+ FIELD(GICH_LR0, PhysicalID, 10, 10)
+ FIELD(GICH_LR0, CPUID, 10, 3)
+ FIELD(GICH_LR0, EOI, 19, 1)
+ FIELD(GICH_LR0, Priority, 23, 5)
+ FIELD(GICH_LR0, State, 28, 2)
+ FIELD(GICH_LR0, Grp1, 30, 1)
+ FIELD(GICH_LR0, HW, 31, 1)
+
+/* Last LR register */
+REG32(GICH_LR63, 0x1fc)
+
+#define GICH_LR_MASK \
+ (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
+ R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
+ R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
+ R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)
+
+#define GICH_LR_STATE_INVALID 0
+#define GICH_LR_STATE_PENDING 1
+#define GICH_LR_STATE_ACTIVE 2
+#define GICH_LR_STATE_ACTIVE_PENDING 3
+
+#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
+#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
+#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
+#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
+#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
+#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
+#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
+#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
+
+#define GICH_LR_CLEAR_PENDING(entry) \
+ ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_SET_ACTIVE(entry) \
+ ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_CLEAR_ACTIVE(entry) \
+ ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+
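The State field is two bits at [29:28], so the three macros above toggle
individual state bits rather than assigning a whole value: acknowledging a
pending vIRQ clears the pending bit and sets the active bit, taking State
from 01b to 10b. A standalone sketch with local stand-ins for the
GICH_LR_STATE_* values:

    #include <assert.h>
    #include <stdint.h>

    #define LR_STATE_SHIFT   28
    #define LR_STATE_PENDING 1u
    #define LR_STATE_ACTIVE  2u

    int main(void)
    {
        uint32_t lr = LR_STATE_PENDING << LR_STATE_SHIFT; /* pending vIRQ */

        lr &= ~(LR_STATE_PENDING << LR_STATE_SHIFT); /* GICH_LR_CLEAR_PENDING */
        lr |= LR_STATE_ACTIVE << LR_STATE_SHIFT;     /* GICH_LR_SET_ACTIVE */

        assert((lr >> LR_STATE_SHIFT) == LR_STATE_ACTIVE); /* 01b -> 10b */
        return 0;
    }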
/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
* GICv2 and GICv2 with security extensions:
*/
@@ -74,13 +161,9 @@
/* The special cases for the revision property: */
#define REV_11MPCORE 0
-void gic_set_pending_private(GICState *s, int cpu, int irq);
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
-void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs);
-void gic_update(GICState *s);
-void gic_init_irqs_and_distributor(GICState *s);
-void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
- MemTxAttrs attrs);
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+ MemTxAttrs attrs);
static inline bool gic_test_pending(GICState *s, int irq, int cm)
{
@@ -93,7 +176,148 @@ static inline bool gic_test_pending(GICState *s, int irq, int cm)
* GICD_ISPENDR to set the state pending.
*/
return (s->irq_state[irq].pending & cm) ||
- (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_LEVEL(irq, cm));
+ (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
+ }
+}
+
+static inline bool gic_is_vcpu(int cpu)
+{
+ return cpu >= GIC_NCPU;
+}
+
+static inline int gic_get_vcpu_real_id(int cpu)
+{
+ return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
+}
+
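These two helpers encode the indexing convention used throughout the patch:
the per-CPU state arrays are doubled to GIC_NCPU_VCPU entries, and vCPU n is
addressed as index n + GIC_NCPU. A standalone illustration with local
re-implementations, assuming GIC_NCPU == 8:

    #include <assert.h>

    #define GIC_NCPU 8

    static int is_vcpu(int cpu) { return cpu >= GIC_NCPU; }
    static int real_id(int cpu) { return is_vcpu(cpu) ? cpu - GIC_NCPU : cpu; }

    int main(void)
    {
        assert(is_vcpu(10) && real_id(10) == 2); /* index 10 is vCPU 2 */
        assert(!is_vcpu(2) && real_id(2) == 2);  /* index 2: physical CPU 2 */
        return 0;
    }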
+/* Return true if the given vIRQ state exists in an LR and is either active or
+ * active and pending.
+ *
+ * This function is used to check that a guest's `end of interrupt' or
+ * `interrupt deactivation' request is valid, and matches an LR holding an
+ * already acknowledged vIRQ (i.e. one with the active bit set in its state).
+ */
+static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
+{
+ int cpu = gic_get_vcpu_real_id(vcpu);
+ int lr_idx;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+ (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Return a pointer to the LR entry matching the given vIRQ.
+ *
+ * This function is used to retrieve an LR for which we know for sure that the
+ * corresponding vIRQ exists in the current context (i.e. its current state is
+ * not `invalid'):
+ * - Either the corresponding vIRQ has been validated with gic_virq_is_valid()
+ * so it is `active' or `active and pending',
+ * - Or it was pending and has been selected by gic_get_best_virq(). It is now
+ * `pending', `active' or `active and pending', depending on what the guest
+ * already did with this vIRQ.
+ *
+ * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE
+ * behaviour in the GIC. We choose to return the first one that matches.
+ */
+static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
+{
+ int cpu = gic_get_vcpu_real_id(vcpu);
+ int lr_idx;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+ (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) {
+ return entry;
+ }
+ }
+
+ g_assert_not_reached();
+}
+
+static inline bool gic_test_group(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ return GICH_LR_GROUP(*entry);
+ } else {
+ return GIC_DIST_TEST_GROUP(irq, 1 << cpu);
+ }
+}
+
+static inline void gic_clear_pending(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_CLEAR_PENDING(*entry);
+ } else {
+ /* Clear pending state for both level and edge triggered
+ * interrupts. (level triggered interrupts with an active line
+ * remain pending, see gic_test_pending)
+ */
+ GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
+ : (1 << cpu));
+ }
+}
+
+static inline void gic_set_active(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_SET_ACTIVE(*entry);
+ } else {
+ GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
+ }
+}
+
+static inline void gic_clear_active(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_CLEAR_ACTIVE(*entry);
+
+ if (GICH_LR_HW(*entry)) {
+ /* Hardware interrupt. We must forward the deactivation request to
+ * the distributor.
+ */
+ int phys_irq = GICH_LR_PHYS_ID(*entry);
+ int rcpu = gic_get_vcpu_real_id(cpu);
+
+ if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) {
+                /* UNPREDICTABLE behaviour; we choose to ignore the request */
+ return;
+ }
+
+ /* This is equivalent to a NS write to DIR on the physical CPU
+ * interface. Hence group0 interrupt deactivation is ignored if
+ * the GIC is secure.
+ */
+ if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) {
+ GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu);
+ }
+ }
+ } else {
+ GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu);
+ }
+}
+
+static inline int gic_get_priority(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ return GICH_LR_PRIORITY(*entry);
+ } else {
+ return GIC_DIST_GET_PRIORITY(irq, cpu);
}
}
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 5fb18e6..81c7c39 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -92,9 +92,17 @@ aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64
gic_enable_irq(int irq) "irq %d enabled"
gic_disable_irq(int irq) "irq %d disabled"
gic_set_irq(int irq, int level, int cpumask, int target) "irq %d level %d cpumask 0x%x target 0x%x"
-gic_update_bestirq(int cpu, int irq, int prio, int priority_mask, int running_priority) "cpu %d irq %d priority %d cpu priority mask %d cpu running priority %d"
+gic_update_bestirq(const char *s, int cpu, int irq, int prio, int priority_mask, int running_priority) "%s %d irq %d priority %d cpu priority mask %d cpu running priority %d"
gic_update_set_irq(int cpu, const char *name, int level) "cpu[%d]: %s = %d"
-gic_acknowledge_irq(int cpu, int irq) "cpu %d acknowledged irq %d"
+gic_acknowledge_irq(const char *s, int cpu, int irq) "%s %d acknowledged irq %d"
+gic_cpu_write(const char *s, int cpu, int addr, uint32_t val) "%s %d iface write at 0x%08x: 0x%08" PRIx32
+gic_cpu_read(const char *s, int cpu, int addr, uint32_t val) "%s %d iface read at 0x%08x: 0x%08" PRIx32
+gic_hyp_read(int addr, uint32_t val) "hyp read at 0x%08x: 0x%08" PRIx32
+gic_hyp_write(int addr, uint32_t val) "hyp write at 0x%08x: 0x%08" PRIx32
+gic_dist_read(int addr, unsigned int size, uint32_t val) "dist read at 0x%08x size %u: 0x%08" PRIx32
+gic_dist_write(int addr, unsigned int size, uint32_t val) "dist write at 0x%08x size %u: 0x%08" PRIx32
+gic_lr_entry(int cpu, int entry, uint32_t val) "cpu %d: new lr entry %d: 0x%08" PRIx32
+gic_update_maintenance_irq(int cpu, int val) "cpu %d: maintenance = %d"
# hw/intc/arm_gicv3_cpuif.c
gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu 0x%x value 0x%" PRIx64
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index da73e3b..5f78125 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -502,8 +502,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
hwaddr paddr, hwaddr xlat,
int prot,
target_ulong *address);
-bool memory_region_is_unassigned(MemoryRegion *mr);
-
#endif
/* vl.c */
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9a870cc..4cc57a7 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -42,7 +42,7 @@
#define NUM_VIRTIO_TRANSPORTS 32
#define NUM_SMMU_IRQS 4
-#define ARCH_GICV3_MAINT_IRQ 9
+#define ARCH_GIC_MAINT_IRQ 9
#define ARCH_TIMER_VIRT_IRQ 11
#define ARCH_TIMER_S_EL1_IRQ 13
@@ -60,6 +60,8 @@ enum {
VIRT_GIC_DIST,
VIRT_GIC_CPU,
VIRT_GIC_V2M,
+ VIRT_GIC_HYP,
+ VIRT_GIC_VCPU,
VIRT_GIC_ITS,
VIRT_GIC_REDIST,
VIRT_GIC_REDIST2,
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 82b6ec2..98f925a 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -53,7 +53,7 @@
#define XLNX_ZYNQMP_OCM_RAM_0_ADDRESS 0xFFFC0000
#define XLNX_ZYNQMP_OCM_RAM_SIZE 0x10000
-#define XLNX_ZYNQMP_GIC_REGIONS 2
+#define XLNX_ZYNQMP_GIC_REGIONS 6
/* ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
* and under-decodes the 64k region. This mirrors the 4k regions to every 4k
@@ -62,7 +62,7 @@
*/
#define XLNX_ZYNQMP_GIC_REGION_SIZE 0x1000
-#define XLNX_ZYNQMP_GIC_ALIASES (0x10000 / XLNX_ZYNQMP_GIC_REGION_SIZE - 1)
+#define XLNX_ZYNQMP_GIC_ALIASES (0x10000 / XLNX_ZYNQMP_GIC_REGION_SIZE)
#define XLNX_ZYNQMP_MAX_LOW_RAM_SIZE 0x80000000ull
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index af3ca18..b5585fe 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -30,6 +30,8 @@
#define GIC_NR_SGIS 16
/* Maximum number of possible CPU interfaces, determined by GIC architecture */
#define GIC_NCPU 8
+/* Maximum number of possible CPU interfaces plus their respective vCPUs */
+#define GIC_NCPU_VCPU (GIC_NCPU * 2)
#define MAX_NR_GROUP_PRIO 128
#define GIC_NR_APRS (MAX_NR_GROUP_PRIO / 32)
@@ -37,6 +39,17 @@
#define GIC_MIN_BPR 0
#define GIC_MIN_ABPR (GIC_MIN_BPR + 1)
+/* Architectural maximum number of list registers in the virtual interface */
+#define GIC_MAX_LR 64
+
+/* Only 32 priority levels and 32 preemption levels in the vCPU interfaces */
+#define GIC_VIRT_MAX_GROUP_PRIO_BITS 5
+#define GIC_VIRT_MAX_NR_GROUP_PRIO (1 << GIC_VIRT_MAX_GROUP_PRIO_BITS)
+#define GIC_VIRT_NR_APRS (GIC_VIRT_MAX_NR_GROUP_PRIO / 32)
+
+#define GIC_VIRT_MIN_BPR 2
+#define GIC_VIRT_MIN_ABPR (GIC_VIRT_MIN_BPR + 1)
+
typedef struct gic_irq_state {
/* The enable bits are only banked for per-cpu interrupts. */
uint8_t enabled;
@@ -57,6 +70,8 @@ typedef struct GICState {
qemu_irq parent_fiq[GIC_NCPU];
qemu_irq parent_virq[GIC_NCPU];
qemu_irq parent_vfiq[GIC_NCPU];
+ qemu_irq maintenance_irq[GIC_NCPU];
+
/* GICD_CTLR; for a GIC with the security extensions the NS banked version
* of this register is just an alias of bit 1 of the S banked version.
*/
@@ -64,7 +79,7 @@ typedef struct GICState {
/* GICC_CTLR; again, the NS banked version is just aliases of bits of
* the S banked register, so our state only needs to store the S version.
*/
- uint32_t cpu_ctlr[GIC_NCPU];
+ uint32_t cpu_ctlr[GIC_NCPU_VCPU];
gic_irq_state irq_state[GIC_MAXIRQ];
uint8_t irq_target[GIC_MAXIRQ];
@@ -78,9 +93,9 @@ typedef struct GICState {
*/
uint8_t sgi_pending[GIC_NR_SGIS][GIC_NCPU];
- uint16_t priority_mask[GIC_NCPU];
- uint16_t running_priority[GIC_NCPU];
- uint16_t current_pending[GIC_NCPU];
+ uint16_t priority_mask[GIC_NCPU_VCPU];
+ uint16_t running_priority[GIC_NCPU_VCPU];
+ uint16_t current_pending[GIC_NCPU_VCPU];
/* If we present the GICv2 without security extensions to a guest,
* the guest can configure the GICC_CTLR to configure group 1 binary point
@@ -88,8 +103,8 @@ typedef struct GICState {
* For a GIC with Security Extensions we use use bpr for the
* secure copy and abpr as storage for the non-secure copy of the register.
*/
- uint8_t bpr[GIC_NCPU];
- uint8_t abpr[GIC_NCPU];
+ uint8_t bpr[GIC_NCPU_VCPU];
+ uint8_t abpr[GIC_NCPU_VCPU];
/* The APR is implementation defined, so we choose a layout identical to
* the KVM ABI layout for QEMU's implementation of the gic:
@@ -100,6 +115,15 @@ typedef struct GICState {
uint32_t apr[GIC_NR_APRS][GIC_NCPU];
uint32_t nsapr[GIC_NR_APRS][GIC_NCPU];
+ /* Virtual interface control registers */
+ uint32_t h_hcr[GIC_NCPU];
+ uint32_t h_misr[GIC_NCPU];
+ uint32_t h_lr[GIC_MAX_LR][GIC_NCPU];
+ uint32_t h_apr[GIC_NCPU];
+
+ /* Number of LRs implemented in this GIC instance */
+ uint32_t num_lrs;
+
uint32_t num_cpu;
MemoryRegion iomem; /* Distributor */
@@ -108,9 +132,13 @@ typedef struct GICState {
*/
struct GICState *backref[GIC_NCPU];
MemoryRegion cpuiomem[GIC_NCPU + 1]; /* CPU interfaces */
+ MemoryRegion vifaceiomem[GIC_NCPU + 1]; /* Virtual interfaces */
+ MemoryRegion vcpuiomem; /* vCPU interface */
+
uint32_t num_irq;
uint32_t revision;
bool security_extn;
+ bool virt_extn;
bool irq_reset_nonsecure; /* configure IRQs as group 1 (NS) on reset? */
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
@@ -134,6 +162,7 @@ typedef struct ARMGICCommonClass {
} ARMGICCommonClass;
void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops);
+ const MemoryRegionOps *ops,
+ const MemoryRegionOps *virt_ops);
#endif
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index 8bc2911..a472c9b 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -57,6 +57,7 @@ typedef struct NVICState {
VecInfo sec_vectors[NVIC_INTERNAL_VECTORS];
/* The PRIGROUP field in AIRCR is banked */
uint32_t prigroup[M_REG_NUM_BANKS];
+ uint8_t num_prio_bits;
/* v8M NVIC_ITNS state (stored as a bool per bit) */
bool itns[NVIC_MAX_VECTORS];
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 42b946c..2b501d0 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -923,6 +923,9 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_UINT16_ARRAY(_f, _s, _n) \
VMSTATE_UINT16_ARRAY_V(_f, _s, _n, 0)
+#define VMSTATE_UINT16_SUB_ARRAY(_f, _s, _start, _num) \
+ VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint16, uint16_t)
+
#define VMSTATE_UINT16_2DARRAY(_f, _s, _n1, _n2) \
VMSTATE_UINT16_2DARRAY_V(_f, _s, _n1, _n2, 0)
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index bd79657..ecf6ed5 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -386,6 +386,12 @@ struct CPUState {
*/
uintptr_t mem_io_pc;
vaddr mem_io_vaddr;
+ /*
+ * This is only needed for the legacy cpu_unassigned_access() hook;
+ * when all targets using it have been converted to use
+ * cpu_transaction_failed() instead it can be removed.
+ */
+ MMUAccessType mem_io_access_type;
int kvm_fd;
struct KVMState *kvm_state;
diff --git a/memory.c b/memory.c
index e9cd446..2ea16e7 100644
--- a/memory.c
+++ b/memory.c
@@ -1249,7 +1249,8 @@ static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
if (current_cpu != NULL) {
- cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
+ bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
+ cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
}
return 0;
}
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 64a8005..3848ef4 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -231,6 +231,10 @@ static void arm_cpu_reset(CPUState *s)
env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
}
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
+ env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
+ }
/* Unlike A/R profile, M profile defines the reset LR value */
env->regs[14] = 0xffffffff;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index e310ffc..4289c33 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1229,6 +1229,12 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_RW (1ULL << 31)
#define HCR_CD (1ULL << 32)
#define HCR_ID (1ULL << 33)
+#define HCR_E2H (1ULL << 34)
+/*
+ * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
+ * HCR_MASK, and then have hcr_write() clear it again if the feature
+ * bit is not set.
+ */
#define HCR_MASK ((1ULL << 34) - 1)
#define SCR_NS (1U << 0)
@@ -2234,6 +2240,54 @@ bool write_cpustate_to_list(ARMCPU *cpu);
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
+/**
+ * arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
+ * Depending on the values of HCR_EL2.E2H and TGE, this may be
+ * "behaves as 1 for all purposes other than direct read/write" or
+ * "behaves as 0 for all purposes other than direct read/write"
+ */
+static inline bool arm_hcr_el2_imo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_IMO;
+ }
+}
+
+/**
+ * arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
+ */
+static inline bool arm_hcr_el2_fmo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_FMO;
+ }
+}
+
+/**
+ * arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
+ */
+static inline bool arm_hcr_el2_amo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_AMO;
+ }
+}
+
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
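The three accessors encode one truth table: TGE=1,E2H=0 makes the bit behave as 1, TGE=1,E2H=1 makes it behave as 0, and otherwise the raw bit value applies. A standalone sketch of that table for a single routing bit (simplified flag values, not CPUARMState):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TGE (1u << 0)
#define E2H (1u << 1)
#define IMO (1u << 2)

/* Effective value of a routing bit under the TGE/E2H overrides. */
static bool effective_imo(uint32_t hcr)
{
    switch (hcr & (TGE | E2H)) {
    case TGE:       return true;   /* behaves as 1 */
    case TGE | E2H: return false;  /* behaves as 0 */
    default:        return hcr & IMO;
    }
}

int main(void)
{
    printf("%d %d %d %d\n", effective_imo(0), effective_imo(IMO),
           effective_imo(TGE), effective_imo(TGE | E2H | IMO));
    /* prints: 0 1 1 0 */
    return 0;
}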
@@ -2261,13 +2315,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
+ if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervised and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
+ if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervised and non-secure. */
return false;
}
@@ -2306,7 +2360,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* to the CPSR.F setting otherwise we further assess the state
* below.
*/
- hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ hcr = arm_hcr_el2_fmo(env);
scr = (env->cp15.scr_el3 & SCR_FIQ);
/* When EL3 is 32-bit, the SCR.FW bit controls whether the
@@ -2323,7 +2377,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* when setting the target EL, so it does not have a further
* effect here.
*/
- hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ hcr = arm_hcr_el2_imo(env);
scr = false;
break;
default:
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 66afb08..8b07bf2 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -444,9 +444,11 @@ static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
+ bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
+ (env->cp15.mdcr_el2 & MDCR_TDE) ||
+ (env->cp15.hcr_el2 & HCR_TGE);
- if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
- && !arm_is_secure_below_el3(env)) {
+ if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
@@ -462,9 +464,11 @@ static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
+ bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
+ (env->cp15.mdcr_el2 & MDCR_TDE) ||
+ (env->cp15.hcr_el2 & HCR_TGE);
- if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
- && !arm_is_secure_below_el3(env)) {
+ if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
@@ -480,9 +484,11 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
+ bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
+ (env->cp15.mdcr_el2 & MDCR_TDE) ||
+ (env->cp15.hcr_el2 & HCR_TGE);
- if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
- && !arm_is_secure_below_el3(env)) {
+ if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
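access_tdosa, access_tdra and access_tda now all compute the same disjunction: trap to EL2 if the group-specific MDCR_EL2 bit, or MDCR_EL2.TDE, or HCR_EL2.TGE is set. A hedged sketch of that shared predicate as one helper (hypothetical refactoring, not QEMU's code; bit positions are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MDCR_TDE   (1u << 8)
#define MDCR_TDOSA (1u << 10)
#define HCR_TGE    (1u << 27)

/* 'specific' is the per-group trap bit (TDOSA, TDRA or TDA). */
static bool debug_reg_traps_to_el2(uint32_t mdcr_el2, uint64_t hcr_el2,
                                   uint32_t specific)
{
    return (mdcr_el2 & specific) || (mdcr_el2 & MDCR_TDE) ||
           (hcr_el2 & HCR_TGE);
}

int main(void)
{
    printf("%d %d\n",
           debug_reg_traps_to_el2(0, HCR_TGE, MDCR_TDOSA),  /* 1 */
           debug_reg_traps_to_el2(0, 0, MDCR_TDOSA));       /* 0 */
    return 0;
}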
@@ -6330,15 +6336,15 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
switch (excp_idx) {
case EXCP_IRQ:
scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
- hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
+ hcr = arm_hcr_el2_imo(env);
break;
case EXCP_FIQ:
scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
- hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
+ hcr = arm_hcr_el2_fmo(env);
break;
default:
scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
- hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
+ hcr = arm_hcr_el2_amo(env);
break;
};
@@ -6834,6 +6840,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool push_failed = false;
armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+ qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
+ targets_secure ? "secure" : "nonsecure", exc);
if (arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
@@ -6907,12 +6915,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
* we might now want to take a different exception which
* targets a different security state, so try again from the top.
*/
+ qemu_log_mask(CPU_LOG_INT,
+ "...derived exception on callee-saves register stacking");
v7m_exception_taken(cpu, lr, true, true);
return;
}
if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
/* Vector load failed: derived exception */
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
v7m_exception_taken(cpu, lr, true, true);
return;
}
@@ -7041,6 +7052,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* For all other purposes, treat ES as 0 (R_HXSR) */
excret &= ~R_V7M_EXCRET_ES_MASK;
}
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
}
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
@@ -7051,7 +7063,6 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
* which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
*/
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- exc_secure = excret & R_V7M_EXCRET_ES_MASK;
if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
env->v7m.faultmask[exc_secure] = 0;
}
@@ -7120,12 +7131,22 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
}
+ /*
+ * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
+ * Handler mode (and will be until we write the new XPSR.Interrupt
+ * field) this does not switch around the current stack pointer.
+ * We must do this before we do any kind of tailchaining, including
+ * for the derived exceptions on integrity check failures, or we will
+ * give the guest an incorrect EXCRET.SPSEL value on exception entry.
+ */
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+
if (sfault) {
env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
"stackframe: failed EXC_RETURN.ES validity check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7135,17 +7156,27 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
*/
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
- /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
- * Handler mode (and will be until we write the new XPSR.Interrupt
- * field) this does not switch around the current stack pointer.
- */
- write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+ /*
+ * Tailchaining: if there is currently a pending exception that
+ * is high enough priority to preempt execution at the level we're
+ * about to return to, then just directly take that exception now,
+ * avoiding an unstack-and-then-stack. Note that now that we have
+ * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
+ * our current execution priority is already the execution priority we are
+ * returning to -- none of the state we would unstack or set based on
+ * the EXCRET value affects it.
+ */
+ if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
+ qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
switch_v7m_security_state(env, return_to_secure);
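In outline, exception return now asks the NVIC whether a pending exception could preempt at the priority being returned to, and if so re-enters exception entry directly instead of unstacking and then restacking the same frame. A compressed control-flow sketch with stub functions (hypothetical names, not the QEMU call graph):

#include <stdbool.h>
#include <stdio.h>

static bool nvic_can_take_pending(void) { return true; }  /* stub */

static void exception_return(void)
{
    /* ...integrity checks and CONTROL.SPSEL update come first... */
    if (nvic_can_take_pending()) {
        puts("tailchain: take pending exception, skip unstack/restack");
        return;
    }
    puts("no pending work: unstack the frame and resume");
}

int main(void)
{
    exception_return();
    return 0;
}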
@@ -7192,10 +7223,10 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* Take a SecureFault on the current stack */
env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
"stackframe: failed exception return integrity "
"signature check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7228,6 +7259,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* v7m_stack_read() pended a fault, so take it (as a tail
* chained exception on the same stack frame)
*/
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7264,10 +7296,10 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity "
"check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
}
@@ -7303,9 +7335,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
ignore_stackfaults = v7m_push_stack(cpu);
- v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
"failed exception return integrity check\n");
+ v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
return;
}
@@ -7721,7 +7753,6 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
ignore_stackfaults = v7m_push_stack(cpu);
v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
- qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
@@ -8390,6 +8421,14 @@ static inline bool regime_translation_disabled(CPUARMState *env,
if (mmu_idx == ARMMMUIdx_S2NS) {
return (env->cp15.hcr_el2 & HCR_VM) == 0;
}
+
+ if (env->cp15.hcr_el2 & HCR_TGE) {
+ /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
+ if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
+ return true;
+ }
+ }
+
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
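The new TGE check short-circuits the SCTLR.M test: with HCR_EL2.TGE set, the non-secure EL1&0 regime behaves as if SCTLR_EL1.M were zero. A one-function sketch of the decision (boolean inputs standing in for the register state):

#include <stdbool.h>
#include <stdio.h>

static bool translation_disabled(bool tge, bool secure, int regime_el,
                                 bool sctlr_m)
{
    if (tge && !secure && regime_el == 1) {
        return true;  /* TGE: treat SCTLR_EL1.M as zero */
    }
    return !sctlr_m;
}

int main(void)
{
    printf("%d\n", translation_disabled(true, false, 1, true));  /* 1 */
    return 0;
}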
@@ -9795,17 +9834,6 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
fi->type = ARMFault_Permission;
fi->level = 1;
- /*
- * Core QEMU code can't handle execution from small pages yet, so
- * don't try it. This way we'll get an MPU exception, rather than
- * eventually causing QEMU to exit in get_page_addr_code().
- */
- if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) {
- qemu_log_mask(LOG_UNIMP,
- "MPU: No support for execution from regions "
- "smaller than 1K\n");
- *prot &= ~PAGE_EXEC;
- }
return !(*prot & (1 << access_type));
}
@@ -10056,18 +10084,6 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
fi->type = ARMFault_Permission;
fi->level = 1;
- /*
- * Core QEMU code can't handle execution from small pages yet, so
- * don't try it. This means any attempted execution will generate
- * an MPU exception, rather than eventually causing QEMU to exit in
- * get_page_addr_code().
- */
- if (*is_subpage && (*prot & PAGE_EXEC)) {
- qemu_log_mask(LOG_UNIMP,
- "MPU: No support for execution from regions "
- "smaller than 1K\n");
- *prot &= ~PAGE_EXEC;
- }
return !(*prot & (1 << access_type));
}
@@ -10710,13 +10726,13 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
env->v7m.primask[M_REG_NS] = val & 1;
return;
case 0x91: /* BASEPRI_NS */
- if (!env->v7m.secure) {
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
return;
}
env->v7m.basepri[M_REG_NS] = val & 0xff;
return;
case 0x93: /* FAULTMASK_NS */
- if (!env->v7m.secure) {
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
return;
}
env->v7m.faultmask[M_REG_NS] = val & 1;
@@ -10728,8 +10744,10 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
write_v7m_control_spsel_for_secstate(env,
val & R_V7M_CONTROL_SPSEL_MASK,
M_REG_NS);
- env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
- env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ }
return;
case 0x98: /* SP_NS */
{
@@ -10798,9 +10816,15 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
env->v7m.primask[env->v7m.secure] = val & 1;
break;
case 17: /* BASEPRI */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
env->v7m.basepri[env->v7m.secure] = val & 0xff;
break;
case 18: /* BASEPRI_MAX */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
val &= 0xff;
if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
|| env->v7m.basepri[env->v7m.secure] == 0)) {
@@ -10808,6 +10832,9 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
}
break;
case 19: /* FAULTMASK */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
env->v7m.faultmask[env->v7m.secure] = val & 1;
break;
case 20: /* CONTROL */
@@ -10822,8 +10849,10 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
!arm_v7m_is_handler_mode(env)) {
write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
- env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
- env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ }
break;
default:
bad_reg:
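The v7m_msr changes make BASEPRI, FAULTMASK and CONTROL.nPRIV Mainline-only: Baseline cores ignore writes through the NS aliases and treat the direct encodings as bad registers. A minimal sketch of the gate (hypothetical flag and storage, not QEMU's helper):

#include <stdbool.h>
#include <stdio.h>

static bool have_m_main;    /* false on ARMv6-M / Baseline */
static unsigned basepri;

static void msr_basepri(unsigned val)
{
    if (!have_m_main) {
        puts("BASEPRI absent on Baseline: write rejected");
        return;
    }
    basepri = val & 0xff;
}

int main(void)
{
    msr_basepri(0x20);      /* rejected: Baseline */
    have_m_main = true;
    msr_basepri(0x20);      /* accepted: Mainline */
    printf("basepri=0x%x\n", basepri);
    return 0;
}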
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index f728f25..d550978 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -33,6 +33,20 @@ static void raise_exception(CPUARMState *env, uint32_t excp,
{
CPUState *cs = CPU(arm_env_get_cpu(env));
+ if ((env->cp15.hcr_el2 & HCR_TGE) &&
+ target_el == 1 && !arm_is_secure(env)) {
+ /*
+ * Redirect NS EL1 exceptions to NS EL2. These are reported with
+ * their original syndrome register value, with the exception of
+ * SIMD/FP access traps, which are reported as uncategorized
+ * (see DDI0487C.a D1.10.4).
+ */
+ target_el = 2;
+ if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) {
+ syndrome = syn_uncategorized();
+ }
+ }
+
assert(!excp_is_internal(excp));
cs->exception_index = excp;
env->exception.syndrome = syndrome;
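The routing rule added to raise_exception() can be stated in a few lines: with TGE set, an exception headed for non-secure EL1 is taken to EL2 instead, and SIMD/FP access traps lose their syndrome category on the way. A standalone model of just that rule (simplified types and constants):

#include <stdint.h>
#include <stdio.h>

#define EC_SHIFT            26
#define EC_ADVSIMDFPACCESS  0x7u
#define SYN_UNCATEGORIZED   0x0u

static void route_exception(int *target_el, uint32_t *syndrome,
                            int tge, int secure)
{
    if (tge && *target_el == 1 && !secure) {
        *target_el = 2;
        if (*syndrome >> EC_SHIFT == EC_ADVSIMDFPACCESS) {
            *syndrome = SYN_UNCATEGORIZED;
        }
    }
}

int main(void)
{
    int el = 1;
    uint32_t syn = EC_ADVSIMDFPACCESS << EC_SHIFT;

    route_exception(&el, &syn, 1, 0);
    printf("el=%d syn=0x%x\n", el, syn);  /* el=2 syn=0x0 */
    return 0;
}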
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 54795c9..c3cbec9 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1042,7 +1042,7 @@ void HELPER(sve_movz_d)(void *vd, void *vn, void *vg, uint32_t desc)
uint64_t *d = vd, *n = vn;
uint8_t *pg = vg;
for (i = 0; i < opr_sz; i += 1) {
- d[i] = n[1] & -(uint64_t)(pg[H1(i)] & 1);
+ d[i] = n[i] & -(uint64_t)(pg[H1(i)] & 1);
}
}
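The one-character fix matters: each destination element must be masked from its own source element, not from n[1]. A tiny demo of the corrected loop:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t n[4] = { 10, 20, 30, 40 }, d[4];
    uint8_t pg[4] = { 1, 0, 1, 0 };  /* predicate: elements 0 and 2 */

    for (int i = 0; i < 4; i++) {
        d[i] = n[i] & -(uint64_t)(pg[i] & 1);  /* n[i], not n[1] */
    }
    printf("%llu %llu %llu %llu\n",  /* prints: 10 0 30 0 */
           (unsigned long long)d[0], (unsigned long long)d[1],
           (unsigned long long)d[2], (unsigned long long)d[3]);
    return 0;
}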
@@ -2436,13 +2436,13 @@ uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
#define DO_CMP_PPZW_S(NAME, TYPE, TYPEW, OP) \
DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_4, 0x1111111111111111ull)
-DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, uint8_t, uint64_t, ==)
-DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, uint16_t, uint64_t, ==)
-DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, uint32_t, uint64_t, ==)
+DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, int8_t, uint64_t, ==)
+DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, int16_t, uint64_t, ==)
+DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, int32_t, uint64_t, ==)
-DO_CMP_PPZW_B(sve_cmpne_ppzw_b, uint8_t, uint64_t, !=)
-DO_CMP_PPZW_H(sve_cmpne_ppzw_h, uint16_t, uint64_t, !=)
-DO_CMP_PPZW_S(sve_cmpne_ppzw_s, uint32_t, uint64_t, !=)
+DO_CMP_PPZW_B(sve_cmpne_ppzw_b, int8_t, uint64_t, !=)
+DO_CMP_PPZW_H(sve_cmpne_ppzw_h, int16_t, uint64_t, !=)
+DO_CMP_PPZW_S(sve_cmpne_ppzw_s, int32_t, uint64_t, !=)
DO_CMP_PPZW_B(sve_cmpgt_ppzw_b, int8_t, int64_t, >)
DO_CMP_PPZW_H(sve_cmpgt_ppzw_h, int16_t, int64_t, >)
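The element type flips from unsigned to signed because each narrow element is widened to 64 bits before being compared against the wide operand; zero-extending 0xff can never match a sign-extended wide -1. A two-line demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t u = 0xff;
    int8_t s = (int8_t)0xff;
    uint64_t wide = UINT64_MAX;  /* -1 in the wide element */

    printf("%d\n", (uint64_t)u == wide);  /* 0: zero-extended mismatch */
    printf("%d\n", (uint64_t)s == wide);  /* 1: sign-extended match */
    return 0;
}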
@@ -2846,11 +2846,6 @@ uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc)
return flags;
}
- /* Scale from predicate element count to bits. */
- count <<= esz;
- /* Bound to the bits in the predicate. */
- count = MIN(count, oprsz * 8);
-
/* Set all of the requested bits. */
for (i = 0; i < count / 64; ++i) {
d->p[i] = esz_mask;
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 374051c..89efc80 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -1625,7 +1625,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
/* Detect signed overflow for subtraction. */
tcg_gen_xor_i64(t0, reg, val);
tcg_gen_sub_i64(t1, reg, val);
- tcg_gen_xor_i64(reg, reg, t0);
+ tcg_gen_xor_i64(reg, reg, t1);
tcg_gen_and_i64(t0, t0, reg);
/* Bound the result. */
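The corrected sequence computes the standard signed-subtraction overflow predicate: overflow occurred iff the operands differ in sign and the result's sign differs from the minuend's, i.e. the sign bit of (reg ^ val) & (reg ^ (reg - val)). A worked check in plain C:

#include <stdint.h>
#include <stdio.h>

static int sub_overflows(int64_t reg, int64_t val)
{
    uint64_t r = (uint64_t)reg, v = (uint64_t)val;
    uint64_t res = r - v;

    /* (r ^ v): operands differ in sign; (r ^ res): result flipped
     * sign relative to the minuend -- the corrected xor with t1. */
    return (int64_t)((r ^ v) & (r ^ res)) < 0;
}

int main(void)
{
    printf("%d\n", sub_overflows(INT64_MIN, 1));  /* 1: overflow */
    printf("%d\n", sub_overflows(0, 1));          /* 0: fine */
    return 0;
}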
@@ -3173,19 +3173,19 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a, uint32_t insn)
static bool trans_WHILE(DisasContext *s, arg_WHILE *a, uint32_t insn)
{
- if (!sve_access_check(s)) {
- return true;
- }
-
- TCGv_i64 op0 = read_cpu_reg(s, a->rn, 1);
- TCGv_i64 op1 = read_cpu_reg(s, a->rm, 1);
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 op0, op1, t0, t1, tmax;
TCGv_i32 t2, t3;
TCGv_ptr ptr;
unsigned desc, vsz = vec_full_reg_size(s);
TCGCond cond;
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ op0 = read_cpu_reg(s, a->rn, 1);
+ op1 = read_cpu_reg(s, a->rm, 1);
+
if (!a->sf) {
if (a->u) {
tcg_gen_ext32u_i64(op0, op0);
@@ -3198,32 +3198,47 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a, uint32_t insn)
/* For the helper, compress the different conditions into a computation
* of how many iterations for which the condition is true.
- *
- * This is slightly complicated by 0 <= UINT64_MAX, which is nominally
- * 2**64 iterations, overflowing to 0. Of course, predicate registers
- * aren't that large, so any value >= predicate size is sufficient.
*/
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
tcg_gen_sub_i64(t0, op1, op0);
- /* t0 = MIN(op1 - op0, vsz). */
- tcg_gen_movi_i64(t1, vsz);
- tcg_gen_umin_i64(t0, t0, t1);
+ tmax = tcg_const_i64(vsz >> a->esz);
if (a->eq) {
/* Equality means one more iteration. */
tcg_gen_addi_i64(t0, t0, 1);
+
+ /* If op1 is the maximum (un)signed integer (which is the only time
+ * the addition above could overflow), then we produce an all-true
+ * predicate by setting the count to the vector length. This is
+ * because the pseudocode is described as an increment + compare
+ * loop, and the maximum integer would always compare true.
+ */
+ tcg_gen_movi_i64(t1, (a->sf
+ ? (a->u ? UINT64_MAX : INT64_MAX)
+ : (a->u ? UINT32_MAX : INT32_MAX)));
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
}
- /* t0 = (condition true ? t0 : 0). */
+ /* Bound to the maximum. */
+ tcg_gen_umin_i64(t0, t0, tmax);
+ tcg_temp_free_i64(tmax);
+
+ /* Set the count to zero if the condition is false. */
cond = (a->u
? (a->eq ? TCG_COND_LEU : TCG_COND_LTU)
: (a->eq ? TCG_COND_LE : TCG_COND_LT));
tcg_gen_movi_i64(t1, 0);
tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
+ tcg_temp_free_i64(t1);
+ /* Since we're bounded, pass as a 32-bit type. */
t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, t0);
tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+
+ /* Scale elements to bits. */
+ tcg_gen_shli_i32(t2, t2, a->esz);
desc = (vsz / 8) - 2;
desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz);
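Taken together, the reworked translation computes an element count and only scales it to predicate bits at the end: count = op1 - op0 (plus one for the inclusive forms, saturating to the vector length when op1 is the maximum integer), clamped to vsz >> esz, zeroed when the condition fails, then shifted left by esz. A scalar model of the simplest variant, unsigned with an exclusive bound (WHILELO-like; vsz in bytes, as in vec_full_reg_size):

#include <stdint.h>
#include <stdio.h>

static uint64_t while_count_bits(uint64_t op0, uint64_t op1,
                                 unsigned vsz, unsigned esz)
{
    uint64_t tmax = vsz >> esz;  /* elements in the vector */
    uint64_t t0 = op1 - op0;     /* candidate iteration count */

    if (t0 > tmax) {
        t0 = tmax;               /* bound to the maximum */
    }
    if (!(op0 < op1)) {
        t0 = 0;                  /* condition false: empty predicate */
    }
    return t0 << esz;            /* scale elements to bits */
}

int main(void)
{
    /* 256-bit vector (32 bytes), 32-bit elements: at most 8. */
    printf("%llu\n",
           (unsigned long long)while_count_bits(0, 3, 32, 2));
    /* prints 12: 3 elements, 4 predicate bits per .s element */
    return 0;
}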