Diffstat (limited to 'include')
-rw-r--r--  include/hw/acpi/acpi-defs.h        |   4
-rw-r--r--  include/hw/intc/arm_gicv3.h        |  32
-rw-r--r--  include/hw/intc/arm_gicv3_common.h | 215
-rw-r--r--  include/hw/timer/aspeed_timer.h    |   5
-rw-r--r--  include/migration/vmstate.h        |   6
-rw-r--r--  include/qemu/bitops.h              | 108
6 files changed, 361 insertions, 9 deletions
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index 850a962..ea9be0b 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -367,7 +367,9 @@ struct AcpiMadtGenericDistributor {
uint32_t gic_id;
uint64_t base_address;
uint32_t global_irq_base;
- uint32_t reserved2;
+ /* ACPI 5.1 Errata 1228: present GIC version in MADT table */
+ uint8_t version;
+ uint8_t reserved2[3];
} QEMU_PACKED;
typedef struct AcpiMadtGenericDistributor AcpiMadtGenericDistributor;
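The new byte lets the MADT entry advertise which GIC architecture the distributor implements. A rough sketch of how a table builder might fill it in (illustrative only: table_data and gic_dist_base are assumed variables, not part of this patch; 0x3 is the ACPI 5.1 encoding for GICv3):

    AcpiMadtGenericDistributor *gicd = acpi_data_push(table_data, sizeof *gicd);

    gicd->base_address = gic_dist_base;   /* assumed distributor base address */
    gicd->version = 0x3;                  /* 0x3 = GICv3 in the ACPI 5.1 encoding */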
diff --git a/include/hw/intc/arm_gicv3.h b/include/hw/intc/arm_gicv3.h
new file mode 100644
index 0000000..4a6fd85
--- /dev/null
+++ b/include/hw/intc/arm_gicv3.h
@@ -0,0 +1,32 @@
+/*
+ * ARM Generic Interrupt Controller v3
+ *
+ * Copyright (c) 2015 Huawei.
+ * Copyright (c) 2016 Linaro Limited
+ * Written by Shlomo Pongratz, Peter Maydell
+ *
+ * This code is licensed under the GPL, version 2 or (at your option)
+ * any later version.
+ */
+
+#ifndef HW_ARM_GICV3_H
+#define HW_ARM_GICV3_H
+
+#include "arm_gicv3_common.h"
+
+#define TYPE_ARM_GICV3 "arm-gicv3"
+#define ARM_GICV3(obj) OBJECT_CHECK(GICv3State, (obj), TYPE_ARM_GICV3)
+#define ARM_GICV3_CLASS(klass) \
+ OBJECT_CLASS_CHECK(ARMGICv3Class, (klass), TYPE_ARM_GICV3)
+#define ARM_GICV3_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(ARMGICv3Class, (obj), TYPE_ARM_GICV3)
+
+typedef struct ARMGICv3Class {
+ /*< private >*/
+ ARMGICv3CommonClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+} ARMGICv3Class;
+
+#endif
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index c2fd8da..f72e499 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -3,8 +3,9 @@
*
* Copyright (c) 2012 Linaro Limited
* Copyright (c) 2015 Huawei.
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Written by Peter Maydell
- * Extended to 64 cores by Shlomo Pongratz
+ * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,14 +27,163 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic_common.h"
-typedef struct GICv3State {
+/*
+ * Maximum number of possible interrupts, determined by the GIC architecture.
+ * Note that this does not include LPIs. When implemented, these should be
+ * dealt with separately.
+ */
+#define GICV3_MAXIRQ 1020
+#define GICV3_MAXSPI (GICV3_MAXIRQ - GIC_INTERNAL)
+
+/* Minimum BPR for Secure, or when security is not enabled */
+#define GIC_MIN_BPR 0
+/* Minimum BPR for Nonsecure when security is enabled */
+#define GIC_MIN_BPR_NS (GIC_MIN_BPR + 1)
+
+/* For some distributor fields we want to model the array of 32-bit
+ * register values which hold various bitmaps corresponding to enabled,
+ * pending, etc bits. These macros and functions facilitate that; the
+ * APIs are generally modelled on the generic bitmap.h functions
+ * (which are unsuitable here because they use 'unsigned long' as the
+ * underlying storage type, which is very awkward when you need to
+ * access the data as 32-bit values.)
+ * Each bitmap contains a bit for each interrupt. Although there is
+ * space for the PPIs and SGIs, those bits (the first 32) are never
+ * used as that state lives in the redistributor. The unused bits are
+ * provided purely so that interrupt X's state is always in bit X; this
+ * avoids bugs where we forget to subtract GIC_INTERNAL from an
+ * interrupt number.
+ */
+#define GICV3_BMP_SIZE (DIV_ROUND_UP(GICV3_MAXIRQ, 32))
+
+#define GIC_DECLARE_BITMAP(name) \
+ uint32_t name[GICV3_BMP_SIZE]
+
+#define GIC_BIT_MASK(nr) (1U << ((nr) % 32))
+#define GIC_BIT_WORD(nr) ((nr) / 32)
+
+static inline void gic_bmp_set_bit(int nr, uint32_t *addr)
+{
+ uint32_t mask = GIC_BIT_MASK(nr);
+ uint32_t *p = addr + GIC_BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static inline void gic_bmp_clear_bit(int nr, uint32_t *addr)
+{
+ uint32_t mask = GIC_BIT_MASK(nr);
+ uint32_t *p = addr + GIC_BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+static inline int gic_bmp_test_bit(int nr, const uint32_t *addr)
+{
+ return 1U & (addr[GIC_BIT_WORD(nr)] >> (nr & 31));
+}
+
+static inline void gic_bmp_replace_bit(int nr, uint32_t *addr, int val)
+{
+ uint32_t mask = GIC_BIT_MASK(nr);
+ uint32_t *p = addr + GIC_BIT_WORD(nr);
+
+ *p &= ~mask;
+ *p |= (val & 1U) << (nr % 32);
+}
+
+/* Return a pointer to the 32-bit word containing the specified bit. */
+static inline uint32_t *gic_bmp_ptr32(uint32_t *addr, int nr)
+{
+ return addr + GIC_BIT_WORD(nr);
+}
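A minimal usage sketch (illustrative, not part of the patch): the helpers operate on plain uint32_t arrays declared with GIC_DECLARE_BITMAP, and the bit index is simply the interrupt number:

    GIC_DECLARE_BITMAP(pending);             /* uint32_t pending[GICV3_BMP_SIZE] */

    memset(pending, 0, sizeof(pending));
    gic_bmp_set_bit(45, pending);            /* bit index == interrupt number */
    assert(gic_bmp_test_bit(45, pending));
    gic_bmp_replace_bit(45, pending, 0);     /* equivalent to clearing it here */
    assert(!gic_bmp_test_bit(45, pending));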
+
+typedef struct GICv3State GICv3State;
+typedef struct GICv3CPUState GICv3CPUState;
+
+/* Some CPU interface registers come in three flavours:
+ * Group0, Group1 (Secure) and Group1 (NonSecure)
+ * (where the latter two are exposed as a single banked system register).
+ * In the state struct they are implemented as a 3-element array which
+ * can be indexed into by the GICV3_G0, GICV3_G1 and GICV3_G1NS constants.
+ * If the CPU doesn't support EL3 then the G1 element is unused.
+ *
+ * These constants are also used to communicate the group to use for
+ * an interrupt or SGI when it is passed between the cpu interface and
+ * the redistributor or distributor. For those purposes the receiving end
+ * must be prepared to cope with a Group 1 Secure interrupt even if it does
+ * not have security support enabled, because security can be disabled
+ * independently in the CPU and in the GIC. In that case the receiver should
+ * treat an incoming Group 1 Secure interrupt as if it were Group 0.
+ * (This architectural requirement is why the _G1 element is the unused one
+ * in a no-EL3 CPU: we would otherwise have to translate back and forth
+ * between (G0, G1NS) from the distributor and (G0, G1) in the CPU i/f.)
+ */
+#define GICV3_G0 0
+#define GICV3_G1 1
+#define GICV3_G1NS 2
+
+/* ICC_CTLR_EL1, GICD_STATUSR and GICR_STATUSR are banked but not
+ * group-related, so those indices are just 0 for S and 1 for NS.
+ * (If the CPU or the GIC, respectively, don't support the Security
+ * extensions then the S element is unused.)
+ */
+#define GICV3_S 0
+#define GICV3_NS 1
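As a small illustration (not from the patch; cs and s are pointers to the GICv3CPUState and GICv3State structs defined just below), code using these indices reads naturally:

    uint64_t bpr  = cs->icc_bpr[GICV3_G1NS];     /* Non-secure Group 1 BPR copy */
    uint32_t stat = s->gicd_statusr[GICV3_NS];   /* Non-secure GICD_STATUSR */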
+
+typedef struct {
+ int irq;
+ uint8_t prio;
+ int grp;
+} PendingIrq;
+
+struct GICv3CPUState {
+ GICv3State *gic;
+ CPUState *cpu;
+ qemu_irq parent_irq;
+ qemu_irq parent_fiq;
+
+ /* Redistributor */
+ uint32_t level; /* Current IRQ level */
+ /* RD_base page registers */
+ uint32_t gicr_ctlr;
+ uint64_t gicr_typer;
+ uint32_t gicr_statusr[2];
+ uint32_t gicr_waker;
+ uint64_t gicr_propbaser;
+ uint64_t gicr_pendbaser;
+ /* SGI_base page registers */
+ uint32_t gicr_igroupr0;
+ uint32_t gicr_ienabler0;
+ uint32_t gicr_ipendr0;
+ uint32_t gicr_iactiver0;
+ uint32_t edge_trigger; /* ICFGR0 and ICFGR1 even bits */
+ uint32_t gicr_igrpmodr0;
+ uint32_t gicr_nsacr;
+ uint8_t gicr_ipriorityr[GIC_INTERNAL];
+
+ /* CPU interface */
+ uint64_t icc_ctlr_el1[2];
+ uint64_t icc_pmr_el1;
+ uint64_t icc_bpr[3];
+ uint64_t icc_apr[3][4];
+ uint64_t icc_igrpen[3];
+ uint64_t icc_ctlr_el3;
+
+ /* Current highest priority pending interrupt for this CPU.
+ * This is cached information that can be recalculated from the
+ * real state above; it doesn't need to be migrated.
+ */
+ PendingIrq hppi;
+ /* This is temporary working state, to avoid a malloc in gicv3_update() */
+ bool seenbetter;
+};
+
+struct GICv3State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
- qemu_irq *parent_irq;
- qemu_irq *parent_fiq;
-
MemoryRegion iomem_dist; /* Distributor */
MemoryRegion iomem_redist; /* Redistributors */
@@ -41,9 +191,62 @@ typedef struct GICv3State {
uint32_t num_irq;
uint32_t revision;
bool security_extn;
+ bool irq_reset_nonsecure;
int dev_fd; /* kvm device fd if backed by kvm vgic support */
-} GICv3State;
+ Error *migration_blocker;
+
+ /* Distributor */
+
+ /* for a GIC with the security extensions the NS banked version of this
+ * register is just an alias of bit 1 of the S banked version.
+ */
+ uint32_t gicd_ctlr;
+ uint32_t gicd_statusr[2];
+ GIC_DECLARE_BITMAP(group); /* GICD_IGROUPR */
+ GIC_DECLARE_BITMAP(grpmod); /* GICD_IGRPMODR */
+ GIC_DECLARE_BITMAP(enabled); /* GICD_ISENABLER */
+ GIC_DECLARE_BITMAP(pending); /* GICD_ISPENDR */
+ GIC_DECLARE_BITMAP(active); /* GICD_ISACTIVER */
+ GIC_DECLARE_BITMAP(level); /* Current level */
+ GIC_DECLARE_BITMAP(edge_trigger); /* GICD_ICFGR even bits */
+ uint8_t gicd_ipriority[GICV3_MAXIRQ];
+ uint64_t gicd_irouter[GICV3_MAXIRQ];
+ /* Cached information: pointer to the cpu i/f for the CPUs specified
+ * in the IROUTER registers
+ */
+ GICv3CPUState *gicd_irouter_target[GICV3_MAXIRQ];
+ uint32_t gicd_nsacr[DIV_ROUND_UP(GICV3_MAXIRQ, 16)];
+
+ GICv3CPUState *cpu;
+};
+
+#define GICV3_BITMAP_ACCESSORS(BMP) \
+ static inline void gicv3_gicd_##BMP##_set(GICv3State *s, int irq) \
+ { \
+ gic_bmp_set_bit(irq, s->BMP); \
+ } \
+ static inline int gicv3_gicd_##BMP##_test(GICv3State *s, int irq) \
+ { \
+ return gic_bmp_test_bit(irq, s->BMP); \
+ } \
+ static inline void gicv3_gicd_##BMP##_clear(GICv3State *s, int irq) \
+ { \
+ gic_bmp_clear_bit(irq, s->BMP); \
+ } \
+ static inline void gicv3_gicd_##BMP##_replace(GICv3State *s, \
+ int irq, int value) \
+ { \
+ gic_bmp_replace_bit(irq, s->BMP, value); \
+ }
+
+GICV3_BITMAP_ACCESSORS(group)
+GICV3_BITMAP_ACCESSORS(grpmod)
+GICV3_BITMAP_ACCESSORS(enabled)
+GICV3_BITMAP_ACCESSORS(pending)
+GICV3_BITMAP_ACCESSORS(active)
+GICV3_BITMAP_ACCESSORS(level)
+GICV3_BITMAP_ACCESSORS(edge_trigger)
#define TYPE_ARM_GICV3_COMMON "arm-gicv3-common"
#define ARM_GICV3_COMMON(obj) \
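The GICV3_BITMAP_ACCESSORS expansions give distributor code one named set/test/clear/replace quartet per register bitmap. A hedged sketch of the resulting call style (the surrounding condition is invented for illustration; s is a GICv3State * and irq a distributor interrupt number):

    if (gicv3_gicd_enabled_test(s, irq) && gicv3_gicd_group_test(s, irq)) {
        gicv3_gicd_pending_set(s, irq);            /* GICD_ISPENDR-style write */
    }
    gicv3_gicd_edge_trigger_replace(s, irq, 1);    /* GICD_ICFGR: mark edge-triggered */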
diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h
index 44dc2f8..bd6c1a7 100644
--- a/include/hw/timer/aspeed_timer.h
+++ b/include/hw/timer/aspeed_timer.h
@@ -22,7 +22,7 @@
#ifndef ASPEED_TIMER_H
#define ASPEED_TIMER_H
-#include "hw/ptimer.h"
+#include "qemu/timer.h"
#define ASPEED_TIMER(obj) \
OBJECT_CHECK(AspeedTimerCtrlState, (obj), TYPE_ASPEED_TIMER);
@@ -33,15 +33,16 @@ typedef struct AspeedTimer {
qemu_irq irq;
uint8_t id;
+ QEMUTimer timer;
/**
* Track the line level as the ASPEED timers implement edge triggered
* interrupts, signalling with both the rising and falling edge.
*/
int32_t level;
- ptimer_state *timer;
uint32_t reload;
uint32_t match[2];
+ uint64_t start;
} AspeedTimer;
typedef struct AspeedTimerCtrlState {
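Replacing the ptimer with an embedded QEMUTimer plus a start timestamp implies the device now arms the timer directly against a QEMU clock. A rough sketch of what that could look like (assumed, not shown in this header; aspeed_timer_expired and duration_ns are hypothetical names, while the timer.h calls are the standard QEMU APIs):

    timer_init_ns(&t->timer, QEMU_CLOCK_VIRTUAL, aspeed_timer_expired, t);
    t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    timer_mod(&t->timer, t->start + duration_ns);   /* fire duration_ns from now */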
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 6c65811..25ea58a 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -856,6 +856,12 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_UINT64_ARRAY(_f, _s, _n) \
VMSTATE_UINT64_ARRAY_V(_f, _s, _n, 0)
+#define VMSTATE_UINT64_2DARRAY(_f, _s, _n1, _n2) \
+ VMSTATE_UINT64_2DARRAY_V(_f, _s, _n1, _n2, 0)
+
+#define VMSTATE_UINT64_2DARRAY_V(_f, _s, _n1, _n2, _v) \
+ VMSTATE_2DARRAY(_f, _s, _n1, _n2, _v, vmstate_info_uint64, uint64_t)
+
#define VMSTATE_INT16_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int16, int16_t)
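The new macro describes a fixed-size two-dimensional uint64_t field of _n1 x _n2 elements. For instance (a sketch only; the field comes from the GICv3 CPU state above, though the actual migration description is added elsewhere in the series):

    VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),   /* matches icc_apr[3][4] */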
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 755fdd1..15418a8 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -428,4 +428,112 @@ static inline uint64_t deposit64(uint64_t value, int start, int length,
return (value & ~mask) | ((fieldval << start) & mask);
}
+/**
+ * half_shuffle32:
+ * @x: 32-bit value (of which only the bottom 16 bits are of interest)
+ *
+ * Given an input value:
+ * xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP
+ * return the value where the bottom 16 bits are spread out into
+ * the odd bits in the word, and the even bits are zeroed:
+ * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P
+ *
+ * Any bits set in the top half of the input are ignored.
+ *
+ * Returns: the shuffled bits.
+ */
+static inline uint32_t half_shuffle32(uint32_t x)
+{
+ /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits".
+ * It ignores any bits set in the top half of the input.
+ */
+ x = ((x & 0xFF00) << 8) | (x & 0x00FF);
+ x = ((x << 4) | x) & 0x0F0F0F0F;
+ x = ((x << 2) | x) & 0x33333333;
+ x = ((x << 1) | x) & 0x55555555;
+ return x;
+}
+
+/**
+ * half_shuffle64:
+ * @x: 64-bit value (of which only the bottom 32 bits are of interest)
+ *
+ * Given an input value:
+ * xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
+ * return the value where the bottom 32 bits are spread out into
+ * the odd bits in the word, and the even bits are zeroed:
+ * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f
+ *
+ * Any bits set in the top half of the input are ignored.
+ *
+ * Returns: the shuffled bits.
+ */
+static inline uint64_t half_shuffle64(uint64_t x)
+{
+ /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits".
+ * It ignores any bits set in the top half of the input.
+ */
+ x = ((x & 0xFFFF0000ULL) << 16) | (x & 0xFFFF);
+ x = ((x << 8) | x) & 0x00FF00FF00FF00FFULL;
+ x = ((x << 4) | x) & 0x0F0F0F0F0F0F0F0FULL;
+ x = ((x << 2) | x) & 0x3333333333333333ULL;
+ x = ((x << 1) | x) & 0x5555555555555555ULL;
+ return x;
+}
+
+/**
+ * half_unshuffle32:
+ * @x: 32-bit value (of which only the odd bits are of interest)
+ *
+ * Given an input value:
+ * xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP
+ * return the value where all the odd bits are compressed down
+ * into the low half of the word, and the high half is zeroed:
+ * 0000 0000 0000 0000 ABCD EFGH IJKL MNOP
+ *
+ * Any even bits set in the input are ignored.
+ *
+ * Returns: the unshuffled bits.
+ */
+static inline uint32_t half_unshuffle32(uint32_t x)
+{
+ /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits",
+ * where it is called an inverse half shuffle.
+ */
+ x &= 0x55555555;
+ x = ((x >> 1) | x) & 0x33333333;
+ x = ((x >> 2) | x) & 0x0F0F0F0F;
+ x = ((x >> 4) | x) & 0x00FF00FF;
+ x = ((x >> 8) | x) & 0x0000FFFF;
+ return x;
+}
+
+/**
+ * half_unshuffle64:
+ * @x: 64-bit value (of which only the odd bits are of interest)
+ *
+ * Given an input value:
+ * xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf
+ * return the value where all the odd bits are compressed down
+ * into the low half of the word, and the high half is zeroed:
+ * 0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
+ *
+ * Any even bits set in the input are ignored.
+ *
+ * Returns: the unshuffled bits.
+ */
+static inline uint64_t half_unshuffle64(uint64_t x)
+{
+ /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits",
+ * where it is called an inverse half shuffle.
+ */
+ x &= 0x5555555555555555ULL;
+ x = ((x >> 1) | x) & 0x3333333333333333ULL;
+ x = ((x >> 2) | x) & 0x0F0F0F0F0F0F0F0FULL;
+ x = ((x >> 4) | x) & 0x00FF00FF00FF00FFULL;
+ x = ((x >> 8) | x) & 0x0000FFFF0000FFFFULL;
+ x = ((x >> 16) | x) & 0x00000000FFFFFFFFULL;
+ return x;
+}
+
#endif
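A few illustrative checks of the new helpers (not part of the patch): shuffling spreads the low half of the input across alternate bit positions, unshuffling inverts it, and a round trip keeps exactly the low half:

    assert(half_shuffle32(0x0000FFFF) == 0x55555555);
    assert(half_unshuffle32(0x55555555) == 0x0000FFFF);
    assert(half_unshuffle32(half_shuffle32(0xABCD1234)) == 0x1234);
    assert(half_shuffle64(0xFFFFFFFFULL) == 0x5555555555555555ULL);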