author    Peter Maydell <peter.maydell@linaro.org>  2018-01-31 15:50:29 +0000
committer Peter Maydell <peter.maydell@linaro.org>  2018-01-31 15:50:29 +0000
commit    b05631954d6dfe93340d516660397e2c1a2a5dd6 (patch)
tree      33a95992695411ba93cfeec651b358ecc2fd746f
parent    6521130b0a7f699fdb82446d57df5627bfa7ed3c (diff)
parent    eed142195c95a6c50545fa7a28e725d780ab9636 (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-hppa-20180131' into staging
Implement hppa-softmmu

# gpg: Signature made Wed 31 Jan 2018 14:19:06 GMT
# gpg:                using RSA key 0x64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-hppa-20180131: (43 commits)
  target/hppa: Implement PROBE for system mode
  target/hppa: Fix 32-bit operand masks for 0E FCVT
  hw/hppa: Add MAINTAINERS entry
  pc-bios: Add hppa-firmware.img and git submodule
  hw/hppa: Implement DINO system board
  target/hppa: Enable MTTCG
  target/hppa: Implement STWA
  target/hppa: Implement a pause instruction
  target/hppa: Implement LDSID for system mode
  target/hppa: Fix comment
  target/hppa: Increase number of temp regs
  target/hppa: Only use EXCP_DTLB_MISS
  target/hppa: Implement B,GATE insn
  target/hppa: Add migration for the cpu
  target/hppa: Add system registers to gdbstub
  target/hppa: Optimize for flat addressing space
  target/hppa: Implement halt and reset instructions
  target/hppa: Implement SYNCDMA insn
  target/hppa: Implement LCI
  target/hppa: Implement LPA
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  .gitmodules                        |    3
-rw-r--r--  MAINTAINERS                        |    1
-rw-r--r--  Makefile                           |    3
-rw-r--r--  Makefile.objs                      |    1
-rw-r--r--  arch_init.c                        |    2
-rwxr-xr-x  configure                          |    1
-rw-r--r--  default-configs/hppa-softmmu.mak   |   14
-rw-r--r--  hw/hppa/Makefile.objs              |    1
-rw-r--r--  hw/hppa/dino.c                     |  518
-rw-r--r--  hw/hppa/hppa_hardware.h            |   40
-rw-r--r--  hw/hppa/hppa_sys.h                 |   24
-rw-r--r--  hw/hppa/machine.c                  |  283
-rw-r--r--  hw/hppa/pci.c                      |   90
-rw-r--r--  hw/hppa/trace-events               |    4
-rw-r--r--  include/sysemu/arch_init.h         |    1
-rw-r--r--  linux-user/hppa/target_cpu.h       |    2
-rw-r--r--  linux-user/main.c                  |   28
-rw-r--r--  linux-user/signal.c                |    4
-rwxr-xr-x  pc-bios/hppa-firmware.img          |  bin 0 -> 461352 bytes
m---------  roms/seabios-hppa                  |    0
-rw-r--r--  target/hppa/Makefile.objs          |    4
-rw-r--r--  target/hppa/cpu.c                  |   56
-rw-r--r--  target/hppa/cpu.h                  |  284
-rw-r--r--  target/hppa/gdbstub.c              |  187
-rw-r--r--  target/hppa/helper.c               |  120
-rw-r--r--  target/hppa/helper.h               |   43
-rw-r--r--  target/hppa/int_helper.c           |  263
-rw-r--r--  target/hppa/machine.c              |  181
-rw-r--r--  target/hppa/mem_helper.c           |  348
-rw-r--r--  target/hppa/op_helper.c            |  152
-rw-r--r--  target/hppa/translate.c            | 2108
31 files changed, 4061 insertions, 705 deletions
diff --git a/.gitmodules b/.gitmodules
index 1500579..7a8282d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -40,3 +40,6 @@
[submodule "capstone"]
path = capstone
url = git://git.qemu.org/capstone.git
+[submodule "roms/seabios-hppa"]
+ path = roms/seabios-hppa
+ url = git://github.com/hdeller/seabios-hppa.git
diff --git a/MAINTAINERS b/MAINTAINERS
index fe39b30..f8deaf6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -133,6 +133,7 @@ HPPA (PA-RISC)
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: target/hppa/
+F: hw/hppa/
F: disas/hppa.c
LM32
diff --git a/Makefile b/Makefile
index 7073b3d..7d35ea1 100644
--- a/Makefile
+++ b/Makefile
@@ -661,7 +661,8 @@ s390-ccw.img s390-netboot.img \
spapr-rtas.bin slof.bin skiboot.lid \
palcode-clipper \
u-boot.e500 \
-qemu_vga.ndrv
+qemu_vga.ndrv \
+hppa-firmware.img
else
BLOBS=
endif
diff --git a/Makefile.objs b/Makefile.objs
index 323ef12..2efba6d 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -156,6 +156,7 @@ trace-events-subdirs += hw/vfio
trace-events-subdirs += hw/acpi
trace-events-subdirs += hw/arm
trace-events-subdirs += hw/alpha
+trace-events-subdirs += hw/hppa
trace-events-subdirs += hw/xen
trace-events-subdirs += hw/ide
trace-events-subdirs += ui
diff --git a/arch_init.c b/arch_init.c
index a0b8ed6..4c36f2b 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -53,6 +53,8 @@ int graphic_depth = 32;
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
+#elif defined(TARGET_HPPA)
+#define QEMU_ARCH QEMU_ARCH_HPPA
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
diff --git a/configure b/configure
index 96dee65..302fdc9 100755
--- a/configure
+++ b/configure
@@ -6555,6 +6555,7 @@ case "$target_name" in
cris)
;;
hppa)
+ mttcg="yes"
;;
lm32)
;;
diff --git a/default-configs/hppa-softmmu.mak b/default-configs/hppa-softmmu.mak
new file mode 100644
index 0000000..013e5f0
--- /dev/null
+++ b/default-configs/hppa-softmmu.mak
@@ -0,0 +1,14 @@
+include pci.mak
+include usb.mak
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_ISA_BUS=y
+CONFIG_I8259=y
+CONFIG_VIRTIO_PCI=$(CONFIG_PCI)
+CONFIG_VIRTIO=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_ISA=y
+CONFIG_IDE_CMD646=y
+# CONFIG_IDE_MMIO=y
+CONFIG_VIRTIO_VGA=y
+CONFIG_MC146818RTC=y
diff --git a/hw/hppa/Makefile.objs b/hw/hppa/Makefile.objs
new file mode 100644
index 0000000..bef241e
--- /dev/null
+++ b/hw/hppa/Makefile.objs
@@ -0,0 +1 @@
+obj-y += machine.o pci.o dino.o
diff --git a/hw/hppa/dino.c b/hw/hppa/dino.c
new file mode 100644
index 0000000..15aefde
--- /dev/null
+++ b/hw/hppa/dino.c
@@ -0,0 +1,518 @@
+/*
+ * HP-PARISC Dino PCI chipset emulation.
+ *
+ * (C) 2017 by Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * Documentation available at:
+ * https://parisc.wiki.kernel.org/images-parisc/9/91/Dino_ers.pdf
+ * https://parisc.wiki.kernel.org/images-parisc/7/70/Dino_3_1_Errata.pdf
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/devices.h"
+#include "sysemu/sysemu.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_bus.h"
+#include "hppa_sys.h"
+#include "exec/address-spaces.h"
+
+
+#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost"
+
+#define DINO_IAR0 0x004
+#define DINO_IODC 0x008
+#define DINO_IRR0 0x00C /* RO */
+#define DINO_IAR1 0x010
+#define DINO_IRR1 0x014 /* RO */
+#define DINO_IMR 0x018
+#define DINO_IPR 0x01C
+#define DINO_TOC_ADDR 0x020
+#define DINO_ICR 0x024
+#define DINO_ILR 0x028 /* RO */
+#define DINO_IO_COMMAND 0x030 /* WO */
+#define DINO_IO_STATUS 0x034 /* RO */
+#define DINO_IO_CONTROL 0x038
+#define DINO_IO_GSC_ERR_RESP 0x040 /* RO */
+#define DINO_IO_ERR_INFO 0x044 /* RO */
+#define DINO_IO_PCI_ERR_RESP 0x048 /* RO */
+#define DINO_IO_FBB_EN 0x05c
+#define DINO_IO_ADDR_EN 0x060
+#define DINO_PCI_CONFIG_ADDR 0x064
+#define DINO_PCI_CONFIG_DATA 0x068
+#define DINO_PCI_IO_DATA 0x06c
+#define DINO_PCI_MEM_DATA 0x070 /* Dino 3.x only */
+#define DINO_GSC2X_CONFIG 0x7b4 /* RO */
+#define DINO_GMASK 0x800
+#define DINO_PAMR 0x804
+#define DINO_PAPR 0x808
+#define DINO_DAMODE 0x80c
+#define DINO_PCICMD 0x810
+#define DINO_PCISTS 0x814 /* R/WC */
+#define DINO_MLTIM 0x81c
+#define DINO_BRDG_FEAT 0x820
+#define DINO_PCIROR 0x824
+#define DINO_PCIWOR 0x828
+#define DINO_TLTIM 0x830
+
+#define DINO_IRQS 11 /* bits 0-10 are architected */
+#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
+#define DINO_LOCAL_IRQS (DINO_IRQS + 1)
+#define DINO_MASK_IRQ(x) (1 << (x))
+
+#define PCIINTA 0x001
+#define PCIINTB 0x002
+#define PCIINTC 0x004
+#define PCIINTD 0x008
+#define PCIINTE 0x010
+#define PCIINTF 0x020
+#define GSCEXTINT 0x040
+/* #define xxx 0x080 - bit 7 is "default" */
+/* #define xxx 0x100 - bit 8 not used */
+/* #define xxx 0x200 - bit 9 not used */
+#define RS232INT 0x400
+
+#define DINO_MEM_CHUNK_SIZE (8 * 1024 * 1024) /* 8MB */
+
+#define DINO_PCI_HOST_BRIDGE(obj) \
+ OBJECT_CHECK(DinoState, (obj), TYPE_DINO_PCI_HOST_BRIDGE)
+
+typedef struct DinoState {
+ PCIHostState parent_obj;
+
+ /* PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops,
+ so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops. */
+
+ uint32_t iar0;
+ uint32_t iar1;
+ uint32_t imr;
+ uint32_t ipr;
+ uint32_t icr;
+ uint32_t ilr;
+ uint32_t io_addr_en;
+ uint32_t io_control;
+
+ MemoryRegion this_mem;
+ MemoryRegion pci_mem;
+ MemoryRegion pci_mem_alias[32];
+
+ AddressSpace bm_as;
+ MemoryRegion bm;
+ MemoryRegion bm_ram_alias;
+ MemoryRegion bm_pci_alias;
+
+ MemoryRegion cpu0_eir_mem;
+} DinoState;
+
+/*
+ * Dino can forward memory accesses from the CPU in the range between
+ * 0xf0800000 and 0xff000000 to the PCI bus.
+ */
+static void gsc_to_pci_forwarding(DinoState *s)
+{
+ uint32_t io_addr_en, tmp;
+ int enabled, i;
+
+ tmp = extract32(s->io_control, 7, 2);
+ enabled = (tmp == 0x01);
+ io_addr_en = s->io_addr_en;
+
+ memory_region_transaction_begin();
+ for (i = 1; i < 31; i++) {
+ MemoryRegion *mem = &s->pci_mem_alias[i];
+ if (enabled && (io_addr_en & (1U << i))) {
+ if (!memory_region_is_mapped(mem)) {
+ uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE;
+ memory_region_add_subregion(get_system_memory(), addr, mem);
+ }
+ } else if (memory_region_is_mapped(mem)) {
+ memory_region_del_subregion(get_system_memory(), mem);
+ }
+ }
+ memory_region_transaction_commit();
+}
+
+static bool dino_chip_mem_valid(void *opaque, hwaddr addr,
+ unsigned size, bool is_write)
+{
+ switch (addr) {
+ case DINO_IAR0:
+ case DINO_IAR1:
+ case DINO_IRR0:
+ case DINO_IRR1:
+ case DINO_IMR:
+ case DINO_IPR:
+ case DINO_ICR:
+ case DINO_ILR:
+ case DINO_IO_CONTROL:
+ case DINO_IO_ADDR_EN:
+ case DINO_PCI_IO_DATA:
+ return true;
+ case DINO_PCI_IO_DATA + 2:
+ return size <= 2;
+ case DINO_PCI_IO_DATA + 1:
+ case DINO_PCI_IO_DATA + 3:
+ return size == 1;
+ }
+ return false;
+}
+
+static MemTxResult dino_chip_read_with_attrs(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ DinoState *s = opaque;
+ MemTxResult ret = MEMTX_OK;
+ AddressSpace *io;
+ uint16_t ioaddr;
+ uint32_t val;
+
+ switch (addr) {
+ case DINO_PCI_IO_DATA ... DINO_PCI_IO_DATA + 3:
+ /* Read from PCI IO space. */
+ io = &address_space_io;
+ ioaddr = s->parent_obj.config_reg;
+ switch (size) {
+ case 1:
+ val = address_space_ldub(io, ioaddr, attrs, &ret);
+ break;
+ case 2:
+ val = address_space_lduw_be(io, ioaddr, attrs, &ret);
+ break;
+ case 4:
+ val = address_space_ldl_be(io, ioaddr, attrs, &ret);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case DINO_IO_ADDR_EN:
+ val = s->io_addr_en;
+ break;
+ case DINO_IO_CONTROL:
+ val = s->io_control;
+ break;
+
+ case DINO_IAR0:
+ val = s->iar0;
+ break;
+ case DINO_IAR1:
+ val = s->iar1;
+ break;
+ case DINO_IMR:
+ val = s->imr;
+ break;
+ case DINO_ICR:
+ val = s->icr;
+ break;
+ case DINO_IPR:
+ val = s->ipr;
+ /* Any read to IPR clears the register. */
+ s->ipr = 0;
+ break;
+ case DINO_ILR:
+ val = s->ilr;
+ break;
+ case DINO_IRR0:
+ val = s->ilr & s->imr & ~s->icr;
+ break;
+ case DINO_IRR1:
+ val = s->ilr & s->imr & s->icr;
+ break;
+
+ default:
+ /* Controlled by dino_chip_mem_valid above. */
+ g_assert_not_reached();
+ }
+
+ *data = val;
+ return ret;
+}
+
+static MemTxResult dino_chip_write_with_attrs(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size,
+ MemTxAttrs attrs)
+{
+ DinoState *s = opaque;
+ AddressSpace *io;
+ MemTxResult ret;
+ uint16_t ioaddr;
+
+ switch (addr) {
+ case DINO_IO_DATA ... DINO_PCI_IO_DATA + 3:
+ /* Write into PCI IO space. */
+ io = &address_space_io;
+ ioaddr = s->parent_obj.config_reg;
+ switch (size) {
+ case 1:
+ address_space_stb(io, ioaddr, val, attrs, &ret);
+ break;
+ case 2:
+ address_space_stw_be(io, ioaddr, val, attrs, &ret);
+ break;
+ case 4:
+ address_space_stl_be(io, ioaddr, val, attrs, &ret);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return ret;
+
+ case DINO_IO_ADDR_EN:
+ /* Never allow first (=firmware) and last (=Dino) areas. */
+ s->io_addr_en = val & 0x7ffffffe;
+ gsc_to_pci_forwarding(s);
+ break;
+ case DINO_IO_CONTROL:
+ s->io_control = val;
+ gsc_to_pci_forwarding(s);
+ break;
+
+ case DINO_IAR0:
+ s->iar0 = val;
+ break;
+ case DINO_IAR1:
+ s->iar1 = val;
+ break;
+ case DINO_IMR:
+ s->imr = val;
+ break;
+ case DINO_ICR:
+ s->icr = val;
+ break;
+ case DINO_IPR:
+ /* Any write to IPR clears the register. */
+ s->ipr = 0;
+ break;
+
+ case DINO_ILR:
+ case DINO_IRR0:
+ case DINO_IRR1:
+ /* These registers are read-only. */
+ break;
+
+ default:
+ /* Controlled by dino_chip_mem_valid above. */
+ g_assert_not_reached();
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps dino_chip_ops = {
+ .read_with_attrs = dino_chip_read_with_attrs,
+ .write_with_attrs = dino_chip_write_with_attrs,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ .accepts = dino_chip_mem_valid,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static const VMStateDescription vmstate_dino = {
+ .name = "Dino",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(iar0, DinoState),
+ VMSTATE_UINT32(iar1, DinoState),
+ VMSTATE_UINT32(imr, DinoState),
+ VMSTATE_UINT32(ipr, DinoState),
+ VMSTATE_UINT32(icr, DinoState),
+ VMSTATE_UINT32(ilr, DinoState),
+ VMSTATE_UINT32(io_addr_en, DinoState),
+ VMSTATE_UINT32(io_control, DinoState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+
+/* Unlike pci_config_data_le_ops, no check of high bit set in config_reg. */
+
+static uint64_t dino_config_data_read(void *opaque, hwaddr addr, unsigned len)
+{
+ PCIHostState *s = opaque;
+ return pci_data_read(s->bus, s->config_reg | (addr & 3), len);
+}
+
+static void dino_config_data_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned len)
+{
+ PCIHostState *s = opaque;
+ pci_data_write(s->bus, s->config_reg | (addr & 3), val, len);
+}
+
+static const MemoryRegionOps dino_config_data_ops = {
+ .read = dino_config_data_read,
+ .write = dino_config_data_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static AddressSpace *dino_pcihost_set_iommu(PCIBus *bus, void *opaque,
+ int devfn)
+{
+ DinoState *s = opaque;
+
+ return &s->bm_as;
+}
+
+/*
+ * Dino interrupts are connected as shown on Page 78, Table 23
+ * (Little-endian bit numbers)
+ * 0 PCI INTA
+ * 1 PCI INTB
+ * 2 PCI INTC
+ * 3 PCI INTD
+ * 4 PCI INTE
+ * 5 PCI INTF
+ * 6 GSC External Interrupt
+ * 7 Bus Error for "less than fatal" mode
+ * 8 PS2
+ * 9 Unused
+ * 10 RS232
+ */
+
+static void dino_set_irq(void *opaque, int irq, int level)
+{
+ DinoState *s = opaque;
+ uint32_t bit = 1u << irq;
+ uint32_t old_ilr = s->ilr;
+
+ if (level) {
+ uint32_t ena = bit & ~old_ilr;
+ s->ipr |= ena;
+ s->ilr = old_ilr | bit;
+ if (ena & s->imr) {
+ uint32_t iar = (ena & s->icr ? s->iar1 : s->iar0);
+ stl_be_phys(&address_space_memory, iar & -32, iar & 31);
+ }
+ } else {
+ s->ilr = old_ilr & ~bit;
+ }
+}
+
+static int dino_pci_map_irq(PCIDevice *d, int irq_num)
+{
+ int slot = d->devfn >> 3;
+ int local_irq;
+
+ assert(irq_num >= 0 && irq_num <= 3);
+
+ local_irq = slot & 0x03;
+
+ return local_irq;
+}
+
+static void dino_set_timer_irq(void *opaque, int irq, int level)
+{
+ /* ??? Not connected. */
+}
+
+static void dino_set_serial_irq(void *opaque, int irq, int level)
+{
+ dino_set_irq(opaque, 10, level);
+}
+
+PCIBus *dino_init(MemoryRegion *addr_space,
+ qemu_irq *p_rtc_irq, qemu_irq *p_ser_irq)
+{
+ DeviceState *dev;
+ DinoState *s;
+ PCIBus *b;
+ int i;
+
+ dev = qdev_create(NULL, TYPE_DINO_PCI_HOST_BRIDGE);
+ s = DINO_PCI_HOST_BRIDGE(dev);
+
+ /* Dino PCI access from main memory. */
+ memory_region_init_io(&s->this_mem, OBJECT(s), &dino_chip_ops,
+ s, "dino", 4096);
+ memory_region_add_subregion(addr_space, DINO_HPA, &s->this_mem);
+
+ /* Dino PCI config. */
+ memory_region_init_io(&s->parent_obj.conf_mem, OBJECT(&s->parent_obj),
+ &pci_host_conf_be_ops, dev, "pci-conf-idx", 4);
+ memory_region_init_io(&s->parent_obj.data_mem, OBJECT(&s->parent_obj),
+ &dino_config_data_ops, dev, "pci-conf-data", 4);
+ memory_region_add_subregion(&s->this_mem, DINO_PCI_CONFIG_ADDR,
+ &s->parent_obj.conf_mem);
+ memory_region_add_subregion(&s->this_mem, DINO_CONFIG_DATA,
+ &s->parent_obj.data_mem);
+
+ /* Dino PCI bus memory. */
+ memory_region_init(&s->pci_mem, OBJECT(s), "pci-memory", 1ull << 32);
+
+ b = pci_register_root_bus(dev, "pci", dino_set_irq, dino_pci_map_irq, s,
+ &s->pci_mem, get_system_io(),
+ PCI_DEVFN(0, 0), 32, TYPE_PCI_BUS);
+ s->parent_obj.bus = b;
+ qdev_init_nofail(dev);
+
+ /* Set up windows into PCI bus memory. */
+ for (i = 1; i < 31; i++) {
+ uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE;
+ char *name = g_strdup_printf("PCI Outbound Window %d", i);
+ memory_region_init_alias(&s->pci_mem_alias[i], OBJECT(s),
+ name, &s->pci_mem, addr,
+ DINO_MEM_CHUNK_SIZE);
+ }
+
+ /* Set up PCI view of memory: Bus master address space. */
+ memory_region_init(&s->bm, OBJECT(s), "bm-dino", 1ull << 32);
+ memory_region_init_alias(&s->bm_ram_alias, OBJECT(s),
+ "bm-system", addr_space, 0,
+ 0xf0000000 + DINO_MEM_CHUNK_SIZE);
+ memory_region_init_alias(&s->bm_pci_alias, OBJECT(s),
+ "bm-pci", &s->pci_mem,
+ 0xf0000000 + DINO_MEM_CHUNK_SIZE,
+ 31 * DINO_MEM_CHUNK_SIZE);
+ memory_region_add_subregion(&s->bm, 0,
+ &s->bm_ram_alias);
+ memory_region_add_subregion(&s->bm,
+ 0xf0000000 + DINO_MEM_CHUNK_SIZE,
+ &s->bm_pci_alias);
+ address_space_init(&s->bm_as, &s->bm, "pci-bm");
+ pci_setup_iommu(b, dino_pcihost_set_iommu, s);
+
+ *p_rtc_irq = qemu_allocate_irq(dino_set_timer_irq, s, 0);
+ *p_ser_irq = qemu_allocate_irq(dino_set_serial_irq, s, 0);
+
+ return b;
+}
+
+static int dino_pcihost_init(SysBusDevice *dev)
+{
+ return 0;
+}
+
+static void dino_pcihost_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->init = dino_pcihost_init;
+ dc->vmsd = &vmstate_dino;
+}
+
+static const TypeInfo dino_pcihost_info = {
+ .name = TYPE_DINO_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(DinoState),
+ .class_init = dino_pcihost_class_init,
+};
+
+static void dino_register_types(void)
+{
+ type_register_static(&dino_pcihost_info);
+}
+
+type_init(dino_register_types)
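
The interrupt path implemented by dino_set_irq() above is worth spelling out: a
0->1 edge on an ILR bit latches into IPR, and if the bit is unmasked the chip
delivers the interrupt by storing the low five bits of the selected IAR (IAR1
if the ICR routing bit is set, else IAR0) to the IAR's word-aligned address,
which is the CPU's external-interrupt register. A minimal standalone sketch of
that logic, with the stl_be_phys() store modelled as a printf (the struct and
test values here are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

struct dino_irq_model {
    uint32_t iar0, iar1, imr, ipr, icr, ilr;
};

static void set_irq_model(struct dino_irq_model *s, int irq, int level)
{
    uint32_t bit = 1u << irq;

    if (level) {
        uint32_t ena = bit & ~s->ilr;        /* only 0 -> 1 edges */
        s->ipr |= ena;
        s->ilr |= bit;
        if (ena & s->imr) {                  /* unmasked? */
            uint32_t iar = (ena & s->icr) ? s->iar1 : s->iar0;
            /* The patch does stl_be_phys(&address_space_memory, ...). */
            printf("EIR store: addr=0x%08x data=%u\n",
                   iar & -32u, iar & 31);
        }
    } else {
        s->ilr &= ~bit;
    }
}

int main(void)
{
    /* Route everything to IAR0 = CPU HPA | EIR bit 3 (illustrative). */
    struct dino_irq_model s = { .iar0 = 0xfff10000u | 3, .imr = 1u << 10 };
    set_irq_model(&s, 10, 1);                /* RS232 asserts */
    set_irq_model(&s, 10, 0);                /* and deasserts */
    return 0;
}
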
diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h
new file mode 100644
index 0000000..2c61b1f
--- /dev/null
+++ b/hw/hppa/hppa_hardware.h
@@ -0,0 +1,40 @@
+/* HPPA cores and system support chips. */
+
+#define FIRMWARE_START 0xf0000000
+#define FIRMWARE_END 0xf0800000
+
+#define DEVICE_HPA_LEN 0x00100000
+
+#define GSC_HPA 0xffc00000
+#define DINO_HPA 0xfff80000
+#define DINO_UART_HPA 0xfff83000
+#define DINO_UART_BASE 0xfff83800
+#define DINO_SCSI_HPA 0xfff8c000
+#define LASI_HPA 0xffd00000
+#define LASI_UART_HPA 0xffd05000
+#define LASI_SCSI_HPA 0xffd06000
+#define LASI_LAN_HPA 0xffd07000
+#define LASI_LPT_HPA 0xffd02000
+#define LASI_AUDIO_HPA 0xffd04000
+#define LASI_PS2KBD_HPA 0xffd08000
+#define LASI_PS2MOU_HPA 0xffd08100
+#define LASI_GFX_HPA 0xf8000000
+#define CPU_HPA 0xfff10000
+#define MEMORY_HPA 0xfffbf000
+
+#define PCI_HPA DINO_HPA /* PCI bus */
+#define IDE_HPA 0xf9000000 /* Boot disc controller */
+
+/* offsets to DINO HPA: */
+#define DINO_PCI_ADDR 0x064
+#define DINO_CONFIG_DATA 0x068
+#define DINO_IO_DATA 0x06c
+
+#define PORT_PCI_CMD (PCI_HPA + DINO_PCI_ADDR)
+#define PORT_PCI_DATA (PCI_HPA + DINO_CONFIG_DATA)
+
+#define PORT_SERIAL1 (DINO_UART_HPA + 0x800)
+#define PORT_SERIAL2 (LASI_UART_HPA + 0x800)
+
+#define HPPA_MAX_CPUS 32 /* max. number of SMP CPUs */
+#define CPU_CLOCK_MHZ 250 /* emulate a 250 MHz CPU */
diff --git a/hw/hppa/hppa_sys.h b/hw/hppa/hppa_sys.h
new file mode 100644
index 0000000..a182d1f
--- /dev/null
+++ b/hw/hppa/hppa_sys.h
@@ -0,0 +1,24 @@
+/* HPPA cores and system support chips. */
+
+#ifndef HW_HPPA_SYS_H
+#define HW_HPPA_SYS_H
+
+#include "target/hppa/cpu-qom.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+#include "hw/ide.h"
+#include "hw/i386/pc.h"
+#include "hw/irq.h"
+
+#include "hw/hppa/hppa_hardware.h"
+
+PCIBus *dino_init(MemoryRegion *, qemu_irq *, qemu_irq *);
+
+#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost"
+
+/* hppa_pci.c. */
+extern const MemoryRegionOps hppa_pci_ignore_ops;
+extern const MemoryRegionOps hppa_pci_conf1_ops;
+extern const MemoryRegionOps hppa_pci_iack_ops;
+
+#endif
diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c
new file mode 100644
index 0000000..afd3867
--- /dev/null
+++ b/hw/hppa/machine.c
@@ -0,0 +1,283 @@
+/*
+ * QEMU HPPA hardware system emulator.
+ * Copyright 2018 Helge Deller <deller@gmx.de>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "elf.h"
+#include "hw/loader.h"
+#include "hw/boards.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "hw/timer/mc146818rtc.h"
+#include "hw/ide.h"
+#include "hw/timer/i8254.h"
+#include "hw/char/serial.h"
+#include "hw/hppa/hppa_sys.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
+
+#define MAX_IDE_BUS 2
+
+static ISABus *hppa_isa_bus(void)
+{
+ ISABus *isa_bus;
+ qemu_irq *isa_irqs;
+ MemoryRegion *isa_region;
+
+ isa_region = g_new(MemoryRegion, 1);
+ memory_region_init_io(isa_region, NULL, &hppa_pci_ignore_ops,
+ NULL, "isa-io", 0x800);
+ memory_region_add_subregion(get_system_memory(), IDE_HPA,
+ isa_region);
+
+ isa_bus = isa_bus_new(NULL, get_system_memory(), isa_region,
+ &error_abort);
+ isa_irqs = i8259_init(isa_bus,
+ /* qemu_allocate_irq(dino_set_isa_irq, s, 0)); */
+ NULL);
+ isa_bus_irqs(isa_bus, isa_irqs);
+
+ return isa_bus;
+}
+
+static uint64_t cpu_hppa_to_phys(void *opaque, uint64_t addr)
+{
+ addr &= (0x10000000 - 1);
+ return addr;
+}
+
+static HPPACPU *cpu[HPPA_MAX_CPUS];
+static uint64_t firmware_entry;
+
+static void machine_hppa_init(MachineState *machine)
+{
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ PCIBus *pci_bus;
+ ISABus *isa_bus;
+ qemu_irq rtc_irq, serial_irq;
+ char *firmware_filename;
+ uint64_t firmware_low, firmware_high;
+ long size;
+ uint64_t kernel_entry = 0, kernel_low, kernel_high;
+ MemoryRegion *addr_space = get_system_memory();
+ MemoryRegion *rom_region;
+ MemoryRegion *ram_region;
+ MemoryRegion *cpu_region;
+ long i;
+
+ ram_size = machine->ram_size;
+
+ /* Create CPUs. */
+ for (i = 0; i < smp_cpus; i++) {
+ cpu[i] = HPPA_CPU(cpu_create(machine->cpu_type));
+
+ cpu_region = g_new(MemoryRegion, 1);
+ memory_region_init_io(cpu_region, OBJECT(cpu[i]), &hppa_io_eir_ops,
+ cpu[i], g_strdup_printf("cpu%ld-io-eir", i), 4);
+ memory_region_add_subregion(addr_space, CPU_HPA + i * 0x1000,
+ cpu_region);
+ }
+
+ /* Limit main memory. */
+ if (ram_size > FIRMWARE_START) {
+ machine->ram_size = ram_size = FIRMWARE_START;
+ }
+
+ /* Main memory region. */
+ ram_region = g_new(MemoryRegion, 1);
+ memory_region_allocate_system_memory(ram_region, OBJECT(machine),
+ "ram", ram_size);
+ memory_region_add_subregion(addr_space, 0, ram_region);
+
+ /* Init Dino (PCI host bus chip). */
+ pci_bus = dino_init(addr_space, &rtc_irq, &serial_irq);
+ assert(pci_bus);
+
+ /* Create ISA bus. */
+ isa_bus = hppa_isa_bus();
+ assert(isa_bus);
+
+ /* Realtime clock, used by firmware for PDC_TOD call. */
+ mc146818_rtc_init(isa_bus, 2000, rtc_irq);
+
+ /* Serial code setup. */
+ if (serial_hds[0]) {
+ uint32_t addr = DINO_UART_HPA + 0x800;
+ serial_mm_init(addr_space, addr, 0, serial_irq,
+ 115200, serial_hds[0], DEVICE_BIG_ENDIAN);
+ fprintf(stderr, "Serial port created at 0x%x\n", addr);
+ }
+
+ /* SCSI disk setup. */
+ lsi53c895a_create(pci_bus);
+
+ /* Network setup. e1000 is good enough, failing Tulip support. */
+ for (i = 0; i < nb_nics; i++) {
+ pci_nic_init_nofail(&nd_table[i], pci_bus, "e1000", NULL);
+ }
+
+ /* Load firmware. Given that this is not "real" firmware,
+ but one explicitly written for the emulation, we might as
+ well load it directly from an ELF image. */
+ firmware_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
+ bios_name ? bios_name :
+ "hppa-firmware.img");
+ if (firmware_filename == NULL) {
+ error_report("no firmware provided");
+ exit(1);
+ }
+
+ size = load_elf(firmware_filename, NULL,
+ NULL, &firmware_entry, &firmware_low, &firmware_high,
+ true, EM_PARISC, 0, 0);
+
+ /* Unfortunately, load_elf sign-extends reading elf32. */
+ firmware_entry = (target_ureg)firmware_entry;
+ firmware_low = (target_ureg)firmware_low;
+ firmware_high = (target_ureg)firmware_high;
+
+ if (size < 0) {
+ error_report("could not load firmware '%s'", firmware_filename);
+ exit(1);
+ }
+ fprintf(stderr, "Firmware loaded at 0x%08" PRIx64 "-0x%08" PRIx64
+ ", entry at 0x%08" PRIx64 ".\n",
+ firmware_low, firmware_high, firmware_entry);
+ if (firmware_low < ram_size || firmware_high >= FIRMWARE_END) {
+ error_report("Firmware overlaps with memory or IO space");
+ exit(1);
+ }
+ g_free(firmware_filename);
+
+ rom_region = g_new(MemoryRegion, 1);
+ memory_region_allocate_system_memory(rom_region, OBJECT(machine),
+ "firmware",
+ (FIRMWARE_END - FIRMWARE_START));
+ memory_region_add_subregion(addr_space, FIRMWARE_START, rom_region);
+
+ /* Load kernel */
+ if (kernel_filename) {
+ fprintf(stderr, "LOADING kernel '%s'\n", kernel_filename);
+ size = load_elf(kernel_filename, &cpu_hppa_to_phys,
+ NULL, &kernel_entry, &kernel_low, &kernel_high,
+ true, EM_PARISC, 0, 0);
+
+ /* Unfortunately, load_elf sign-extends reading elf32. */
+ kernel_entry = (target_ureg) cpu_hppa_to_phys(NULL, kernel_entry);
+ kernel_low = (target_ureg)kernel_low;
+ kernel_high = (target_ureg)kernel_high;
+
+ if (size < 0) {
+ error_report("could not load kernel '%s'", kernel_filename);
+ exit(1);
+ }
+
+ fprintf(stderr, "Kernel loaded at 0x%08" PRIx64 "-0x%08" PRIx64
+ ", entry at 0x%08" PRIx64 ", size %ld kB.\n",
+ kernel_low, kernel_high, kernel_entry, size / 1024);
+
+ if (kernel_cmdline) {
+ cpu[0]->env.gr[24] = 0x4000;
+ pstrcpy_targphys("cmdline", cpu[0]->env.gr[24],
+ TARGET_PAGE_SIZE, kernel_cmdline);
+ }
+
+ if (initrd_filename) {
+ ram_addr_t initrd_base;
+ long initrd_size;
+
+ initrd_size = get_image_size(initrd_filename);
+ if (initrd_size < 0) {
+ error_report("could not load initial ram disk '%s'",
+ initrd_filename);
+ exit(1);
+ }
+
+ /* Load the initrd image high in memory.
+ Mirror the algorithm used by palo:
+ (1) Due to sign-extension problems and PDC,
+ put the initrd no higher than 1G.
+ (2) Reserve 64k for stack. */
+ initrd_base = MIN(ram_size, 1024 * 1024 * 1024);
+ initrd_base = initrd_base - 64 * 1024;
+ initrd_base = (initrd_base - initrd_size) & TARGET_PAGE_MASK;
+
+ if (initrd_base < kernel_high) {
+ error_report("kernel and initial ram disk too large!");
+ exit(1);
+ }
+
+ load_image_targphys(initrd_filename, initrd_base, initrd_size);
+ cpu[0]->env.gr[23] = initrd_base;
+ cpu[0]->env.gr[22] = initrd_base + initrd_size;
+ }
+ }
+
+ if (!kernel_entry) {
+ /* When booting via firmware, tell firmware if we want interactive
+ * mode (kernel_entry=1), and to boot from CD (gr[24]='d')
+ * or hard disc (gr[24]='c').
+ */
+ kernel_entry = boot_menu ? 1 : 0;
+ cpu[0]->env.gr[24] = machine->boot_order[0];
+ }
+
+ /* We jump to the firmware entry routine and pass the
+ * various parameters in registers. After firmware initialization,
+ * firmware will start the Linux kernel with ramdisk and cmdline.
+ */
+ cpu[0]->env.gr[26] = ram_size;
+ cpu[0]->env.gr[25] = kernel_entry;
+
+ /* tell firmware how many SMP CPUs to present in inventory table */
+ cpu[0]->env.gr[21] = smp_cpus;
+}
+
+static void hppa_machine_reset(void)
+{
+ int i;
+
+ qemu_devices_reset();
+
+ /* Start all CPUs at the firmware entry point.
+ * Monarch CPU will initialize firmware, secondary CPUs
+ * will enter a small idle loop and wait for rendezvous. */
+ for (i = 0; i < smp_cpus; i++) {
+ cpu_set_pc(CPU(cpu[i]), firmware_entry);
+ cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000;
+ }
+
+ /* already initialized by machine_hppa_init()? */
+ if (cpu[0]->env.gr[26] == ram_size) {
+ return;
+ }
+
+ cpu[0]->env.gr[26] = ram_size;
+ cpu[0]->env.gr[25] = 0; /* no firmware boot menu */
+ cpu[0]->env.gr[24] = 'c';
+ /* gr22/gr23 unused, no initrd while reboot. */
+ cpu[0]->env.gr[21] = smp_cpus;
+}
+
+
+static void machine_hppa_machine_init(MachineClass *mc)
+{
+ mc->desc = "HPPA generic machine";
+ mc->default_cpu_type = TYPE_HPPA_CPU;
+ mc->init = machine_hppa_init;
+ mc->reset = hppa_machine_reset;
+ mc->block_default_type = IF_SCSI;
+ mc->max_cpus = HPPA_MAX_CPUS;
+ mc->default_cpus = 1;
+ mc->is_default = 1;
+ mc->default_ram_size = 512 * M_BYTE;
+ mc->default_boot_order = "cd";
+}
+
+DEFINE_MACHINE("hppa", machine_hppa_machine_init)
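
The initrd placement comment above compresses palo's algorithm into three
steps: cap the base at 1 GiB, reserve 64 KiB for stack, then align the start
of the image down to a page boundary. A standalone sketch of that arithmetic,
assuming 4 KiB pages (TARGET_PAGE_BITS is 12 in this patch); the sizes are
made-up examples, not from the patch:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_MASK (~0xfffULL)          /* 4 KiB pages */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t ram_size    = 512 * 1024 * 1024; /* example machine RAM */
    uint64_t initrd_size = 8 * 1024 * 1024;   /* example image size */

    uint64_t base = MIN(ram_size, 1024ULL * 1024 * 1024); /* (1) <= 1G */
    base -= 64 * 1024;                                    /* (2) stack */
    base = (base - initrd_size) & TARGET_PAGE_MASK;       /* align down */

    printf("initrd at 0x%08llx-0x%08llx\n",
           (unsigned long long)base,
           (unsigned long long)(base + initrd_size));
    return 0;
}
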
diff --git a/hw/hppa/pci.c b/hw/hppa/pci.c
new file mode 100644
index 0000000..7664202
--- /dev/null
+++ b/hw/hppa/pci.c
@@ -0,0 +1,90 @@
+/*
+ * QEMU HP-PARISC PCI support functions.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "hppa_sys.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "trace.h"
+
+
+/* Fallback for unassigned PCI I/O operations. Avoids MCHK. */
+
+static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size)
+{
+ return 0;
+}
+
+static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size)
+{
+}
+
+const MemoryRegionOps hppa_pci_ignore_ops = {
+ .read = ignore_read,
+ .write = ignore_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+
+/* PCI config space reads/writes, to byte-word addressable memory. */
+static uint64_t bw_conf1_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ PCIBus *b = opaque;
+ return pci_data_read(b, addr, size);
+}
+
+static void bw_conf1_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PCIBus *b = opaque;
+ pci_data_write(b, addr, val, size);
+}
+
+const MemoryRegionOps hppa_pci_conf1_ops = {
+ .read = bw_conf1_read,
+ .write = bw_conf1_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+/* PCI/EISA Interrupt Acknowledge Cycle. */
+
+static uint64_t iack_read(void *opaque, hwaddr addr, unsigned size)
+{
+ return pic_read_irq(isa_pic);
+}
+
+static void special_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ trace_hppa_pci_iack_write();
+}
+
+const MemoryRegionOps hppa_pci_iack_ops = {
+ .read = iack_read,
+ .write = special_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
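
Both the byte-word conf1 accessors here and the dino_config_data_* ops in
dino.c implement the usual indirect configuration scheme: latch an address at
DINO_PCI_CONFIG_ADDR, then access DINO_PCI_CONFIG_DATA. A guest-side sketch of
one 32-bit config read, assuming the conventional bus/device/function/register
encoding; out32/in32 are stand-in stubs for real MMIO accesses, not part of
the patch:

#include <stdint.h>
#include <stdio.h>

#define PCI_HPA        0xfff80000u       /* DINO_HPA */
#define PORT_PCI_CMD   (PCI_HPA + 0x064) /* DINO_PCI_CONFIG_ADDR */
#define PORT_PCI_DATA  (PCI_HPA + 0x068) /* DINO_PCI_CONFIG_DATA */

/* Stub accessors standing in for real guest loads/stores. */
static void out32(uint32_t addr, uint32_t val)
{
    printf("stl 0x%08x <- 0x%08x\n", addr, val);
}
static uint32_t in32(uint32_t addr)
{
    printf("ldl 0x%08x\n", addr);
    return 0xffffffffu;              /* "no device" on real hardware */
}

static uint32_t pci_conf_read32(int bus, int dev, int fn, int reg)
{
    uint32_t addr = (bus << 16) | (dev << 11) | (fn << 8) | (reg & ~3);
    out32(PORT_PCI_CMD, addr);       /* latch config_reg ... */
    return in32(PORT_PCI_DATA);      /* ... then read back through Dino */
}

int main(void)
{
    pci_conf_read32(0, 0, 0, 0);     /* vendor/device ID of device 0 */
    return 0;
}
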
diff --git a/hw/hppa/trace-events b/hw/hppa/trace-events
new file mode 100644
index 0000000..14c6793
--- /dev/null
+++ b/hw/hppa/trace-events
@@ -0,0 +1,4 @@
+# See docs/devel/tracing.txt for syntax documentation.
+
+# hw/hppa/pci.c
+hppa_pci_iack_write(void) ""
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 8751c46..f999bfd 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -24,6 +24,7 @@ enum {
QEMU_ARCH_MOXIE = (1 << 15),
QEMU_ARCH_TRICORE = (1 << 16),
QEMU_ARCH_NIOS2 = (1 << 17),
+ QEMU_ARCH_HPPA = (1 << 18),
};
extern const uint32_t arch_type;
diff --git a/linux-user/hppa/target_cpu.h b/linux-user/hppa/target_cpu.h
index e50522e..7b78bbe 100644
--- a/linux-user/hppa/target_cpu.h
+++ b/linux-user/hppa/target_cpu.h
@@ -33,7 +33,7 @@ static inline void cpu_clone_regs(CPUHPPAState *env, target_ulong newsp)
static inline void cpu_set_tls(CPUHPPAState *env, target_ulong newtls)
{
- env->cr27 = newtls;
+ env->cr[27] = newtls;
}
#endif
diff --git a/linux-user/main.c b/linux-user/main.c
index 2140465..7de0e02 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3773,21 +3773,41 @@ void cpu_loop(CPUHPPAState *env)
env->iaoq_f = env->gr[31];
env->iaoq_b = env->gr[31] + 4;
break;
- case EXCP_SIGSEGV:
+ case EXCP_ITLB_MISS:
+ case EXCP_DTLB_MISS:
+ case EXCP_NA_ITLB_MISS:
+ case EXCP_NA_DTLB_MISS:
+ case EXCP_IMP:
+ case EXCP_DMP:
+ case EXCP_DMB:
+ case EXCP_PAGE_REF:
+ case EXCP_DMAR:
+ case EXCP_DMPI:
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = TARGET_SEGV_ACCERR;
- info._sifields._sigfault._addr = env->ior;
+ info._sifields._sigfault._addr = env->cr[CR_IOR];
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
- case EXCP_SIGILL:
+ case EXCP_UNALIGN:
+ info.si_signo = TARGET_SIGBUS;
+ info.si_errno = 0;
+ info.si_code = 0;
+ info._sifields._sigfault._addr = env->cr[CR_IOR];
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_ILL:
+ case EXCP_PRIV_OPR:
+ case EXCP_PRIV_REG:
info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->iaoq_f;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
- case EXCP_SIGFPE:
+ case EXCP_OVERFLOW:
+ case EXCP_COND:
+ case EXCP_ASSIST:
info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
info.si_code = 0;
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 2db4507..9a380b9 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -6442,7 +6442,7 @@ static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
__put_user(env->fr[i], &sc->sc_fr[i]);
}
- __put_user(env->sar, &sc->sc_sar);
+ __put_user(env->cr[CR_SAR], &sc->sc_sar);
}
static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
@@ -6463,7 +6463,7 @@ static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
__get_user(env->iaoq_f, &sc->sc_iaoq[0]);
__get_user(env->iaoq_b, &sc->sc_iaoq[1]);
- __get_user(env->sar, &sc->sc_sar);
+ __get_user(env->cr[CR_SAR], &sc->sc_sar);
}
/* No, this doesn't look right, but it's copied straight from the kernel. */
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
new file mode 100755
index 0000000..ae83343
--- /dev/null
+++ b/pc-bios/hppa-firmware.img
Binary files differ
diff --git a/roms/seabios-hppa b/roms/seabios-hppa
new file mode 160000
+Subproject 8fa4ca9935669414a824ecda24f6e70c36e8dc9
diff --git a/target/hppa/Makefile.objs b/target/hppa/Makefile.objs
index 263446f..3359da5 100644
--- a/target/hppa/Makefile.objs
+++ b/target/hppa/Makefile.objs
@@ -1 +1,3 @@
-obj-y += translate.o helper.o cpu.o op_helper.o gdbstub.o
+obj-y += translate.o helper.o cpu.o op_helper.o gdbstub.o mem_helper.o
+obj-y += int_helper.o
+obj-$(CONFIG_SOFTMMU) += machine.o
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 9e7b0d4..5213347 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -37,9 +37,29 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
HPPACPU *cpu = HPPA_CPU(cs);
+#ifdef CONFIG_USER_ONLY
cpu->env.iaoq_f = tb->pc;
cpu->env.iaoq_b = tb->cs_base;
- cpu->env.psw_n = tb->flags & 1;
+#else
+ /* Recover the IAOQ values from the GVA + PRIV. */
+ uint32_t priv = (tb->flags >> TB_FLAG_PRIV_SHIFT) & 3;
+ target_ulong cs_base = tb->cs_base;
+ target_ulong iasq_f = cs_base & ~0xffffffffull;
+ int32_t diff = cs_base;
+
+ cpu->env.iasq_f = iasq_f;
+ cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
+ if (diff) {
+ cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
+ }
+#endif
+
+ cpu->env.psw_n = (tb->flags & PSW_N) != 0;
+}
+
+static bool hppa_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
@@ -48,6 +68,23 @@ static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
info->print_insn = print_insn_hppa;
}
+static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+
+ cs->exception_index = EXCP_UNALIGN;
+ if (env->psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ env->cr[CR_IOR] = addr;
+ env->cr[CR_ISR] = addr >> 32;
+ }
+
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
@@ -62,6 +99,14 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
qemu_init_vcpu(cs);
acc->parent_realize(dev, errp);
+
+#ifndef CONFIG_USER_ONLY
+ {
+ HPPACPU *cpu = HPPA_CPU(cs);
+ cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ hppa_cpu_alarm_timer, cpu);
+ }
+#endif
}
/* Sort hppabetically by type name. */
@@ -106,8 +151,10 @@ static void hppa_cpu_initfn(Object *obj)
CPUHPPAState *env = &cpu->env;
cs->env_ptr = env;
+ cs->exception_index = -1;
cpu_hppa_loaded_fr0(env);
set_snan_bit_is_one(true, &env->fp_status);
+ cpu_hppa_put_psw(env, PSW_W);
}
static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
@@ -125,6 +172,7 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
dc->realize = hppa_cpu_realizefn;
cc->class_by_name = hppa_cpu_class_by_name;
+ cc->has_work = hppa_cpu_has_work;
cc->do_interrupt = hppa_cpu_do_interrupt;
cc->cpu_exec_interrupt = hppa_cpu_exec_interrupt;
cc->dump_state = hppa_cpu_dump_state;
@@ -132,7 +180,13 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
+ dc->vmsd = &vmstate_hppa_cpu;
+#endif
+ cc->do_unaligned_access = hppa_cpu_do_unaligned_access;
cc->disas_set_info = hppa_cpu_disas_set_info;
cc->tcg_initialize = hppa_translate_init;
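
The IAOQ recovery above pairs with cpu_get_tb_cpu_state() in cpu.h below:
tb->pc carries the full virtual address with the privilege bits masked off,
and tb->cs_base carries IASQ_F in the high bits plus the IAOQ_B - IAOQ_F
delta in the otherwise-zero low 32 bits (a zero delta meaning "unknown").
A standalone round-trip sketch of that packing; the space, offsets, and
privilege level are illustrative values:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    /* Example state: space 0x12, user privilege in the IAOQ low bits. */
    uint64_t iasq_f = 0x12ULL << 32;
    uint64_t iaoq_f = 0x1000 | 3, iaoq_b = 0x1004 | 3;

    /* Pack (cpu_get_tb_cpu_state): pc is the GVA with privilege masked
       off; cs_base is IASQ_F plus the branch delta in the low 32 bits. */
    uint64_t pc      = iasq_f | (iaoq_f & -4ull);
    uint64_t cs_base = iasq_f | (uint32_t)(iaoq_b - iaoq_f);
    uint32_t priv    = iaoq_f & 3;       /* travels in tb->flags */

    /* Unpack (hppa_cpu_synchronize_from_tb). */
    uint64_t sq   = cs_base & ~0xffffffffull;
    int32_t  diff = (int32_t)cs_base;
    uint64_t f    = (pc & ~sq) + priv;

    assert(sq == iasq_f);
    assert(f == iaoq_f);
    assert(diff != 0 && f + diff == iaoq_b);
    return 0;
}
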
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 1a35eae..7640c81 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -23,10 +23,30 @@
#include "qemu-common.h"
#include "cpu-qom.h"
-/* We only support hppa-linux-user at present, so 32-bit only. */
-#define TARGET_LONG_BITS 32
-#define TARGET_PHYS_ADDR_SPACE_BITS 32
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#ifdef TARGET_HPPA64
+#define TARGET_LONG_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#define TARGET_REGISTER_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#elif defined(CONFIG_USER_ONLY)
+#define TARGET_LONG_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_REGISTER_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#else
+/* In order to form the GVA from space:offset,
+ we need a 64-bit virtual address space. */
+#define TARGET_LONG_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#define TARGET_REGISTER_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#endif
+
+/* PA-RISC 1.x processors have a strong memory model. */
+/* ??? While we do not yet implement PA-RISC 2.0, those processors have
+ a weak memory model, but with TLB bits that force ordering on a per-page
+ basis. It's probably easier to fall back to a strong memory model. */
+#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
#define CPUArchState struct CPUHPPAState
@@ -36,28 +56,145 @@
#define TARGET_PAGE_BITS 12
#define ALIGNED_ONLY
-#define NB_MMU_MODES 1
-#define MMU_USER_IDX 0
+#define NB_MMU_MODES 5
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 3
+#define MMU_PHYS_IDX 4
#define TARGET_INSN_START_EXTRA_WORDS 1
-#define EXCP_SYSCALL 1
-#define EXCP_SYSCALL_LWS 2
-#define EXCP_SIGSEGV 3
-#define EXCP_SIGILL 4
-#define EXCP_SIGFPE 5
+/* Hardware exceptions, interrupts, faults, and traps. */
+#define EXCP_HPMC 1 /* high priority machine check */
+#define EXCP_POWER_FAIL 2
+#define EXCP_RC 3 /* recovery counter */
+#define EXCP_EXT_INTERRUPT 4 /* external interrupt */
+#define EXCP_LPMC 5 /* low priority machine check */
+#define EXCP_ITLB_MISS 6 /* itlb miss / instruction page fault */
+#define EXCP_IMP 7 /* instruction memory protection trap */
+#define EXCP_ILL 8 /* illegal instruction trap */
+#define EXCP_BREAK 9 /* break instruction */
+#define EXCP_PRIV_OPR 10 /* privileged operation trap */
+#define EXCP_PRIV_REG 11 /* privileged register trap */
+#define EXCP_OVERFLOW 12 /* signed overflow trap */
+#define EXCP_COND 13 /* trap-on-condition */
+#define EXCP_ASSIST 14 /* assist exception trap */
+#define EXCP_DTLB_MISS 15 /* dtlb miss / data page fault */
+#define EXCP_NA_ITLB_MISS 16 /* non-access itlb miss */
+#define EXCP_NA_DTLB_MISS 17 /* non-access dtlb miss */
+#define EXCP_DMP 18 /* data memory protection trap */
+#define EXCP_DMB 19 /* data memory break trap */
+#define EXCP_TLB_DIRTY 20 /* tlb dirty bit trap */
+#define EXCP_PAGE_REF 21 /* page reference trap */
+#define EXCP_ASSIST_EMU 22 /* assist emulation trap */
+#define EXCP_HPT 23 /* high-privilege transfer trap */
+#define EXCP_LPT 24 /* low-privilege transfer trap */
+#define EXCP_TB 25 /* taken branch trap */
+#define EXCP_DMAR 26 /* data memory access rights trap */
+#define EXCP_DMPI 27 /* data memory protection id trap */
+#define EXCP_UNALIGN 28 /* unaligned data reference trap */
+#define EXCP_PER_INTERRUPT 29 /* performance monitor interrupt */
+
+/* Exceptions for linux-user emulation. */
+#define EXCP_SYSCALL 30
+#define EXCP_SYSCALL_LWS 31
+
+/* Taken from Linux kernel: arch/parisc/include/asm/psw.h */
+#define PSW_I 0x00000001
+#define PSW_D 0x00000002
+#define PSW_P 0x00000004
+#define PSW_Q 0x00000008
+#define PSW_R 0x00000010
+#define PSW_F 0x00000020
+#define PSW_G 0x00000040 /* PA1.x only */
+#define PSW_O 0x00000080 /* PA2.0 only */
+#define PSW_CB 0x0000ff00
+#define PSW_M 0x00010000
+#define PSW_V 0x00020000
+#define PSW_C 0x00040000
+#define PSW_B 0x00080000
+#define PSW_X 0x00100000
+#define PSW_N 0x00200000
+#define PSW_L 0x00400000
+#define PSW_H 0x00800000
+#define PSW_T 0x01000000
+#define PSW_S 0x02000000
+#define PSW_E 0x04000000
+#ifdef TARGET_HPPA64
+#define PSW_W 0x08000000 /* PA2.0 only */
+#else
+#define PSW_W 0
+#endif
+#define PSW_Z 0x40000000 /* PA1.x only */
+#define PSW_Y 0x80000000 /* PA1.x only */
+
+#define PSW_SM (PSW_W | PSW_E | PSW_O | PSW_G | PSW_F \
+ | PSW_R | PSW_Q | PSW_P | PSW_D | PSW_I)
+
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I PSW_I /* Enable External Interrupts */
+#define PSW_SM_D PSW_D
+#define PSW_SM_P PSW_P
+#define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */
+#define PSW_SM_R PSW_R /* Enable Recover Counter Trap */
+#ifdef TARGET_HPPA64
+#define PSW_SM_E 0x100
+#define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */
+#else
+#define PSW_SM_E 0
+#define PSW_SM_W 0
+#endif
+
+#define CR_RC 0
+#define CR_SCRCCR 10
+#define CR_SAR 11
+#define CR_IVA 14
+#define CR_EIEM 15
+#define CR_IT 16
+#define CR_IIASQ 17
+#define CR_IIAOQ 18
+#define CR_IIR 19
+#define CR_ISR 20
+#define CR_IOR 21
+#define CR_IPSW 22
+#define CR_EIRR 23
typedef struct CPUHPPAState CPUHPPAState;
+#if TARGET_REGISTER_BITS == 32
+typedef uint32_t target_ureg;
+typedef int32_t target_sreg;
+#define TREG_FMT_lx "%08"PRIx32
+#define TREG_FMT_ld "%"PRId32
+#else
+typedef uint64_t target_ureg;
+typedef int64_t target_sreg;
+#define TREG_FMT_lx "%016"PRIx64
+#define TREG_FMT_ld "%"PRId64
+#endif
+
+typedef struct {
+ uint64_t va_b;
+ uint64_t va_e;
+ target_ureg pa;
+ unsigned u : 1;
+ unsigned t : 1;
+ unsigned d : 1;
+ unsigned b : 1;
+ unsigned page_size : 4;
+ unsigned ar_type : 3;
+ unsigned ar_pl1 : 2;
+ unsigned ar_pl2 : 2;
+ unsigned entry_valid : 1;
+ unsigned access_id : 16;
+} hppa_tlb_entry;
+
struct CPUHPPAState {
- target_ulong gr[32];
+ target_ureg gr[32];
uint64_t fr[32];
+ uint64_t sr[8]; /* stored shifted into place for gva */
- target_ulong sar;
- target_ulong cr26;
- target_ulong cr27;
-
- target_ulong psw_n; /* boolean */
- target_long psw_v; /* in most significant bit */
+ target_ureg psw; /* All psw bits except the following: */
+ target_ureg psw_n; /* boolean */
+ target_sreg psw_v; /* in most significant bit */
/* Splitting the carry-borrow field into the MSB and "the rest", allows
* for "the rest" to be deleted when it is unused, but the MSB is in use.
@@ -66,19 +203,29 @@ struct CPUHPPAState {
* host has the appropriate add-with-carry insn to compute the msb).
* Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
*/
- target_ulong psw_cb; /* in least significant bit of next nibble */
- target_ulong psw_cb_msb; /* boolean */
+ target_ureg psw_cb; /* in least significant bit of next nibble */
+ target_ureg psw_cb_msb; /* boolean */
- target_ulong iaoq_f; /* front */
- target_ulong iaoq_b; /* back, aka next instruction */
-
- target_ulong ior; /* interrupt offset register */
+ target_ureg iaoq_f; /* front */
+ target_ureg iaoq_b; /* back, aka next instruction */
+ uint64_t iasq_f;
+ uint64_t iasq_b;
uint32_t fr0_shadow; /* flags, c, ca/cq, rm, d, enables */
float_status fp_status;
+ target_ureg cr[32]; /* control registers */
+ target_ureg cr_back[2]; /* back of cr17/cr18 */
+ target_ureg shadow[7]; /* shadow registers */
+
/* Those resources are used only in QEMU core */
CPU_COMMON
+
+ /* ??? The number of entries isn't specified by the architecture. */
+ /* ??? Implement a unified itlb/dtlb for the moment. */
+ /* ??? We should use a more intelligent data structure. */
+ hppa_tlb_entry tlb[256];
+ uint32_t tlb_last;
};
/**
@@ -93,6 +240,7 @@ struct HPPACPU {
/*< public >*/
CPUHPPAState env;
+ QEMUTimer *alarm_timer;
};
static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env)
@@ -107,7 +255,14 @@ static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env)
static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
{
- return 0;
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ if (env->psw & (ifetch ? PSW_C : PSW_D)) {
+ return env->iaoq_f & 3;
+ }
+ return MMU_PHYS_IDX; /* mmu disabled */
+#endif
}
void hppa_translate_init(void);
@@ -116,28 +271,97 @@ void hppa_translate_init(void);
void hppa_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
+ target_ureg off)
+{
+#ifdef CONFIG_USER_ONLY
+ return off;
+#else
+ off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
+ return spc | off;
+#endif
+}
+
+static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
+ target_ureg off)
+{
+ return hppa_form_gva_psw(env->psw, spc, off);
+}
+
+/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
+ * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
+ * same value.
+ */
+#define TB_FLAG_SR_SAME PSW_I
+#define TB_FLAG_PRIV_SHIFT 8
+
static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
target_ulong *cs_base,
uint32_t *pflags)
{
+ uint32_t flags = env->psw_n * PSW_N;
+
+ /* TB lookup assumes that PC contains the complete virtual address.
+ If we leave space+offset separate, we'll get ITLB misses to an
+ incomplete virtual address. This also means that we must separate
+ out current cpu privilege from the low bits of IAOQ_F. */
+#ifdef CONFIG_USER_ONLY
*pc = env->iaoq_f;
*cs_base = env->iaoq_b;
- *pflags = env->psw_n;
+#else
+ /* ??? E, T, H, L, B, P bits need to be here, when implemented. */
+ flags |= env->psw & (PSW_W | PSW_C | PSW_D);
+ flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
+
+ *pc = (env->psw & PSW_C
+ ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
+ : env->iaoq_f & -4);
+ *cs_base = env->iasq_f;
+
+ /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
+ low 32-bits of CS_BASE. This will succeed for all direct branches,
+ which is the primary case we care about -- using goto_tb within a page.
+ Failure is indicated by a zero difference. */
+ if (env->iasq_f == env->iasq_b) {
+ target_sreg diff = env->iaoq_b - env->iaoq_f;
+ if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
+ *cs_base |= (uint32_t)diff;
+ }
+ }
+ if ((env->sr[4] == env->sr[5])
+ & (env->sr[4] == env->sr[6])
+ & (env->sr[4] == env->sr[7])) {
+ flags |= TB_FLAG_SR_SAME;
+ }
+#endif
+
+ *pflags = flags;
}
-target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
-void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
+target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
void cpu_hppa_loaded_fr0(CPUHPPAState *env);
#define cpu_signal_handler cpu_hppa_signal_handler
int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
-int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
- int rw, int midx);
+hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
int hppa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function, int);
+#ifdef CONFIG_USER_ONLY
+int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
+ int rw, int midx);
+#else
+int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
+ int type, hwaddr *pphys, int *pprot);
+extern const MemoryRegionOps hppa_io_eir_ops;
+extern const struct VMStateDescription vmstate_hppa_cpu;
+void hppa_cpu_alarm_timer(void *);
+int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
+#endif
+void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
#endif /* HPPA_CPU_H */
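
hppa_form_gva_psw() above builds the 64-bit global virtual address by OR-ing a
pre-shifted space register over an offset that is truncated to 32 bits unless
PSW_W (wide mode) is set. A worked standalone example of the non-user-only
path; the space and offsets are illustrative:

#include <stdint.h>
#include <assert.h>

#define PSW_W 0x08000000u   /* as defined for TARGET_HPPA64 above */

static uint64_t form_gva(uint32_t psw, uint64_t spc, uint64_t off)
{
    off &= (psw & PSW_W) ? 0x3fffffffffffffffull : 0xffffffffull;
    return spc | off;
}

int main(void)
{
    uint64_t spc = 0x5ULL << 32;    /* space 5, stored pre-shifted */

    assert(form_gva(0, spc, 0x1000) == 0x500001000ull);
    /* Without PSW_W, offsets wrap at 4 GiB before the space is OR'd in. */
    assert(form_gva(0, spc, 0x1ffff2000ull) == 0x5ffff2000ull);
    return 0;
}
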
diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c
index c37a56f..e2e9c4d 100644
--- a/target/hppa/gdbstub.c
+++ b/target/hppa/gdbstub.c
@@ -26,7 +26,7 @@ int hppa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
- target_ulong val;
+ target_ureg val;
switch (n) {
case 0:
@@ -36,19 +36,97 @@ int hppa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
val = env->gr[n];
break;
case 32:
- val = env->sar;
+ val = env->cr[CR_SAR];
break;
case 33:
val = env->iaoq_f;
break;
+ case 34:
+ val = env->iasq_f >> 32;
+ break;
case 35:
val = env->iaoq_b;
break;
+ case 36:
+ val = env->iasq_b >> 32;
+ break;
+ case 37:
+ val = env->cr[CR_EIEM];
+ break;
+ case 38:
+ val = env->cr[CR_IIR];
+ break;
+ case 39:
+ val = env->cr[CR_ISR];
+ break;
+ case 40:
+ val = env->cr[CR_IOR];
+ break;
+ case 41:
+ val = env->cr[CR_IPSW];
+ break;
+ case 43:
+ val = env->sr[4] >> 32;
+ break;
+ case 44:
+ val = env->sr[0] >> 32;
+ break;
+ case 45:
+ val = env->sr[1] >> 32;
+ break;
+ case 46:
+ val = env->sr[2] >> 32;
+ break;
+ case 47:
+ val = env->sr[3] >> 32;
+ break;
+ case 48:
+ val = env->sr[5] >> 32;
+ break;
+ case 49:
+ val = env->sr[6] >> 32;
+ break;
+ case 50:
+ val = env->sr[7] >> 32;
+ break;
+ case 51:
+ val = env->cr[CR_RC];
+ break;
+ case 52:
+ val = env->cr[8];
+ break;
+ case 53:
+ val = env->cr[9];
+ break;
+ case 54:
+ val = env->cr[CR_SCRCCR];
+ break;
+ case 55:
+ val = env->cr[12];
+ break;
+ case 56:
+ val = env->cr[13];
+ break;
+ case 57:
+ val = env->cr[24];
+ break;
+ case 58:
+ val = env->cr[25];
+ break;
case 59:
- val = env->cr26;
+ val = env->cr[26];
break;
case 60:
- val = env->cr27;
+ val = env->cr[27];
+ break;
+ case 61:
+ val = env->cr[28];
+ break;
+ case 62:
+ val = env->cr[29];
+ break;
+ case 63:
+ val = env->cr[30];
break;
case 64 ... 127:
val = extract64(env->fr[(n - 64) / 2], (n & 1 ? 0 : 32), 32);
@@ -61,14 +139,25 @@ int hppa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
}
break;
}
- return gdb_get_regl(mem_buf, val);
+
+ if (TARGET_REGISTER_BITS == 64) {
+ return gdb_get_reg64(mem_buf, val);
+ } else {
+ return gdb_get_reg32(mem_buf, val);
+ }
}
int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
- target_ulong val = ldtul_p(mem_buf);
+ target_ureg val;
+
+ if (TARGET_REGISTER_BITS == 64) {
+ val = ldq_p(mem_buf);
+ } else {
+ val = ldl_p(mem_buf);
+ }
switch (n) {
case 0:
@@ -78,19 +167,97 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
env->gr[n] = val;
break;
case 32:
- env->sar = val;
+ env->cr[CR_SAR] = val;
break;
case 33:
env->iaoq_f = val;
break;
+ case 34:
+ env->iasq_f = (uint64_t)val << 32;
+ break;
case 35:
env->iaoq_b = val;
break;
+ case 36:
+ env->iasq_b = (uint64_t)val << 32;
+ break;
+ case 37:
+ env->cr[CR_EIEM] = val;
+ break;
+ case 38:
+ env->cr[CR_IIR] = val;
+ break;
+ case 39:
+ env->cr[CR_ISR] = val;
+ break;
+ case 40:
+ env->cr[CR_IOR] = val;
+ break;
+ case 41:
+ env->cr[CR_IPSW] = val;
+ break;
+ case 43:
+ env->sr[4] = (uint64_t)val << 32;
+ break;
+ case 44:
+ env->sr[0] = (uint64_t)val << 32;
+ break;
+ case 45:
+ env->sr[1] = (uint64_t)val << 32;
+ break;
+ case 46:
+ env->sr[2] = (uint64_t)val << 32;
+ break;
+ case 47:
+ env->sr[3] = (uint64_t)val << 32;
+ break;
+ case 48:
+ env->sr[5] = (uint64_t)val << 32;
+ break;
+ case 49:
+ env->sr[6] = (uint64_t)val << 32;
+ break;
+ case 50:
+ env->sr[7] = (uint64_t)val << 32;
+ break;
+ case 51:
+ env->cr[CR_RC] = val;
+ break;
+ case 52:
+ env->cr[8] = val;
+ break;
+ case 53:
+ env->cr[9] = val;
+ break;
+ case 54:
+ env->cr[CR_SCRCCR] = val;
+ break;
+ case 55:
+ env->cr[12] = val;
+ break;
+ case 56:
+ env->cr[13] = val;
+ break;
+ case 57:
+ env->cr[24] = val;
+ break;
+ case 58:
+ env->cr[25] = val;
+ break;
case 59:
- env->cr26 = val;
+ env->cr[26] = val;
break;
case 60:
- env->cr27 = val;
+ env->cr[27] = val;
+ break;
+ case 61:
+ env->cr[28] = val;
+ break;
+ case 62:
+ env->cr[29] = val;
+ break;
+ case 63:
+ env->cr[30] = val;
break;
case 64:
env->fr[0] = deposit64(env->fr[0], 32, 32, val);
@@ -108,5 +275,5 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
}
break;
}
- return sizeof(target_ulong);
+ return sizeof(target_ureg);
}
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index 23f7af7..858ec20 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -24,9 +24,9 @@
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"
-target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
+target_ureg cpu_hppa_get_psw(CPUHPPAState *env)
{
- target_ulong psw;
+ target_ureg psw;
/* Fold carry bits down to 8 consecutive bits. */
/* ??? Needs tweaking for hppa64. */
@@ -39,20 +39,22 @@ target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
/* .........................bcdefgh */
psw |= (psw >> 12) & 0xf;
psw |= env->psw_cb_msb << 7;
- psw <<= 8;
+ psw = (psw & 0xff) << 8;
- psw |= env->psw_n << 21;
- psw |= (env->psw_v < 0) << 17;
+ psw |= env->psw_n * PSW_N;
+ psw |= (env->psw_v < 0) * PSW_V;
+ psw |= env->psw;
return psw;
}
-void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
{
- target_ulong cb = 0;
+ target_ureg cb = 0;
- env->psw_n = (psw >> 21) & 1;
- env->psw_v = -((psw >> 17) & 1);
+ env->psw = psw & ~(PSW_N | PSW_V | PSW_CB);
+ env->psw_n = (psw / PSW_N) & 1;
+ env->psw_v = -((psw / PSW_V) & 1);
env->psw_cb_msb = (psw >> 15) & 1;
cb |= ((psw >> 14) & 1) << 28;
@@ -65,73 +67,55 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
env->psw_cb = cb;
}
-int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- HPPACPU *cpu = HPPA_CPU(cs);
-
- cs->exception_index = EXCP_SIGSEGV;
- cpu->env.ior = address;
- return 1;
-}
-
-void hppa_cpu_do_interrupt(CPUState *cs)
-{
- HPPACPU *cpu = HPPA_CPU(cs);
- CPUHPPAState *env = &cpu->env;
- int i = cs->exception_index;
-
- if (qemu_loglevel_mask(CPU_LOG_INT)) {
- static int count;
- const char *name = "<unknown>";
-
- switch (i) {
- case EXCP_SYSCALL:
- name = "syscall";
- break;
- case EXCP_SIGSEGV:
- name = "sigsegv";
- break;
- case EXCP_SIGILL:
- name = "sigill";
- break;
- case EXCP_SIGFPE:
- name = "sigfpe";
- break;
- }
- qemu_log("INT %6d: %s ia_f=" TARGET_FMT_lx "\n",
- ++count, name, env->iaoq_f);
- }
- cs->exception_index = -1;
-}
-
-bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- abort();
- return false;
-}
-
void hppa_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
+ target_ureg psw = cpu_hppa_get_psw(env);
+ target_ureg psw_cb;
+ char psw_c[20];
int i;
- cpu_fprintf(f, "IA_F " TARGET_FMT_lx
- " IA_B " TARGET_FMT_lx
- " PSW " TARGET_FMT_lx
- " [N:" TARGET_FMT_ld " V:%d"
- " CB:" TARGET_FMT_lx "]\n ",
- env->iaoq_f, env->iaoq_b, cpu_hppa_get_psw(env),
- env->psw_n, env->psw_v < 0,
- ((env->psw_cb >> 4) & 0x01111111) | (env->psw_cb_msb << 28));
- for (i = 1; i < 32; i++) {
- cpu_fprintf(f, "GR%02d " TARGET_FMT_lx " ", i, env->gr[i]);
- if ((i % 4) == 3) {
- cpu_fprintf(f, "\n");
- }
+ cpu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx "\n",
+ hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f),
+ hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b));
+
+ psw_c[0] = (psw & PSW_W ? 'W' : '-');
+ psw_c[1] = (psw & PSW_E ? 'E' : '-');
+ psw_c[2] = (psw & PSW_S ? 'S' : '-');
+ psw_c[3] = (psw & PSW_T ? 'T' : '-');
+ psw_c[4] = (psw & PSW_H ? 'H' : '-');
+ psw_c[5] = (psw & PSW_L ? 'L' : '-');
+ psw_c[6] = (psw & PSW_N ? 'N' : '-');
+ psw_c[7] = (psw & PSW_X ? 'X' : '-');
+ psw_c[8] = (psw & PSW_B ? 'B' : '-');
+ psw_c[9] = (psw & PSW_C ? 'C' : '-');
+ psw_c[10] = (psw & PSW_V ? 'V' : '-');
+ psw_c[11] = (psw & PSW_M ? 'M' : '-');
+ psw_c[12] = (psw & PSW_F ? 'F' : '-');
+ psw_c[13] = (psw & PSW_R ? 'R' : '-');
+ psw_c[14] = (psw & PSW_Q ? 'Q' : '-');
+ psw_c[15] = (psw & PSW_P ? 'P' : '-');
+ psw_c[16] = (psw & PSW_D ? 'D' : '-');
+ psw_c[17] = (psw & PSW_I ? 'I' : '-');
+ psw_c[18] = '\0';
+ psw_cb = ((env->psw_cb >> 4) & 0x01111111) | (env->psw_cb_msb << 28);
+
+ cpu_fprintf(f, "PSW " TREG_FMT_lx " CB " TREG_FMT_lx " %s\n",
+ psw, psw_cb, psw_c);
+
+ for (i = 0; i < 32; i++) {
+ cpu_fprintf(f, "GR%02d " TREG_FMT_lx "%c", i, env->gr[i],
+ (i & 3) == 3 ? '\n' : ' ');
+ }
+#ifndef CONFIG_USER_ONLY
+ for (i = 0; i < 8; i++) {
+ cpu_fprintf(f, "SR%02d %08x%c", i, (uint32_t)(env->sr[i] >> 32),
+ (i & 3) == 3 ? '\n' : ' ');
}
+#endif
+ cpu_fprintf(f, "\n");
/* ??? FR */
}
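
A note on the PSW flag handling above: the new get/put pair packs PSW.N and
PSW.V branch-free. Multiplying a 0/1 flag by a single-bit mask sets that bit
exactly when the flag is nonzero, and dividing by the mask is the matching
right shift. A minimal standalone sketch of the trick, assuming the PSW_N and
PSW_V masks sit at bits 21 and 17 as in the open-coded shifts this series
replaces:

    #include <assert.h>
    #include <stdint.h>

    #define PSW_V (1u << 17)
    #define PSW_N (1u << 21)

    int main(void)
    {
        uint32_t psw = 0;
        int psw_n = 1;        /* nullify flag, kept expanded as 0/1 */
        int32_t psw_v = -1;   /* overflow flag, kept in the sign bit */

        /* pack: flag * MASK sets the bit iff the flag is nonzero */
        psw |= psw_n * PSW_N;
        psw |= (psw_v < 0) * PSW_V;

        /* unpack: dividing by a power-of-two mask is the inverse shift */
        assert(((psw / PSW_N) & 1) == 1);
        assert(-(int32_t)((psw / PSW_V) & 1) == -1);
        return 0;
    }
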
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index 0a6b900..bfe0dd1 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -1,14 +1,23 @@
+#if TARGET_REGISTER_BITS == 64
+# define dh_alias_tr i64
+# define dh_is_64bit_tr 1
+#else
+# define dh_alias_tr i32
+# define dh_is_64bit_tr 0
+#endif
+#define dh_ctype_tr target_ureg
+#define dh_is_signed_tr 0
+
DEF_HELPER_2(excp, noreturn, env, int)
-DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl)
-DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tr)
+DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tr)
-DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl)
-DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
-DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tl)
-DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
-DEF_HELPER_FLAGS_1(probe_r, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(probe_w, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tr, env, tl, i32, i32)
DEF_HELPER_FLAGS_1(loaded_fr0, TCG_CALL_NO_RWG, void, env)
@@ -66,3 +75,21 @@ DEF_HELPER_FLAGS_4(fmpyfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(fmpynfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(fmpyfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmpynfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+
+DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tr)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(halt, noreturn, env)
+DEF_HELPER_1(reset, noreturn, env)
+DEF_HELPER_1(rfi, void, env)
+DEF_HELPER_1(rfi_r, void, env)
+DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tr)
+DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tr)
+DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tr)
+DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tr, env, tr)
+DEF_HELPER_FLAGS_3(itlba, TCG_CALL_NO_RWG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(itlbp, TCG_CALL_NO_RWG, void, env, tl, tr)
+DEF_HELPER_FLAGS_2(ptlb, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tr, env, tl)
+#endif
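
The 'tr' helper type introduced above lets helper prototypes follow the guest
register width rather than the address width ('tl'). The cpu.h definitions are
not shown in this diff; a sketch of the typedefs the alias presumes, mirroring
how target_ureg/target_sreg are used throughout this series:

    #include <stdint.h>

    #define TARGET_REGISTER_BITS 32   /* 32 for hppa; 64 would be hppa64 */

    #if TARGET_REGISTER_BITS == 64
    typedef uint64_t target_ureg;     /* unsigned guest register */
    typedef int64_t  target_sreg;     /* signed guest register */
    #else
    typedef uint32_t target_ureg;
    typedef int32_t  target_sreg;
    #endif
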
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
new file mode 100644
index 0000000..787f3d6
--- /dev/null
+++ b/target/hppa/int_helper.c
@@ -0,0 +1,263 @@
+/*
+ * HPPA interrupt helper routines
+ *
+ * Copyright (c) 2017 Richard Henderson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qom/cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+static void eval_interrupt(HPPACPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ if (cpu->env.cr[CR_EIRR] & cpu->env.cr[CR_EIEM]) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+/* Each CPU has a word mapped into the GSC bus. Anything on the GSC bus
+ * can write to this word to raise an external interrupt on the target CPU.
+ * This includes the system controller (DINO) for regular devices, or
+ * another CPU for SMP interprocessor interrupts.
+ */
+static uint64_t io_eir_read(void *opaque, hwaddr addr, unsigned size)
+{
+ HPPACPU *cpu = opaque;
+
+ /* ??? What does a read of this register over the GSC bus do? */
+ return cpu->env.cr[CR_EIRR];
+}
+
+static void io_eir_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ HPPACPU *cpu = opaque;
+ int le_bit = ~data & (TARGET_REGISTER_BITS - 1);
+
+ cpu->env.cr[CR_EIRR] |= (target_ureg)1 << le_bit;
+ eval_interrupt(cpu);
+}
+
+const MemoryRegionOps hppa_io_eir_ops = {
+ .read = io_eir_read,
+ .write = io_eir_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+};
+
+void hppa_cpu_alarm_timer(void *opaque)
+{
+ /* Raise interrupt 0. */
+ io_eir_write(opaque, 0, 0, 4);
+}
+
+void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
+{
+ env->cr[CR_EIRR] &= ~val;
+ qemu_mutex_lock_iothread();
+ eval_interrupt(hppa_env_get_cpu(env));
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
+{
+ env->cr[CR_EIEM] = val;
+ qemu_mutex_lock_iothread();
+ eval_interrupt(hppa_env_get_cpu(env));
+ qemu_mutex_unlock_iothread();
+}
+#endif /* !CONFIG_USER_ONLY */
+
+void hppa_cpu_do_interrupt(CPUState *cs)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ int i = cs->exception_index;
+ target_ureg iaoq_f = env->iaoq_f;
+ target_ureg iaoq_b = env->iaoq_b;
+ uint64_t iasq_f = env->iasq_f;
+ uint64_t iasq_b = env->iasq_b;
+
+#ifndef CONFIG_USER_ONLY
+ target_ureg old_psw;
+
+ /* As documented in pa2.0 -- interruption handling. */
+ /* step 1 */
+ env->cr[CR_IPSW] = old_psw = cpu_hppa_get_psw(env);
+
+ /* step 2 -- note PSW_W == 0 for !HPPA64. */
+ cpu_hppa_put_psw(env, PSW_W | (i == EXCP_HPMC ? PSW_M : 0));
+
+ /* step 3 */
+ env->cr[CR_IIASQ] = iasq_f >> 32;
+ env->cr_back[0] = iasq_b >> 32;
+ env->cr[CR_IIAOQ] = iaoq_f;
+ env->cr_back[1] = iaoq_b;
+
+ if (old_psw & PSW_Q) {
+ /* step 5 */
+ /* ISR and IOR will be set elsewhere. */
+ switch (i) {
+ case EXCP_ILL:
+ case EXCP_BREAK:
+ case EXCP_PRIV_REG:
+ case EXCP_PRIV_OPR:
+ /* IIR set via translate.c. */
+ break;
+
+ case EXCP_OVERFLOW:
+ case EXCP_COND:
+ case EXCP_ASSIST:
+ case EXCP_DTLB_MISS:
+ case EXCP_NA_ITLB_MISS:
+ case EXCP_NA_DTLB_MISS:
+ case EXCP_DMAR:
+ case EXCP_DMPI:
+ case EXCP_UNALIGN:
+ case EXCP_DMP:
+ case EXCP_DMB:
+ case EXCP_TLB_DIRTY:
+ case EXCP_PAGE_REF:
+ case EXCP_ASSIST_EMU:
+ {
+ /* Avoid reading directly from the virtual address, lest we
+ raise another exception from some sort of TLB issue. */
+ /* ??? An alternate fool-proof method would be to store the
+ instruction data into the unwind info. That's probably
+ a bit too much in the way of extra storage required. */
+ vaddr vaddr;
+ hwaddr paddr;
+
+ paddr = vaddr = iaoq_f & -4;
+ if (old_psw & PSW_C) {
+ int prot, t;
+
+ vaddr = hppa_form_gva_psw(old_psw, iasq_f, vaddr);
+ t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
+ 0, &paddr, &prot);
+ if (t >= 0) {
+ /* We can't re-load the instruction. */
+ env->cr[CR_IIR] = 0;
+ break;
+ }
+ }
+ env->cr[CR_IIR] = ldl_phys(cs->as, paddr);
+ }
+ break;
+
+ default:
+ /* Other exceptions do not set IIR. */
+ break;
+ }
+
+ /* step 6 */
+ env->shadow[0] = env->gr[1];
+ env->shadow[1] = env->gr[8];
+ env->shadow[2] = env->gr[9];
+ env->shadow[3] = env->gr[16];
+ env->shadow[4] = env->gr[17];
+ env->shadow[5] = env->gr[24];
+ env->shadow[6] = env->gr[25];
+ }
+
+ /* step 7 */
+ env->iaoq_f = env->cr[CR_IVA] + 32 * i;
+ env->iaoq_b = env->iaoq_f + 4;
+ env->iasq_f = 0;
+ env->iasq_b = 0;
+#endif
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static const char * const names[] = {
+ [EXCP_HPMC] = "high priority machine check",
+ [EXCP_POWER_FAIL] = "power fail interrupt",
+ [EXCP_RC] = "recovery counter trap",
+ [EXCP_EXT_INTERRUPT] = "external interrupt",
+ [EXCP_LPMC] = "low priority machine check",
+ [EXCP_ITLB_MISS] = "instruction tlb miss fault",
+ [EXCP_IMP] = "instruction memory protection trap",
+ [EXCP_ILL] = "illegal instruction trap",
+ [EXCP_BREAK] = "break instruction trap",
+ [EXCP_PRIV_OPR] = "privileged operation trap",
+ [EXCP_PRIV_REG] = "privileged register trap",
+ [EXCP_OVERFLOW] = "overflow trap",
+ [EXCP_COND] = "conditional trap",
+ [EXCP_ASSIST] = "assist exception trap",
+ [EXCP_DTLB_MISS] = "data tlb miss fault",
+ [EXCP_NA_ITLB_MISS] = "non-access instruction tlb miss",
+ [EXCP_NA_DTLB_MISS] = "non-access data tlb miss",
+ [EXCP_DMP] = "data memory protection trap",
+ [EXCP_DMB] = "data memory break trap",
+ [EXCP_TLB_DIRTY] = "tlb dirty bit trap",
+ [EXCP_PAGE_REF] = "page reference trap",
+ [EXCP_ASSIST_EMU] = "assist emulation trap",
+ [EXCP_HPT] = "high-privilege transfer trap",
+ [EXCP_LPT] = "low-privilege transfer trap",
+ [EXCP_TB] = "taken branch trap",
+ [EXCP_DMAR] = "data memory access rights trap",
+ [EXCP_DMPI] = "data memory protection id trap",
+ [EXCP_UNALIGN] = "unaligned data reference trap",
+ [EXCP_PER_INTERRUPT] = "performance monitor interrupt",
+ [EXCP_SYSCALL] = "syscall",
+ [EXCP_SYSCALL_LWS] = "syscall-lws",
+ };
+ static int count;
+ const char *name = NULL;
+ char unknown[16];
+
+ if (i >= 0 && i < ARRAY_SIZE(names)) {
+ name = names[i];
+ }
+ if (!name) {
+ snprintf(unknown, sizeof(unknown), "unknown %d", i);
+ name = unknown;
+ }
+ qemu_log("INT %6d: %s @ " TARGET_FMT_lx "," TARGET_FMT_lx
+ " -> " TREG_FMT_lx " " TARGET_FMT_lx "\n",
+ ++count, name,
+ hppa_form_gva(env, iasq_f, iaoq_f),
+ hppa_form_gva(env, iasq_b, iaoq_b),
+ env->iaoq_f,
+ hppa_form_gva(env, (uint64_t)env->cr[CR_ISR] << 32,
+ env->cr[CR_IOR]));
+ }
+ cs->exception_index = -1;
+}
+
+bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+#ifndef CONFIG_USER_ONLY
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+
+ /* If interrupts are requested and enabled, raise them. */
+ if ((env->psw & PSW_I) && (interrupt_request & CPU_INTERRUPT_HARD)) {
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ hppa_cpu_do_interrupt(cs);
+ return true;
+ }
+#endif
+ return false;
+}
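
One subtle point in io_eir_write above is the bit numbering: PA-RISC counts
bits from the most significant end, so a device writing interrupt number 0
must raise the top bit of EIRR. With TARGET_REGISTER_BITS a power of two,
'~data & (bits - 1)' is exactly '(bits - 1) - (data % bits)'. A small
self-checking sketch of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t bits = 32;              /* TARGET_REGISTER_BITS */
        uint64_t data;

        for (data = 0; data < 64; data++) {
            int le_bit = ~data & (bits - 1);   /* as in io_eir_write */
            assert(le_bit == (int)(bits - 1 - data % bits));
        }

        /* writing interrupt number 0 raises the most significant bit */
        data = 0;
        assert(((uint32_t)1 << (~data & (bits - 1))) == 0x80000000u);
        return 0;
    }
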
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
new file mode 100644
index 0000000..8e07778
--- /dev/null
+++ b/target/hppa/machine.c
@@ -0,0 +1,181 @@
+/*
+ * HPPA migration state support
+ *
+ * Copyright (c) 2017 Richard Henderson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "migration/cpu.h"
+
+#if TARGET_REGISTER_BITS == 64
+#define qemu_put_betr qemu_put_be64
+#define qemu_get_betr qemu_get_be64
+#define VMSTATE_UINTTR_V(_f, _s, _v) \
+ VMSTATE_UINT64_V(_f, _s, _v)
+#define VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v)
+#else
+#define qemu_put_betr qemu_put_be32
+#define qemu_get_betr qemu_get_be32
+#define VMSTATE_UINTTR_V(_f, _s, _v) \
+ VMSTATE_UINT32_V(_f, _s, _v)
+#define VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v)
+#endif
+
+#define VMSTATE_UINTTR(_f, _s) \
+ VMSTATE_UINTTR_V(_f, _s, 0)
+#define VMSTATE_UINTTR_ARRAY(_f, _s, _n) \
+ VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, 0)
+
+
+static int get_psw(QEMUFile *f, void *opaque, size_t size, VMStateField *field)
+{
+ CPUHPPAState *env = opaque;
+ cpu_hppa_put_psw(env, qemu_get_betr(f));
+ return 0;
+}
+
+static int put_psw(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
+{
+ CPUHPPAState *env = opaque;
+ qemu_put_betr(f, cpu_hppa_get_psw(env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_psw = {
+ .name = "psw",
+ .get = get_psw,
+ .put = put_psw,
+};
+
+/* FIXME: Use the PA2.0 format, which is a superset of the PA1.1 format. */
+static int get_tlb(QEMUFile *f, void *opaque, size_t size, VMStateField *field)
+{
+ hppa_tlb_entry *ent = opaque;
+ uint32_t val;
+
+ memset(ent, 0, sizeof(*ent));
+
+ ent->va_b = qemu_get_be64(f);
+ ent->pa = qemu_get_betr(f);
+ val = qemu_get_be32(f);
+
+ ent->entry_valid = extract32(val, 0, 1);
+ ent->access_id = extract32(val, 1, 18);
+ ent->u = extract32(val, 19, 1);
+ ent->ar_pl2 = extract32(val, 20, 2);
+ ent->ar_pl1 = extract32(val, 22, 2);
+ ent->ar_type = extract32(val, 24, 3);
+ ent->b = extract32(val, 27, 1);
+ ent->d = extract32(val, 28, 1);
+ ent->t = extract32(val, 29, 1);
+
+ ent->va_e = ent->va_b + TARGET_PAGE_SIZE - 1;
+ return 0;
+}
+
+static int put_tlb(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
+{
+ hppa_tlb_entry *ent = opaque;
+ uint32_t val = 0;
+
+ if (ent->entry_valid) {
+ val = 1;
+ val = deposit32(val, 1, 18, ent->access_id);
+ val = deposit32(val, 19, 1, ent->u);
+ val = deposit32(val, 20, 2, ent->ar_pl2);
+ val = deposit32(val, 22, 2, ent->ar_pl1);
+ val = deposit32(val, 24, 3, ent->ar_type);
+ val = deposit32(val, 27, 1, ent->b);
+ val = deposit32(val, 28, 1, ent->d);
+ val = deposit32(val, 29, 1, ent->t);
+ }
+
+ qemu_put_be64(f, ent->va_b);
+ qemu_put_betr(f, ent->pa);
+ qemu_put_be32(f, val);
+ return 0;
+}
+
+static const VMStateInfo vmstate_tlb = {
+ .name = "tlb entry",
+ .get = get_tlb,
+ .put = put_tlb,
+};
+
+static VMStateField vmstate_env_fields[] = {
+ VMSTATE_UINTTR_ARRAY(gr, CPUHPPAState, 32),
+ VMSTATE_UINT64_ARRAY(fr, CPUHPPAState, 32),
+ VMSTATE_UINT64_ARRAY(sr, CPUHPPAState, 8),
+ VMSTATE_UINTTR_ARRAY(cr, CPUHPPAState, 32),
+ VMSTATE_UINTTR_ARRAY(cr_back, CPUHPPAState, 2),
+ VMSTATE_UINTTR_ARRAY(shadow, CPUHPPAState, 7),
+
+ /* Save the architectural value of the psw, not the internally
+ expanded version. Since this architectural value does not
+ exist in memory to be stored, this requires a bit of hoop
+ jumping. We want OFFSET=0 so that we effectively pass ENV
+ to the helper functions, and we need to fill in the name by
+ hand since there's no field of that name. */
+ {
+ .name = "psw",
+ .version_id = 0,
+ .size = sizeof(uint64_t),
+ .info = &vmstate_psw,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+
+ VMSTATE_UINTTR(iaoq_f, CPUHPPAState),
+ VMSTATE_UINTTR(iaoq_b, CPUHPPAState),
+ VMSTATE_UINT64(iasq_f, CPUHPPAState),
+ VMSTATE_UINT64(iasq_b, CPUHPPAState),
+
+ VMSTATE_UINT32(fr0_shadow, CPUHPPAState),
+
+ VMSTATE_ARRAY(tlb, CPUHPPAState, ARRAY_SIZE(((CPUHPPAState *)0)->tlb),
+ 0, vmstate_tlb, hppa_tlb_entry),
+ VMSTATE_UINT32(tlb_last, CPUHPPAState),
+
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_env_fields,
+};
+
+static VMStateField vmstate_cpu_fields[] = {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_hppa_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_cpu_fields,
+};
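
get_tlb/put_tlb above flatten a TLB entry's flag bits into one 32-bit word
with QEMU's extract32/deposit32. A round-trip sketch with local stand-ins for
those helpers (the real ones live in qemu/bitops.h and additionally assert on
their arguments):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extract32(uint32_t v, int start, int len)
    {
        return (v >> start) & (~0u >> (32 - len));
    }

    static uint32_t deposit32(uint32_t v, int start, int len, uint32_t field)
    {
        uint32_t mask = (~0u >> (32 - len)) << start;
        return (v & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        /* pack as in put_tlb: valid bit, then access_id at bit 1, ... */
        uint32_t val = 1;                       /* entry_valid */
        val = deposit32(val, 1, 18, 0x2345);    /* access_id (sample value) */
        val = deposit32(val, 24, 3, 5);         /* ar_type */
        val = deposit32(val, 28, 1, 1);         /* d: dirty */

        /* unpack as in get_tlb and check the round trip */
        assert(extract32(val, 0, 1) == 1);
        assert(extract32(val, 1, 18) == 0x2345);
        assert(extract32(val, 24, 3) == 5);
        assert(extract32(val, 28, 1) == 1);
        return 0;
    }
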
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
new file mode 100644
index 0000000..ab160c2
--- /dev/null
+++ b/target/hppa/mem_helper.c
@@ -0,0 +1,348 @@
+/*
+ * HPPA memory access helper routines
+ *
+ * Copyright (c) 2017 Helge Deller
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qom/cpu.h"
+
+#ifdef CONFIG_USER_ONLY
+int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int size, int rw, int mmu_idx)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ /* ??? Test between data page fault and data memory protection trap,
+ which would affect si_code. */
+ cs->exception_index = EXCP_DMP;
+ cpu->env.cr[CR_IOR] = address;
+ return 1;
+}
+#else
+static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+ hppa_tlb_entry *ent = &env->tlb[i];
+ if (ent->va_b <= addr && addr <= ent->va_e) {
+ return ent;
+ }
+ }
+ return NULL;
+}
+
+static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
+{
+ CPUState *cs = CPU(hppa_env_get_cpu(env));
+ unsigned i, n = 1 << (2 * ent->page_size);
+ uint64_t addr = ent->va_b;
+
+ for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
+ /* Do not flush MMU_PHYS_IDX. */
+ tlb_flush_page_by_mmuidx(cs, addr, 0xf);
+ }
+
+ memset(ent, 0, sizeof(*ent));
+ ent->va_b = -1;
+}
+
+static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+{
+ hppa_tlb_entry *ent;
+ uint32_t i = env->tlb_last;
+
+ env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
+ ent = &env->tlb[i];
+
+ hppa_flush_tlb_ent(env, ent);
+ return ent;
+}
+
+int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
+ int type, hwaddr *pphys, int *pprot)
+{
+ hwaddr phys;
+ int prot, r_prot, w_prot, x_prot;
+ hppa_tlb_entry *ent;
+ int ret = -1;
+
+ /* Virtual translation disabled. Direct map virtual to physical. */
+ if (mmu_idx == MMU_PHYS_IDX) {
+ phys = addr;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ goto egress;
+ }
+
+ /* Find a valid tlb entry that matches the virtual address. */
+ ent = hppa_find_tlb(env, addr);
+ if (ent == NULL || !ent->entry_valid) {
+ phys = 0;
+ prot = 0;
+ /* ??? Unconditionally report data tlb miss,
+ even if this is an instruction fetch. */
+ ret = EXCP_DTLB_MISS;
+ goto egress;
+ }
+
+ /* We now know the physical address. */
+ phys = ent->pa + (addr & ~TARGET_PAGE_MASK);
+
+ /* Map TLB access_rights field to QEMU protection. */
+ r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
+ w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
+ x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
+ switch (ent->ar_type) {
+ case 0: /* read-only: data page */
+ prot = r_prot;
+ break;
+ case 1: /* read/write: dynamic data page */
+ prot = r_prot | w_prot;
+ break;
+ case 2: /* read/execute: normal code page */
+ prot = r_prot | x_prot;
+ break;
+ case 3: /* read/write/execute: dynamic code page */
+ prot = r_prot | w_prot | x_prot;
+ break;
+ default: /* execute: promote to privilege level type & 3 */
+ prot = x_prot;
+ break;
+ }
+
+ /* ??? Check PSW_P and ent->access_prot. This can remove PAGE_WRITE. */
+
+ /* No guest access type indicates a non-architectural access from
+ within QEMU. Bypass checks for access, D, B and T bits. */
+ if (type == 0) {
+ goto egress;
+ }
+
+ if (unlikely(!(prot & type))) {
+ /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
+ ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP);
+ goto egress;
+ }
+
+ /* In reverse priority order, check for conditions which raise faults.
+ As we go, remove PROT bits that cover the condition we want to check.
+ In this way, the resulting PROT will force a re-check of the
+ architectural TLB entry for the next access. */
+ if (unlikely(!ent->d)) {
+ if (type & PAGE_WRITE) {
+ /* The D bit is not set -- TLB Dirty Bit Fault. */
+ ret = EXCP_TLB_DIRTY;
+ }
+ prot &= PAGE_READ | PAGE_EXEC;
+ }
+ if (unlikely(ent->b)) {
+ if (type & PAGE_WRITE) {
+ /* The B bit is set -- Data Memory Break Fault. */
+ ret = EXCP_DMB;
+ }
+ prot &= PAGE_READ | PAGE_EXEC;
+ }
+ if (unlikely(ent->t)) {
+ if (!(type & PAGE_EXEC)) {
+ /* The T bit is set -- Page Reference Fault. */
+ ret = EXCP_PAGE_REF;
+ }
+ prot &= PAGE_EXEC;
+ }
+
+ egress:
+ *pphys = phys;
+ *pprot = prot;
+ return ret;
+}
+
+hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ hwaddr phys;
+ int prot, excp;
+
+ /* If the (data) mmu is disabled, bypass translation. */
+ /* ??? We really ought to know if the code mmu is disabled too,
+ in order to get the correct debugging dumps. */
+ if (!(cpu->env.psw & PSW_D)) {
+ return addr;
+ }
+
+ excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
+ &phys, &prot);
+
+ /* Since we're translating for debugging, the only error that is a
+ hard error is no translation at all. Otherwise, while a real cpu
+ access might not have permission, the debugger does. */
+ return excp == EXCP_DTLB_MISS ? -1 : phys;
+}
+
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+ MMUAccessType type, int mmu_idx, uintptr_t retaddr)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ int prot, excp, a_prot;
+ hwaddr phys;
+
+ switch (type) {
+ case MMU_INST_FETCH:
+ a_prot = PAGE_EXEC;
+ break;
+ case MMU_DATA_STORE:
+ a_prot = PAGE_WRITE;
+ break;
+ default:
+ a_prot = PAGE_READ;
+ break;
+ }
+
+ excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
+ a_prot, &phys, &prot);
+ if (unlikely(excp >= 0)) {
+ /* Failure. Raise the indicated exception. */
+ cs->exception_index = excp;
+ if (cpu->env.psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ cpu->env.cr[CR_IOR] = addr;
+ cpu->env.cr[CR_ISR] = addr >> 32;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+
+ /* Success! Store the translation into the QEMU TLB. */
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+}
+
+/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
+void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+ hppa_tlb_entry *empty = NULL;
+ int i;
+
+ /* Zap any old entries covering ADDR; notice empty entries on the way. */
+ for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+ hppa_tlb_entry *ent = &env->tlb[i];
+ if (!ent->entry_valid) {
+ empty = ent;
+ } else if (ent->va_b <= addr && addr <= ent->va_e) {
+ hppa_flush_tlb_ent(env, ent);
+ empty = ent;
+ }
+ }
+
+ /* If we didn't see an empty entry, evict one. */
+ if (empty == NULL) {
+ empty = hppa_alloc_tlb_ent(env);
+ }
+
+ /* Note that empty->entry_valid == 0 already. */
+ empty->va_b = addr & TARGET_PAGE_MASK;
+ empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
+ empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+}
+
+/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
+void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+ hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+ if (unlikely(ent == NULL || ent->entry_valid)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+ return;
+ }
+
+ ent->access_id = extract32(reg, 1, 18);
+ ent->u = extract32(reg, 19, 1);
+ ent->ar_pl2 = extract32(reg, 20, 2);
+ ent->ar_pl1 = extract32(reg, 22, 2);
+ ent->ar_type = extract32(reg, 24, 3);
+ ent->b = extract32(reg, 27, 1);
+ ent->d = extract32(reg, 28, 1);
+ ent->t = extract32(reg, 29, 1);
+ ent->entry_valid = 1;
+}
+
+/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
+ synchronous across all processors. */
+static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
+{
+ CPUHPPAState *env = cpu->env_ptr;
+ target_ulong addr = (target_ulong) data.target_ptr;
+ hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+ if (ent && ent->entry_valid) {
+ hppa_flush_tlb_ent(env, ent);
+ }
+}
+
+void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
+{
+ CPUState *src = CPU(hppa_env_get_cpu(env));
+ CPUState *cpu;
+ run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
+
+ CPU_FOREACH(cpu) {
+ if (cpu != src) {
+ async_run_on_cpu(cpu, ptlb_work, data);
+ }
+ }
+ async_safe_run_on_cpu(src, ptlb_work, data);
+}
+
+/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
+ number of pages/entries (we choose all), and is local to the cpu. */
+void HELPER(ptlbe)(CPUHPPAState *env)
+{
+ CPUState *src = CPU(hppa_env_get_cpu(env));
+
+ memset(env->tlb, 0, sizeof(env->tlb));
+ tlb_flush_by_mmuidx(src, 0xf);
+}
+
+target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
+{
+ hwaddr phys;
+ int prot, excp;
+
+ excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
+ &phys, &prot);
+ if (excp >= 0) {
+ if (env->psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ env->cr[CR_IOR] = addr;
+ env->cr[CR_ISR] = addr >> 32;
+ }
+ if (excp == EXCP_DTLB_MISS) {
+ excp = EXCP_NA_DTLB_MISS;
+ }
+ hppa_dynamic_excp(env, excp, GETPC());
+ }
+ return phys;
+}
+
+/* Return the ar_type of the TLB at VADDR, or -1. */
+int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
+{
+ hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
+ return ent ? ent->ar_type : -1;
+}
+#endif /* CONFIG_USER_ONLY */
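
The access-rights check in hppa_get_physical_address above maps the TLB's
PA-RISC access-rights fields onto QEMU page protections, where a lower mmu
index means more privileged (0 = kernel). A sketch with stand-in PAGE_*
values, showing for instance that execute permission requires the privilege
level to fall in the window ar_pl2 <= priv <= ar_pl1:

    #include <assert.h>

    enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };

    static int tlb_prot(int mmu_idx, int ar_pl1, int ar_pl2, int ar_type)
    {
        int r_prot = (mmu_idx <= ar_pl1) * PAGE_READ;
        int w_prot = (mmu_idx <= ar_pl2) * PAGE_WRITE;
        int x_prot = (ar_pl2 <= mmu_idx && mmu_idx <= ar_pl1) * PAGE_EXEC;

        switch (ar_type) {
        case 0:  return r_prot;                    /* read-only data page */
        case 1:  return r_prot | w_prot;           /* read/write data page */
        case 2:  return r_prot | x_prot;           /* normal code page */
        case 3:  return r_prot | w_prot | x_prot;  /* dynamic code page */
        default: return x_prot;                    /* gateway page */
        }
    }

    int main(void)
    {
        /* user code page (ar_type 2, PL1 = PL2 = 3): user mode may
           read and execute it ... */
        assert(tlb_prot(3, 3, 3, 2) == (PAGE_READ | PAGE_EXEC));
        /* ... while kernel mode may read it but, falling outside the
           PL window, not execute it */
        assert(tlb_prot(0, 3, 3, 2) == PAGE_READ);
        /* kernel-only read/write data is invisible to user mode */
        assert(tlb_prot(0, 0, 0, 1) == (PAGE_READ | PAGE_WRITE));
        assert(tlb_prot(3, 0, 0, 1) == 0);
        return 0;
    }
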
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index fdbf64a..4ee936b 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -22,6 +22,9 @@
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
+#include "sysemu/sysemu.h"
+#include "qemu/timer.h"
+
void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp)
{
@@ -32,7 +35,7 @@ void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp)
cpu_loop_exit(cs);
}
-static void QEMU_NORETURN dynexcp(CPUHPPAState *env, int excp, uintptr_t ra)
+void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
HPPACPU *cpu = hppa_env_get_cpu(env);
CPUState *cs = CPU(cpu);
@@ -41,26 +44,26 @@ static void QEMU_NORETURN dynexcp(CPUHPPAState *env, int excp, uintptr_t ra)
cpu_loop_exit_restore(cs, ra);
}
-void HELPER(tsv)(CPUHPPAState *env, target_ulong cond)
+void HELPER(tsv)(CPUHPPAState *env, target_ureg cond)
{
- if (unlikely((target_long)cond < 0)) {
- dynexcp(env, EXCP_SIGFPE, GETPC());
+ if (unlikely((target_sreg)cond < 0)) {
+ hppa_dynamic_excp(env, EXCP_OVERFLOW, GETPC());
}
}
-void HELPER(tcond)(CPUHPPAState *env, target_ulong cond)
+void HELPER(tcond)(CPUHPPAState *env, target_ureg cond)
{
if (unlikely(cond)) {
- dynexcp(env, EXCP_SIGFPE, GETPC());
+ hppa_dynamic_excp(env, EXCP_COND, GETPC());
}
}
static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
uint32_t mask, uintptr_t ra)
{
+#ifdef CONFIG_USER_ONLY
uint32_t old, new, cmp;
-#ifdef CONFIG_USER_ONLY
uint32_t *haddr = g2h(addr - 1);
old = *haddr;
while (1) {
@@ -72,11 +75,12 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
old = cmp;
}
#else
-#error "Not implemented."
+ /* FIXME -- we can do better. */
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#endif
}
-static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
+static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
bool parallel)
{
uintptr_t ra = GETPC();
@@ -103,18 +107,18 @@ static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
}
}
-void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ureg val)
{
do_stby_b(env, addr, val, false);
}
void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
- target_ulong val)
+ target_ureg val)
{
do_stby_b(env, addr, val, true);
}
-static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
+static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val,
bool parallel)
{
uintptr_t ra = GETPC();
@@ -145,25 +149,45 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
}
}
-void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ureg val)
{
do_stby_e(env, addr, val, false);
}
void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
- target_ulong val)
+ target_ureg val)
{
do_stby_e(env, addr, val, true);
}
-target_ulong HELPER(probe_r)(target_ulong addr)
+target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
+ uint32_t level, uint32_t want)
{
- return page_check_range(addr, 1, PAGE_READ);
-}
+#ifdef CONFIG_USER_ONLY
+ return page_check_range(addr, 1, want);
+#else
+ int prot, excp;
+ hwaddr phys;
-target_ulong HELPER(probe_w)(target_ulong addr)
-{
- return page_check_range(addr, 1, PAGE_WRITE);
+ /* Fail if the requested privilege level is more privileged
+ (i.e. numerically lower) than the current level. */
+ if (level < (env->iaoq_f & 3)) {
+ return 0;
+ }
+
+ excp = hppa_get_physical_address(env, addr, level, 0, &phys, &prot);
+ if (excp >= 0) {
+ if (env->psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ env->cr[CR_IOR] = addr;
+ env->cr[CR_ISR] = addr >> 32;
+ }
+ if (excp == EXCP_DTLB_MISS) {
+ excp = EXCP_NA_DTLB_MISS;
+ }
+ hppa_dynamic_excp(env, excp, GETPC());
+ }
+ return (want & prot) != 0;
+#endif
}
void HELPER(loaded_fr0)(CPUHPPAState *env)
@@ -226,7 +250,7 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
env->fr[0] = (uint64_t)shadow << 32;
if (hard_exp & shadow) {
- dynexcp(env, EXCP_SIGFPE, ra);
+ hppa_dynamic_excp(env, EXCP_ASSIST, ra);
}
}
@@ -592,3 +616,89 @@ float64 HELPER(fmpynfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c)
update_fr0_op(env, GETPC());
return ret;
}
+
+target_ureg HELPER(read_interval_timer)(void)
+{
+#ifdef CONFIG_USER_ONLY
+ /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
+ Just pass through the host cpu clock ticks. */
+ return cpu_get_host_ticks();
+#else
+ /* In system mode we have access to a decent high-resolution clock.
+ In order to make OS-level time accounting work with the cr16,
+ present it with a well-timed clock fixed at 250MHz. */
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val)
+{
+ HPPACPU *cpu = hppa_env_get_cpu(env);
+ uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t timeout;
+
+ /* Even in 64-bit mode, the comparator is always 32-bit. But the
+ value we expose to the guest is 1/4 of the speed of the clock,
+ so moosh in 34 bits. */
+ timeout = deposit64(current, 0, 34, (uint64_t)val << 2);
+
+ /* If the mooshing puts the clock in the past, advance to next round. */
+ if (timeout < current + 1000) {
+ timeout += 1ULL << 34;
+ }
+
+ cpu->env.cr[CR_IT] = timeout;
+ timer_mod(cpu->alarm_timer, timeout);
+}
+
+void HELPER(halt)(CPUHPPAState *env)
+{
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ helper_excp(env, EXCP_HLT);
+}
+
+void HELPER(reset)(CPUHPPAState *env)
+{
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ helper_excp(env, EXCP_HLT);
+}
+
+target_ureg HELPER(swap_system_mask)(CPUHPPAState *env, target_ureg nsm)
+{
+ target_ulong psw = env->psw;
+ /* ??? On second reading this condition simply seems
+ to be undefined rather than a diagnosed trap. */
+ if (nsm & ~psw & PSW_Q) {
+ hppa_dynamic_excp(env, EXCP_ILL, GETPC());
+ }
+ env->psw = (psw & ~PSW_SM) | (nsm & PSW_SM);
+ return psw & PSW_SM;
+}
+
+void HELPER(rfi)(CPUHPPAState *env)
+{
+ /* ??? On second reading this condition simply seems
+ to be undefined rather than a diagnosed trap. */
+ if (env->psw & (PSW_I | PSW_R | PSW_Q)) {
+ helper_excp(env, EXCP_ILL);
+ }
+ env->iasq_f = (uint64_t)env->cr[CR_IIASQ] << 32;
+ env->iasq_b = (uint64_t)env->cr_back[0] << 32;
+ env->iaoq_f = env->cr[CR_IIAOQ];
+ env->iaoq_b = env->cr_back[1];
+ cpu_hppa_put_psw(env, env->cr[CR_IPSW]);
+}
+
+void HELPER(rfi_r)(CPUHPPAState *env)
+{
+ env->gr[1] = env->shadow[0];
+ env->gr[8] = env->shadow[1];
+ env->gr[9] = env->shadow[2];
+ env->gr[16] = env->shadow[3];
+ env->gr[17] = env->shadow[4];
+ env->gr[24] = env->shadow[5];
+ env->gr[25] = env->shadow[6];
+ helper_rfi(env);
+}
+#endif
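
The interval-timer write above splices the guest's 32-bit comparator (scaled
by 4, hence 34 bits) into the low bits of the current QEMU_CLOCK_VIRTUAL time,
then bumps it by 2^34 ns if that landed in the past. A self-checking sketch of
the arithmetic, with a local stand-in for QEMU's deposit64 and an arbitrary
sample "now":

    #include <assert.h>
    #include <stdint.h>

    static uint64_t deposit64(uint64_t v, int start, int len, uint64_t field)
    {
        uint64_t mask = (~(uint64_t)0 >> (64 - len)) << start;
        return (v & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint64_t current = 0x123456789ULL;     /* pretend "now" in ns */
        uint32_t val = 0x100;                  /* guest comparator value */
        uint64_t timeout = deposit64(current, 0, 34, (uint64_t)val << 2);

        /* keep the high bits of "now", replace the low 34 bits */
        assert((timeout >> 34) == (current >> 34));
        assert((timeout & ((1ULL << 34) - 1)) == ((uint64_t)val << 2));

        /* if that lands in the past, wait for the next 2^34 ns wrap */
        if (timeout < current + 1000) {
            timeout += 1ULL << 34;
        }
        assert(timeout > current);
        return 0;
    }
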
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 31d9a2a..6499b39 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -30,9 +30,239 @@
#include "trace-tcg.h"
#include "exec/log.h"
+/* Since we have a distinction between register size and address size,
+ we need to redefine all of these. */
+
+#undef TCGv
+#undef tcg_temp_new
+#undef tcg_global_reg_new
+#undef tcg_global_mem_new
+#undef tcg_temp_local_new
+#undef tcg_temp_free
+
+#if TARGET_LONG_BITS == 64
+#define TCGv_tl TCGv_i64
+#define tcg_temp_new_tl tcg_temp_new_i64
+#define tcg_temp_free_tl tcg_temp_free_i64
+#if TARGET_REGISTER_BITS == 64
+#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
+#else
+#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
+#endif
+#else
+#define TCGv_tl TCGv_i32
+#define tcg_temp_new_tl tcg_temp_new_i32
+#define tcg_temp_free_tl tcg_temp_free_i32
+#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
+#endif
+
+#if TARGET_REGISTER_BITS == 64
+#define TCGv_reg TCGv_i64
+
+#define tcg_temp_new tcg_temp_new_i64
+#define tcg_global_reg_new tcg_global_reg_new_i64
+#define tcg_global_mem_new tcg_global_mem_new_i64
+#define tcg_temp_local_new tcg_temp_local_new_i64
+#define tcg_temp_free tcg_temp_free_i64
+
+#define tcg_gen_movi_reg tcg_gen_movi_i64
+#define tcg_gen_mov_reg tcg_gen_mov_i64
+#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
+#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
+#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
+#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
+#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
+#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
+#define tcg_gen_ld_reg tcg_gen_ld_i64
+#define tcg_gen_st8_reg tcg_gen_st8_i64
+#define tcg_gen_st16_reg tcg_gen_st16_i64
+#define tcg_gen_st32_reg tcg_gen_st32_i64
+#define tcg_gen_st_reg tcg_gen_st_i64
+#define tcg_gen_add_reg tcg_gen_add_i64
+#define tcg_gen_addi_reg tcg_gen_addi_i64
+#define tcg_gen_sub_reg tcg_gen_sub_i64
+#define tcg_gen_neg_reg tcg_gen_neg_i64
+#define tcg_gen_subfi_reg tcg_gen_subfi_i64
+#define tcg_gen_subi_reg tcg_gen_subi_i64
+#define tcg_gen_and_reg tcg_gen_and_i64
+#define tcg_gen_andi_reg tcg_gen_andi_i64
+#define tcg_gen_or_reg tcg_gen_or_i64
+#define tcg_gen_ori_reg tcg_gen_ori_i64
+#define tcg_gen_xor_reg tcg_gen_xor_i64
+#define tcg_gen_xori_reg tcg_gen_xori_i64
+#define tcg_gen_not_reg tcg_gen_not_i64
+#define tcg_gen_shl_reg tcg_gen_shl_i64
+#define tcg_gen_shli_reg tcg_gen_shli_i64
+#define tcg_gen_shr_reg tcg_gen_shr_i64
+#define tcg_gen_shri_reg tcg_gen_shri_i64
+#define tcg_gen_sar_reg tcg_gen_sar_i64
+#define tcg_gen_sari_reg tcg_gen_sari_i64
+#define tcg_gen_brcond_reg tcg_gen_brcond_i64
+#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
+#define tcg_gen_setcond_reg tcg_gen_setcond_i64
+#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
+#define tcg_gen_mul_reg tcg_gen_mul_i64
+#define tcg_gen_muli_reg tcg_gen_muli_i64
+#define tcg_gen_div_reg tcg_gen_div_i64
+#define tcg_gen_rem_reg tcg_gen_rem_i64
+#define tcg_gen_divu_reg tcg_gen_divu_i64
+#define tcg_gen_remu_reg tcg_gen_remu_i64
+#define tcg_gen_discard_reg tcg_gen_discard_i64
+#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
+#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
+#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
+#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
+#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
+#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
+#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
+#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
+#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
+#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
+#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
+#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
+#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
+#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
+#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
+#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
+#define tcg_gen_andc_reg tcg_gen_andc_i64
+#define tcg_gen_eqv_reg tcg_gen_eqv_i64
+#define tcg_gen_nand_reg tcg_gen_nand_i64
+#define tcg_gen_nor_reg tcg_gen_nor_i64
+#define tcg_gen_orc_reg tcg_gen_orc_i64
+#define tcg_gen_clz_reg tcg_gen_clz_i64
+#define tcg_gen_ctz_reg tcg_gen_ctz_i64
+#define tcg_gen_clzi_reg tcg_gen_clzi_i64
+#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
+#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
+#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
+#define tcg_gen_rotl_reg tcg_gen_rotl_i64
+#define tcg_gen_rotli_reg tcg_gen_rotli_i64
+#define tcg_gen_rotr_reg tcg_gen_rotr_i64
+#define tcg_gen_rotri_reg tcg_gen_rotri_i64
+#define tcg_gen_deposit_reg tcg_gen_deposit_i64
+#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
+#define tcg_gen_extract_reg tcg_gen_extract_i64
+#define tcg_gen_sextract_reg tcg_gen_sextract_i64
+#define tcg_const_reg tcg_const_i64
+#define tcg_const_local_reg tcg_const_local_i64
+#define tcg_gen_movcond_reg tcg_gen_movcond_i64
+#define tcg_gen_add2_reg tcg_gen_add2_i64
+#define tcg_gen_sub2_reg tcg_gen_sub2_i64
+#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
+#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
+#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
+#if UINTPTR_MAX == UINT32_MAX
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
+#else
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
+#endif
+#else
+#define TCGv_reg TCGv_i32
+#define tcg_temp_new tcg_temp_new_i32
+#define tcg_global_reg_new tcg_global_reg_new_i32
+#define tcg_global_mem_new tcg_global_mem_new_i32
+#define tcg_temp_local_new tcg_temp_local_new_i32
+#define tcg_temp_free tcg_temp_free_i32
+
+#define tcg_gen_movi_reg tcg_gen_movi_i32
+#define tcg_gen_mov_reg tcg_gen_mov_i32
+#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
+#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
+#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
+#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
+#define tcg_gen_ld32u_reg tcg_gen_ld_i32
+#define tcg_gen_ld32s_reg tcg_gen_ld_i32
+#define tcg_gen_ld_reg tcg_gen_ld_i32
+#define tcg_gen_st8_reg tcg_gen_st8_i32
+#define tcg_gen_st16_reg tcg_gen_st16_i32
+#define tcg_gen_st32_reg tcg_gen_st32_i32
+#define tcg_gen_st_reg tcg_gen_st_i32
+#define tcg_gen_add_reg tcg_gen_add_i32
+#define tcg_gen_addi_reg tcg_gen_addi_i32
+#define tcg_gen_sub_reg tcg_gen_sub_i32
+#define tcg_gen_neg_reg tcg_gen_neg_i32
+#define tcg_gen_subfi_reg tcg_gen_subfi_i32
+#define tcg_gen_subi_reg tcg_gen_subi_i32
+#define tcg_gen_and_reg tcg_gen_and_i32
+#define tcg_gen_andi_reg tcg_gen_andi_i32
+#define tcg_gen_or_reg tcg_gen_or_i32
+#define tcg_gen_ori_reg tcg_gen_ori_i32
+#define tcg_gen_xor_reg tcg_gen_xor_i32
+#define tcg_gen_xori_reg tcg_gen_xori_i32
+#define tcg_gen_not_reg tcg_gen_not_i32
+#define tcg_gen_shl_reg tcg_gen_shl_i32
+#define tcg_gen_shli_reg tcg_gen_shli_i32
+#define tcg_gen_shr_reg tcg_gen_shr_i32
+#define tcg_gen_shri_reg tcg_gen_shri_i32
+#define tcg_gen_sar_reg tcg_gen_sar_i32
+#define tcg_gen_sari_reg tcg_gen_sari_i32
+#define tcg_gen_brcond_reg tcg_gen_brcond_i32
+#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
+#define tcg_gen_setcond_reg tcg_gen_setcond_i32
+#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
+#define tcg_gen_mul_reg tcg_gen_mul_i32
+#define tcg_gen_muli_reg tcg_gen_muli_i32
+#define tcg_gen_div_reg tcg_gen_div_i32
+#define tcg_gen_rem_reg tcg_gen_rem_i32
+#define tcg_gen_divu_reg tcg_gen_divu_i32
+#define tcg_gen_remu_reg tcg_gen_remu_i32
+#define tcg_gen_discard_reg tcg_gen_discard_i32
+#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
+#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
+#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
+#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
+#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
+#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
+#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
+#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
+#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
+#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
+#define tcg_gen_ext32u_reg tcg_gen_mov_i32
+#define tcg_gen_ext32s_reg tcg_gen_mov_i32
+#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
+#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
+#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
+#define tcg_gen_andc_reg tcg_gen_andc_i32
+#define tcg_gen_eqv_reg tcg_gen_eqv_i32
+#define tcg_gen_nand_reg tcg_gen_nand_i32
+#define tcg_gen_nor_reg tcg_gen_nor_i32
+#define tcg_gen_orc_reg tcg_gen_orc_i32
+#define tcg_gen_clz_reg tcg_gen_clz_i32
+#define tcg_gen_ctz_reg tcg_gen_ctz_i32
+#define tcg_gen_clzi_reg tcg_gen_clzi_i32
+#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
+#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
+#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
+#define tcg_gen_rotl_reg tcg_gen_rotl_i32
+#define tcg_gen_rotli_reg tcg_gen_rotli_i32
+#define tcg_gen_rotr_reg tcg_gen_rotr_i32
+#define tcg_gen_rotri_reg tcg_gen_rotri_i32
+#define tcg_gen_deposit_reg tcg_gen_deposit_i32
+#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
+#define tcg_gen_extract_reg tcg_gen_extract_i32
+#define tcg_gen_sextract_reg tcg_gen_sextract_i32
+#define tcg_const_reg tcg_const_i32
+#define tcg_const_local_reg tcg_const_local_i32
+#define tcg_gen_movcond_reg tcg_gen_movcond_i32
+#define tcg_gen_add2_reg tcg_gen_add2_i32
+#define tcg_gen_sub2_reg tcg_gen_sub2_i32
+#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
+#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
+#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
+#if UINTPTR_MAX == UINT32_MAX
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
+#else
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
+#endif
+#endif /* TARGET_REGISTER_BITS */
+
typedef struct DisasCond {
TCGCond c;
- TCGv a0, a1;
+ TCGv_reg a0, a1;
bool a0_is_n;
bool a1_is_0;
} DisasCond;
@@ -41,17 +271,22 @@ typedef struct DisasContext {
DisasContextBase base;
CPUState *cs;
- target_ulong iaoq_f;
- target_ulong iaoq_b;
- target_ulong iaoq_n;
- TCGv iaoq_n_var;
+ target_ureg iaoq_f;
+ target_ureg iaoq_b;
+ target_ureg iaoq_n;
+ TCGv_reg iaoq_n_var;
- int ntemps;
- TCGv temps[8];
+ int ntempr, ntempl;
+ TCGv_reg tempr[8];
+ TCGv_tl templ[4];
DisasCond null_cond;
TCGLabel *null_lab;
+ uint32_t insn;
+ uint32_t tb_flags;
+ int mmu_idx;
+ int privilege;
bool psw_n_nonzero;
} DisasContext;
@@ -67,12 +302,16 @@ typedef struct DisasContext {
updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1
+/* Similarly, but we want to return to the main loop immediately
+ to recognize unmasked interrupts. */
+#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
+
typedef struct DisasInsn {
uint32_t insn, mask;
DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
const struct DisasInsn *f);
union {
- void (*ttt)(TCGv, TCGv, TCGv);
+ void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
@@ -83,16 +322,18 @@ typedef struct DisasInsn {
} DisasInsn;
/* global register indexes */
-static TCGv cpu_gr[32];
-static TCGv cpu_iaoq_f;
-static TCGv cpu_iaoq_b;
-static TCGv cpu_sar;
-static TCGv cpu_psw_n;
-static TCGv cpu_psw_v;
-static TCGv cpu_psw_cb;
-static TCGv cpu_psw_cb_msb;
-static TCGv cpu_cr26;
-static TCGv cpu_cr27;
+static TCGv_reg cpu_gr[32];
+static TCGv_i64 cpu_sr[4];
+static TCGv_i64 cpu_srH;
+static TCGv_reg cpu_iaoq_f;
+static TCGv_reg cpu_iaoq_b;
+static TCGv_i64 cpu_iasq_f;
+static TCGv_i64 cpu_iasq_b;
+static TCGv_reg cpu_sar;
+static TCGv_reg cpu_psw_n;
+static TCGv_reg cpu_psw_v;
+static TCGv_reg cpu_psw_cb;
+static TCGv_reg cpu_psw_cb_msb;
#include "exec/gen-icount.h"
@@ -100,11 +341,9 @@ void hppa_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
- typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+ typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
static const GlobalVar vars[] = {
- DEF_VAR(sar),
- DEF_VAR(cr26),
- DEF_VAR(cr27),
+ { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
DEF_VAR(psw_n),
DEF_VAR(psw_v),
DEF_VAR(psw_cb),
@@ -122,6 +361,10 @@ void hppa_translate_init(void)
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
};
+ /* SR[4-7] are not global registers so that we can index them. */
+ static const char sr_names[5][4] = {
+ "sr0", "sr1", "sr2", "sr3", "srH"
+ };
int i;
@@ -131,11 +374,26 @@ void hppa_translate_init(void)
offsetof(CPUHPPAState, gr[i]),
gr_names[i]);
}
+ for (i = 0; i < 4; i++) {
+ cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, sr[i]),
+ sr_names[i]);
+ }
+ cpu_srH = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, sr[4]),
+ sr_names[4]);
for (i = 0; i < ARRAY_SIZE(vars); ++i) {
const GlobalVar *v = &vars[i];
*v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
}
+
+ cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, iasq_f),
+ "iasq_f");
+ cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, iasq_b),
+ "iasq_b");
}
static DisasCond cond_make_f(void)
@@ -158,26 +416,26 @@ static DisasCond cond_make_n(void)
};
}
-static DisasCond cond_make_0(TCGCond c, TCGv a0)
+static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
r.a0 = tcg_temp_new();
- tcg_gen_mov_tl(r.a0, a0);
+ tcg_gen_mov_reg(r.a0, a0);
return r;
}
-static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
+static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
DisasCond r = { .c = c };
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
r.a0 = tcg_temp_new();
- tcg_gen_mov_tl(r.a0, a0);
+ tcg_gen_mov_reg(r.a0, a0);
r.a1 = tcg_temp_new();
- tcg_gen_mov_tl(r.a1, a1);
+ tcg_gen_mov_reg(r.a1, a1);
return r;
}
@@ -186,7 +444,7 @@ static void cond_prep(DisasCond *cond)
{
if (cond->a1_is_0) {
cond->a1_is_0 = false;
- cond->a1 = tcg_const_tl(0);
+ cond->a1 = tcg_const_reg(0);
}
}
@@ -213,32 +471,41 @@ static void cond_free(DisasCond *cond)
}
}
-static TCGv get_temp(DisasContext *ctx)
+static TCGv_reg get_temp(DisasContext *ctx)
{
- unsigned i = ctx->ntemps++;
- g_assert(i < ARRAY_SIZE(ctx->temps));
- return ctx->temps[i] = tcg_temp_new();
+ unsigned i = ctx->ntempr++;
+ g_assert(i < ARRAY_SIZE(ctx->tempr));
+ return ctx->tempr[i] = tcg_temp_new();
}
-static TCGv load_const(DisasContext *ctx, target_long v)
+#ifndef CONFIG_USER_ONLY
+static TCGv_tl get_temp_tl(DisasContext *ctx)
{
- TCGv t = get_temp(ctx);
- tcg_gen_movi_tl(t, v);
+ unsigned i = ctx->ntempl++;
+ g_assert(i < ARRAY_SIZE(ctx->templ));
+ return ctx->templ[i] = tcg_temp_new_tl();
+}
+#endif
+
+static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
+{
+ TCGv_reg t = get_temp(ctx);
+ tcg_gen_movi_reg(t, v);
return t;
}
-static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
if (reg == 0) {
- TCGv t = get_temp(ctx);
- tcg_gen_movi_tl(t, 0);
+ TCGv_reg t = get_temp(ctx);
+ tcg_gen_movi_reg(t, 0);
return t;
} else {
return cpu_gr[reg];
}
}
-static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
return get_temp(ctx);
@@ -247,18 +514,18 @@ static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
}
}
-static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
+static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
if (ctx->null_cond.c != TCG_COND_NEVER) {
cond_prep(&ctx->null_cond);
- tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
+ tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
ctx->null_cond.a1, dest, t);
} else {
- tcg_gen_mov_tl(dest, t);
+ tcg_gen_mov_reg(dest, t);
}
}
-static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
+static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
if (reg != 0) {
save_or_nullify(ctx, cpu_gr[reg], t);
@@ -335,6 +602,21 @@ static void save_frd(unsigned rt, TCGv_i64 val)
tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
+static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
+{
+#ifdef CONFIG_USER_ONLY
+ tcg_gen_movi_i64(dest, 0);
+#else
+ if (reg < 4) {
+ tcg_gen_mov_i64(dest, cpu_sr[reg]);
+ } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
+ tcg_gen_mov_i64(dest, cpu_srH);
+ } else {
+ tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
+ }
+#endif
+}
+
/* Skip over the implementation of an insn that has been nullified.
Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
@@ -350,17 +632,17 @@ static void nullify_over(DisasContext *ctx)
if (ctx->null_cond.a0_is_n) {
ctx->null_cond.a0_is_n = false;
ctx->null_cond.a0 = tcg_temp_new();
- tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
+ tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
}
/* ... we clear it before branching over the implementation,
so that (1) it's clear after nullifying this insn and
(2) if this insn nullifies the next, PSW[N] is valid. */
if (ctx->psw_n_nonzero) {
ctx->psw_n_nonzero = false;
- tcg_gen_movi_tl(cpu_psw_n, 0);
+ tcg_gen_movi_reg(cpu_psw_n, 0);
}
- tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
+ tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
ctx->null_cond.a1, ctx->null_lab);
cond_free(&ctx->null_cond);
}
@@ -371,13 +653,13 @@ static void nullify_save(DisasContext *ctx)
{
if (ctx->null_cond.c == TCG_COND_NEVER) {
if (ctx->psw_n_nonzero) {
- tcg_gen_movi_tl(cpu_psw_n, 0);
+ tcg_gen_movi_reg(cpu_psw_n, 0);
}
return;
}
if (!ctx->null_cond.a0_is_n) {
cond_prep(&ctx->null_cond);
- tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
+ tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
ctx->null_cond.a0, ctx->null_cond.a1);
ctx->psw_n_nonzero = true;
}
@@ -390,7 +672,7 @@ static void nullify_save(DisasContext *ctx)
static void nullify_set(DisasContext *ctx, bool x)
{
if (ctx->psw_n_nonzero || x) {
- tcg_gen_movi_tl(cpu_psw_n, x);
+ tcg_gen_movi_reg(cpu_psw_n, x);
}
}
@@ -400,6 +682,10 @@ static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
{
TCGLabel *null_lab = ctx->null_lab;
+ /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
+ For UPDATED, we cannot update on the nullified path. */
+ assert(status != DISAS_IAQ_N_UPDATED);
+
if (likely(null_lab == NULL)) {
/* The current insn wasn't conditional or handled the condition
applied to it without a branch, so the (new) setting of
@@ -421,24 +707,22 @@ static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
gen_set_label(null_lab);
ctx->null_cond = cond_make_n();
}
-
- assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
if (status == DISAS_NORETURN) {
status = DISAS_NEXT;
}
return status;
}
-static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
+static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
if (unlikely(ival == -1)) {
- tcg_gen_mov_tl(dest, vval);
+ tcg_gen_mov_reg(dest, vval);
} else {
- tcg_gen_movi_tl(dest, ival);
+ tcg_gen_movi_reg(dest, ival);
}
}
-static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
+static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
return ctx->iaoq_f + disp + 8;
}
@@ -459,13 +743,29 @@ static DisasJumpType gen_excp(DisasContext *ctx, int exception)
return DISAS_NORETURN;
}
+static DisasJumpType gen_excp_iir(DisasContext *ctx, int exc)
+{
+ TCGv_reg tmp = tcg_const_reg(ctx->insn);
+ tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
+ tcg_temp_free(tmp);
+ return gen_excp(ctx, exc);
+}
+
static DisasJumpType gen_illegal(DisasContext *ctx)
{
nullify_over(ctx);
- return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
+ return nullify_end(ctx, gen_excp_iir(ctx, EXCP_ILL));
}
-static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+#define CHECK_MOST_PRIVILEGED(EXCP) \
+ do { \
+ if (ctx->privilege != 0) { \
+ nullify_over(ctx); \
+ return nullify_end(ctx, gen_excp_iir(ctx, EXCP)); \
+ } \
+ } while (0)
+
+static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
/* Suppress goto_tb in the case of single-stepping and IO. */
if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
@@ -485,12 +785,12 @@ static bool use_nullify_skip(DisasContext *ctx)
}
static void gen_goto_tb(DisasContext *ctx, int which,
- target_ulong f, target_ulong b)
+ target_ureg f, target_ureg b)
{
if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
tcg_gen_goto_tb(which);
- tcg_gen_movi_tl(cpu_iaoq_f, f);
- tcg_gen_movi_tl(cpu_iaoq_b, b);
+ tcg_gen_movi_reg(cpu_iaoq_f, f);
+ tcg_gen_movi_reg(cpu_iaoq_b, b);
tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
} else {
copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
@@ -505,9 +805,9 @@ static void gen_goto_tb(DisasContext *ctx, int which,
/* PA has a habit of taking the LSB of a field and using that as the sign,
with the rest of the field becoming the least significant bits. */
-static target_long low_sextract(uint32_t val, int pos, int len)
+static target_sreg low_sextract(uint32_t val, int pos, int len)
{
- target_ulong x = -(target_ulong)extract32(val, pos, 1);
+ target_ureg x = -(target_ureg)extract32(val, pos, 1);
x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
return x;
}
@@ -541,15 +841,22 @@ static unsigned assemble_rc64(uint32_t insn)
return r2 * 32 + r1 * 4 + r0;
}
-static target_long assemble_12(uint32_t insn)
+static unsigned assemble_sr3(uint32_t insn)
+{
+ unsigned s2 = extract32(insn, 13, 1);
+ unsigned s0 = extract32(insn, 14, 2);
+ return s2 * 4 + s0;
+}
+
+static target_sreg assemble_12(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 1) | extract32(insn, 2, 1);
x = (x << 10) | extract32(insn, 3, 10);
return x;
}
-static target_long assemble_16(uint32_t insn)
+static target_sreg assemble_16(uint32_t insn)
{
/* Take the name from PA2.0, which produces a 16-bit number
only with wide mode; otherwise a 14-bit number. Since we don't
@@ -557,28 +864,28 @@ static target_long assemble_16(uint32_t insn)
return low_sextract(insn, 0, 14);
}
-static target_long assemble_16a(uint32_t insn)
+static target_sreg assemble_16a(uint32_t insn)
{
/* Take the name from PA2.0, which produces a 14-bit shifted number
only with wide mode; otherwise a 12-bit shifted number. Since we
don't implement wide mode, this is always the 12-bit number. */
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 11) | extract32(insn, 2, 11);
return x << 2;
}
-static target_long assemble_17(uint32_t insn)
+static target_sreg assemble_17(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 5) | extract32(insn, 16, 5);
x = (x << 1) | extract32(insn, 2, 1);
x = (x << 10) | extract32(insn, 3, 10);
return x << 2;
}
-static target_long assemble_21(uint32_t insn)
+static target_sreg assemble_21(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 11) | extract32(insn, 1, 11);
x = (x << 2) | extract32(insn, 14, 2);
x = (x << 5) | extract32(insn, 16, 5);
@@ -586,9 +893,9 @@ static target_long assemble_21(uint32_t insn)
return x << 11;
}
-static target_long assemble_22(uint32_t insn)
+static target_sreg assemble_22(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 10) | extract32(insn, 16, 10);
x = (x << 1) | extract32(insn, 2, 1);
x = (x << 10) | extract32(insn, 3, 10);
@@ -602,10 +909,11 @@ static target_long assemble_22(uint32_t insn)
as a whole it would appear that these relations are similar to what
a traditional NZCV set of flags would produce. */
-static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
+static DisasCond do_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg cb_msb, TCGv_reg sv)
{
DisasCond cond;
- TCGv tmp;
+ TCGv_reg tmp;
switch (cf >> 1) {
case 0: /* Never / TR */
@@ -625,8 +933,8 @@ static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
break;
case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
tmp = tcg_temp_new();
- tcg_gen_neg_tl(tmp, cb_msb);
- tcg_gen_and_tl(tmp, tmp, res);
+ tcg_gen_neg_reg(tmp, cb_msb);
+ tcg_gen_and_reg(tmp, tmp, res);
cond = cond_make_0(TCG_COND_EQ, tmp);
tcg_temp_free(tmp);
break;
@@ -635,7 +943,7 @@ static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
break;
case 7: /* OD / EV */
tmp = tcg_temp_new();
- tcg_gen_andi_tl(tmp, res, 1);
+ tcg_gen_andi_reg(tmp, res, 1);
cond = cond_make_0(TCG_COND_NE, tmp);
tcg_temp_free(tmp);
break;
@@ -653,7 +961,8 @@ static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
can use the inputs directly. This can allow other computation to be
deleted as unused. */
-static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
+static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
DisasCond cond;
@@ -686,7 +995,7 @@ static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
/* Similar, but for logicals, where the carry and overflow bits are not
computed, and use of them is undefined. */
-static DisasCond do_log_cond(unsigned cf, TCGv res)
+static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
switch (cf >> 1) {
case 4: case 5: case 6:
@@ -698,7 +1007,7 @@ static DisasCond do_log_cond(unsigned cf, TCGv res)
/* Similar, but for shift/extract/deposit conditions. */
-static DisasCond do_sed_cond(unsigned orig, TCGv res)
+static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
unsigned c, f;
@@ -716,10 +1025,11 @@ static DisasCond do_sed_cond(unsigned orig, TCGv res)
/* Similar, but for unit conditions. */
-static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
+static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
{
DisasCond cond;
- TCGv tmp, cb = NULL;
+ TCGv_reg tmp, cb = NULL;
if (cf & 8) {
/* Since we want to test lots of carry-out bits all at once, do not
@@ -728,10 +1038,10 @@ static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
*/
cb = tcg_temp_new();
tmp = tcg_temp_new();
- tcg_gen_or_tl(cb, in1, in2);
- tcg_gen_and_tl(tmp, in1, in2);
- tcg_gen_andc_tl(cb, cb, res);
- tcg_gen_or_tl(cb, cb, tmp);
+ tcg_gen_or_reg(cb, in1, in2);
+ tcg_gen_and_reg(tmp, in1, in2);
+ tcg_gen_andc_reg(cb, cb, res);
+ tcg_gen_or_reg(cb, cb, tmp);
tcg_temp_free(tmp);
}
@@ -747,34 +1057,34 @@ static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
* https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
*/
tmp = tcg_temp_new();
- tcg_gen_subi_tl(tmp, res, 0x01010101u);
- tcg_gen_andc_tl(tmp, tmp, res);
- tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
+ tcg_gen_subi_reg(tmp, res, 0x01010101u);
+ tcg_gen_andc_reg(tmp, tmp, res);
+ tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
cond = cond_make_0(TCG_COND_NE, tmp);
tcg_temp_free(tmp);
break;
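        /* Worked example (illustrative only): res = 0x12003456 has a zero
           byte, so (res - 0x01010101) & ~res & 0x80808080 = 0x00800000 and
           the NE test fires; a word with no zero byte yields 0. */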
case 3: /* SHZ / NHZ */
tmp = tcg_temp_new();
- tcg_gen_subi_tl(tmp, res, 0x00010001u);
- tcg_gen_andc_tl(tmp, tmp, res);
- tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
+ tcg_gen_subi_reg(tmp, res, 0x00010001u);
+ tcg_gen_andc_reg(tmp, tmp, res);
+ tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
cond = cond_make_0(TCG_COND_NE, tmp);
tcg_temp_free(tmp);
break;
case 4: /* SDC / NDC */
- tcg_gen_andi_tl(cb, cb, 0x88888888u);
+ tcg_gen_andi_reg(cb, cb, 0x88888888u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
case 6: /* SBC / NBC */
- tcg_gen_andi_tl(cb, cb, 0x80808080u);
+ tcg_gen_andi_reg(cb, cb, 0x80808080u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
case 7: /* SHC / NHC */
- tcg_gen_andi_tl(cb, cb, 0x80008000u);
+ tcg_gen_andi_reg(cb, cb, 0x80008000u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
@@ -792,38 +1102,40 @@ static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
}
/* Compute signed overflow for addition. */
-static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
+static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
{
- TCGv sv = get_temp(ctx);
- TCGv tmp = tcg_temp_new();
+ TCGv_reg sv = get_temp(ctx);
+ TCGv_reg tmp = tcg_temp_new();
- tcg_gen_xor_tl(sv, res, in1);
- tcg_gen_xor_tl(tmp, in1, in2);
- tcg_gen_andc_tl(sv, sv, tmp);
+ tcg_gen_xor_reg(sv, res, in1);
+ tcg_gen_xor_reg(tmp, in1, in2);
+ tcg_gen_andc_reg(sv, sv, tmp);
tcg_temp_free(tmp);
return sv;
}
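/* Illustrative check (not in the patch): for 32-bit 0x7fffffff + 1,
   res = 0x80000000, so sv = (res ^ in1) & ~(in1 ^ in2) = 0x80000001;
   bit 31 set signals signed overflow. */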
/* Compute signed overflow for subtraction. */
-static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
+static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
{
- TCGv sv = get_temp(ctx);
- TCGv tmp = tcg_temp_new();
+ TCGv_reg sv = get_temp(ctx);
+ TCGv_reg tmp = tcg_temp_new();
- tcg_gen_xor_tl(sv, res, in1);
- tcg_gen_xor_tl(tmp, in1, in2);
- tcg_gen_and_tl(sv, sv, tmp);
+ tcg_gen_xor_reg(sv, res, in1);
+ tcg_gen_xor_reg(tmp, in1, in2);
+ tcg_gen_and_reg(sv, sv, tmp);
tcg_temp_free(tmp);
return sv;
}
-static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
- unsigned shift, bool is_l, bool is_tsv, bool is_tc,
- bool is_c, unsigned cf)
+static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned shift, bool is_l,
+ bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
- TCGv dest, cb, cb_msb, sv, tmp;
+ TCGv_reg dest, cb, cb_msb, sv, tmp;
unsigned c = cf >> 1;
DisasCond cond;
@@ -833,27 +1145,27 @@ static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
if (shift) {
tmp = get_temp(ctx);
- tcg_gen_shli_tl(tmp, in1, shift);
+ tcg_gen_shli_reg(tmp, in1, shift);
in1 = tmp;
}
if (!is_l || c == 4 || c == 5) {
- TCGv zero = tcg_const_tl(0);
+ TCGv_reg zero = tcg_const_reg(0);
cb_msb = get_temp(ctx);
- tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
+ tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
if (is_c) {
- tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
}
tcg_temp_free(zero);
if (!is_l) {
cb = get_temp(ctx);
- tcg_gen_xor_tl(cb, in1, in2);
- tcg_gen_xor_tl(cb, cb, dest);
+ tcg_gen_xor_reg(cb, in1, in2);
+ tcg_gen_xor_reg(cb, cb, dest);
}
} else {
- tcg_gen_add_tl(dest, in1, in2);
+ tcg_gen_add_reg(dest, in1, in2);
if (is_c) {
- tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
+ tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
}
}
@@ -872,7 +1184,7 @@ static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
if (is_tc) {
cond_prep(&cond);
tmp = tcg_temp_new();
- tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(cpu_env, tmp);
tcg_temp_free(tmp);
}
@@ -891,10 +1203,11 @@ static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
return DISAS_NEXT;
}
-static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
- bool is_tsv, bool is_b, bool is_tc, unsigned cf)
+static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, bool is_tsv, bool is_b,
+ bool is_tc, unsigned cf)
{
- TCGv dest, sv, cb, cb_msb, zero, tmp;
+ TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
unsigned c = cf >> 1;
DisasCond cond;
@@ -902,21 +1215,21 @@ static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
cb = tcg_temp_new();
cb_msb = tcg_temp_new();
- zero = tcg_const_tl(0);
+ zero = tcg_const_reg(0);
if (is_b) {
/* DEST,C = IN1 + ~IN2 + C. */
- tcg_gen_not_tl(cb, in2);
- tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
- tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
- tcg_gen_xor_tl(cb, cb, in1);
- tcg_gen_xor_tl(cb, cb, dest);
+ tcg_gen_not_reg(cb, in2);
+ tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
+ tcg_gen_xor_reg(cb, cb, in1);
+ tcg_gen_xor_reg(cb, cb, dest);
} else {
/* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
operations by seeding the high word with 1 and subtracting. */
- tcg_gen_movi_tl(cb_msb, 1);
- tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
- tcg_gen_eqv_tl(cb, in1, in2);
- tcg_gen_xor_tl(cb, cb, dest);
+ tcg_gen_movi_reg(cb_msb, 1);
+ tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
+ tcg_gen_eqv_reg(cb, in1, in2);
+ tcg_gen_xor_reg(cb, cb, dest);
}
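    /* Sanity example for the seeding trick (illustrative only): for 32-bit
       in1 = 5, in2 = 3, (1:in1) - (0:in2) = 1:2, so dest = 2 with
       cb_msb = 1, exactly in1 + ~in2 + 1 and its carry-out. */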
tcg_temp_free(zero);
@@ -940,7 +1253,7 @@ static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
if (is_tc) {
cond_prep(&cond);
tmp = tcg_temp_new();
- tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(cpu_env, tmp);
tcg_temp_free(tmp);
}
@@ -957,14 +1270,14 @@ static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
return DISAS_NEXT;
}
-static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
- TCGv in2, unsigned cf)
+static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf)
{
- TCGv dest, sv;
+ TCGv_reg dest, sv;
DisasCond cond;
dest = tcg_temp_new();
- tcg_gen_sub_tl(dest, in1, in2);
+ tcg_gen_sub_reg(dest, in1, in2);
/* Compute signed overflow if required. */
sv = NULL;
@@ -976,7 +1289,7 @@ static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
cond = do_sub_cond(cf, dest, in1, in2, sv);
/* Clear. */
- tcg_gen_movi_tl(dest, 0);
+ tcg_gen_movi_reg(dest, 0);
save_gpr(ctx, rt, dest);
tcg_temp_free(dest);
@@ -986,10 +1299,11 @@ static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
return DISAS_NEXT;
}
-static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
- unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
+static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
- TCGv dest = dest_gpr(ctx, rt);
+ TCGv_reg dest = dest_gpr(ctx, rt);
/* Perform the operation, and writeback. */
fn(dest, in1, in2);
@@ -1003,11 +1317,11 @@ static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
return DISAS_NEXT;
}
-static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
- TCGv in2, unsigned cf, bool is_tc,
- void (*fn)(TCGv, TCGv, TCGv))
+static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf, bool is_tc,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
- TCGv dest;
+ TCGv_reg dest;
DisasCond cond;
if (cf == 0) {
@@ -1022,9 +1336,9 @@ static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
cond = do_unit_cond(cf, dest, in1, in2);
if (is_tc) {
- TCGv tmp = tcg_temp_new();
+ TCGv_reg tmp = tcg_temp_new();
cond_prep(&cond);
- tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(cpu_env, tmp);
tcg_temp_free(tmp);
}
@@ -1036,140 +1350,171 @@ static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
return DISAS_NEXT;
}
+#ifndef CONFIG_USER_ONLY
+/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
+ from the top 2 bits of the base register. There are a few system
+ instructions that have a 3-bit space specifier, for which SR0 is
+ not special. To handle this, pass ~SP. */
+static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
+{
+ TCGv_ptr ptr;
+ TCGv_reg tmp;
+ TCGv_i64 spc;
+
+ if (sp != 0) {
+ if (sp < 0) {
+ sp = ~sp;
+ }
+ spc = get_temp_tl(ctx);
+ load_spr(ctx, spc, sp);
+ return spc;
+ }
+ if (ctx->tb_flags & TB_FLAG_SR_SAME) {
+ return cpu_srH;
+ }
+
+ ptr = tcg_temp_new_ptr();
+ tmp = tcg_temp_new();
+ spc = get_temp_tl(ctx);
+
+ tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
+ tcg_gen_andi_reg(tmp, tmp, 030);
+ tcg_gen_trunc_reg_ptr(ptr, tmp);
+ tcg_temp_free(tmp);
+
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
+ tcg_temp_free_ptr(ptr);
+
+ return spc;
+}
+#endif
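+/* Example of the SP == 0 path (illustrative only): with
+   TARGET_REGISTER_BITS = 32, base = 0xc0001234 gives
+   (base >> 27) & 030 = 24, the byte offset of sr[7] within sr[4..7] --
+   the usual PA short-pointer space selection. */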
+
+static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
+ unsigned rb, unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, bool is_phys)
+{
+ TCGv_reg base = load_gpr(ctx, rb);
+ TCGv_reg ofs;
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ ofs = get_temp(ctx);
+ tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
+ tcg_gen_add_reg(ofs, ofs, base);
+ } else if (disp || modify) {
+ ofs = get_temp(ctx);
+ tcg_gen_addi_reg(ofs, base, disp);
+ } else {
+ ofs = base;
+ }
+
+ *pofs = ofs;
+#ifdef CONFIG_USER_ONLY
+ *pgva = (modify <= 0 ? ofs : base);
+#else
+ TCGv_tl addr = get_temp_tl(ctx);
+ tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
+ if (ctx->tb_flags & PSW_W) {
+ tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
+ }
+ if (!is_phys) {
+ tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
+ }
+ *pgva = addr;
+#endif
+}
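+/* Typical use (illustrative only): an indexed load with scale = 2 yields
+   ofs = (gr[rx] << 2) + gr[rb]; system-mode builds then form the 64-bit
+   gva = space | ofs, while user-only builds return the offset directly. */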
+
/* Emit a memory load. The modify parameter should be
* < 0 for pre-modify,
* > 0 for post-modify,
* = 0 for no base register update.
*/
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify, TCGMemOp mop)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
- } else {
- tcg_gen_addi_tl(addr, base, disp);
- }
-
- if (modify == 0) {
- tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
- } else {
- tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
- MMU_USER_IDX, mop);
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify, TCGMemOp mop)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
- } else {
- tcg_gen_addi_tl(addr, base, disp);
- }
-
- if (modify == 0) {
- tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
- } else {
- tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
- MMU_USER_IDX, mop);
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify, TCGMemOp mop)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
- } else {
- tcg_gen_addi_tl(addr, base, disp);
- }
-
- tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
-
- if (modify != 0) {
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify, TCGMemOp mop)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
- } else {
- tcg_gen_addi_tl(addr, base, disp);
- }
-
- tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
-
- if (modify != 0) {
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
-#if TARGET_LONG_BITS == 64
-#define do_load_tl do_load_64
-#define do_store_tl do_store_64
+#if TARGET_REGISTER_BITS == 64
+#define do_load_reg do_load_64
+#define do_store_reg do_store_64
#else
-#define do_load_tl do_load_32
-#define do_store_tl do_store_32
+#define do_load_reg do_load_32
+#define do_store_reg do_store_32
#endif
static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify, TCGMemOp mop)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv dest;
+ TCGv_reg dest;
nullify_over(ctx);
@@ -1180,22 +1525,22 @@ static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
/* Make sure if RT == RB, we see the result of the load. */
dest = get_temp(ctx);
}
- do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
+ do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
save_gpr(ctx, rt, dest);
return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
{
TCGv_i32 tmp;
nullify_over(ctx);
tmp = tcg_temp_new_i32();
- do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
+ do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
save_frw_i32(rt, tmp);
tcg_temp_free_i32(tmp);
@@ -1207,15 +1552,15 @@ static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
}
static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
{
TCGv_i64 tmp;
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
save_frd(rt, tmp);
tcg_temp_free_i64(tmp);
@@ -1227,38 +1572,39 @@ static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
}
static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
- target_long disp, int modify, TCGMemOp mop)
+ target_sreg disp, unsigned sp,
+ int modify, TCGMemOp mop)
{
nullify_over(ctx);
- do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
+ do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
{
TCGv_i32 tmp;
nullify_over(ctx);
tmp = load_frw_i32(rt);
- do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
tcg_temp_free_i32(tmp);
return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
- int modify)
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
{
TCGv_i64 tmp;
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
tcg_temp_free_i64(tmp);
return nullify_end(ctx, DISAS_NEXT);
@@ -1370,7 +1716,7 @@ static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
/* Emit an unconditional branch to a direct target, which may or may not
have already had nullification handled. */
-static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
+static DisasJumpType do_dbranch(DisasContext *ctx, target_ureg dest,
unsigned link, bool is_n)
{
if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
@@ -1407,10 +1753,10 @@ static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
/* Emit a conditional branch to a direct target. If the branch itself
is nullified, we should have already used nullify_over. */
-static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
+static DisasJumpType do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
DisasCond *cond)
{
- target_ulong dest = iaoq_dest(ctx, disp);
+ target_ureg dest = iaoq_dest(ctx, disp);
TCGLabel *taken = NULL;
TCGCond c = cond->c;
bool n;
@@ -1427,7 +1773,7 @@ static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
taken = gen_new_label();
cond_prep(cond);
- tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
+ tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
cond_free(cond);
/* Not taken: Condition not satisfied; nullify on backward branches. */
@@ -1441,6 +1787,11 @@ static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
ctx->null_lab = NULL;
}
nullify_set(ctx, n);
+ if (ctx->iaoq_n == -1) {
+ /* The temporary iaoq_n_var died at the branch above.
+ Regenerate it here instead of saving it. */
+ tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ }
gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
}
@@ -1468,10 +1819,10 @@ static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
/* Emit an unconditional branch to an indirect target. This handles
nullification of the branch itself. */
-static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
+static DisasJumpType do_ibranch(DisasContext *ctx, TCGv_reg dest,
unsigned link, bool is_n)
{
- TCGv a0, a1, next, tmp;
+ TCGv_reg a0, a1, next, tmp;
TCGCond c;
assert(ctx->null_lab == NULL);
@@ -1481,12 +1832,18 @@ static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
}
next = get_temp(ctx);
- tcg_gen_mov_tl(next, dest);
- ctx->iaoq_n = -1;
- ctx->iaoq_n_var = next;
+ tcg_gen_mov_reg(next, dest);
if (is_n) {
+ if (use_nullify_skip(ctx)) {
+ tcg_gen_mov_reg(cpu_iaoq_f, next);
+ tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
+ nullify_set(ctx, 0);
+ return DISAS_IAQ_N_UPDATED;
+ }
ctx->null_cond.c = TCG_COND_ALWAYS;
}
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = next;
} else if (is_n && use_nullify_skip(ctx)) {
/* The (conditional) branch, B, nullifies the next insn, N,
and we're allowed to skip execution N (no single-step or
@@ -1500,12 +1857,12 @@ static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
/* We do have to handle the non-local temporary, DEST, before
branching. Since IAOQ_F is not really live at this point, we
can simply store DEST optimistically. Similarly with IAOQ_B. */
- tcg_gen_mov_tl(cpu_iaoq_f, dest);
- tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
+ tcg_gen_mov_reg(cpu_iaoq_f, dest);
+ tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
nullify_over(ctx);
if (link != 0) {
- tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
+ tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
}
tcg_gen_lookup_and_goto_ptr();
return nullify_end(ctx, DISAS_NEXT);
@@ -1519,19 +1876,19 @@ static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
next = get_temp(ctx);
copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
- tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
+ tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
ctx->iaoq_n = -1;
ctx->iaoq_n_var = next;
if (link != 0) {
- tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
+ tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
}
if (is_n) {
/* The branch nullifies the next insn, which means the state of N
after the branch is the inverse of the state of N that applied
to the branch. */
- tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
+ tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
cond_free(&ctx->null_cond);
ctx->null_cond = cond_make_n();
ctx->psw_n_nonzero = true;
@@ -1543,6 +1900,41 @@ static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
return DISAS_NEXT;
}
+/* Implement
+ * if (IAOQ_Front{30..31} < GR[b]{30..31})
+ * IAOQ_Next{30..31} ← GR[b]{30..31};
+ * else
+ * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
+ * which keeps the privilege level from being increased.
+ */
+static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
+{
+#ifdef CONFIG_USER_ONLY
+ return offset;
+#else
+ TCGv_reg dest;
+ switch (ctx->privilege) {
+ case 0:
+ /* Privilege 0 is maximum and is allowed to decrease. */
+ return offset;
+ case 3:
+        /* Privilege 3 is minimum and is never allowed to increase. */
+ dest = get_temp(ctx);
+ tcg_gen_ori_reg(dest, offset, 3);
+ break;
+ default:
+        dest = get_temp(ctx);
+        tcg_gen_andi_reg(dest, offset, -4);
+        tcg_gen_ori_reg(dest, dest, ctx->privilege);
+        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
+ break;
+ }
+ return dest;
+#endif
+}
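+/* Worked example (not part of the patch): at privilege 2, a target with
+   low bits 0 becomes (offset & -4) | 2, and the GTU movcond then keeps
+   the numerically larger value, so a branch can move to level 3 but
+   never to the more-privileged levels 0 or 1. */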
+
+#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
Therefore normal read or write is supposed to fail, but specific
offsets have kernel code mapped to raise permissions to implement
@@ -1559,7 +1951,7 @@ static DisasJumpType do_page_zero(DisasContext *ctx)
case TCG_COND_NEVER:
break;
case TCG_COND_ALWAYS:
- tcg_gen_movi_tl(cpu_psw_n, 0);
+ tcg_gen_movi_reg(cpu_psw_n, 0);
goto do_sigill;
default:
/* Since this is always the first (and only) insn within the
@@ -1577,7 +1969,7 @@ static DisasJumpType do_page_zero(DisasContext *ctx)
switch (ctx->iaoq_f) {
case 0x00: /* Null pointer call */
- gen_excp_1(EXCP_SIGSEGV);
+ gen_excp_1(EXCP_IMP);
return DISAS_NORETURN;
case 0xb0: /* LWS */
@@ -1585,9 +1977,9 @@ static DisasJumpType do_page_zero(DisasContext *ctx)
return DISAS_NORETURN;
case 0xe0: /* SET_THREAD_POINTER */
- tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
- tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
- tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
+ tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
+ tcg_gen_mov_reg(cpu_iaoq_f, cpu_gr[31]);
+ tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
return DISAS_IAQ_N_UPDATED;
case 0x100: /* SYSCALL */
@@ -1596,10 +1988,11 @@ static DisasJumpType do_page_zero(DisasContext *ctx)
default:
do_sigill:
- gen_excp_1(EXCP_SIGILL);
+ gen_excp_1(EXCP_ILL);
return DISAS_NORETURN;
}
}
+#endif
static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
@@ -1612,7 +2005,7 @@ static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
nullify_over(ctx);
- return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
+ return nullify_end(ctx, gen_excp_iir(ctx, EXCP_BREAK));
}
static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
@@ -1629,8 +2022,8 @@ static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
- TCGv tmp = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(tmp, ctx->iaoq_f);
+ TCGv_reg tmp = dest_gpr(ctx, rt);
+ tcg_gen_movi_reg(tmp, ctx->iaoq_f);
save_gpr(ctx, rt, tmp);
cond_free(&ctx->null_cond);
@@ -1641,11 +2034,17 @@ static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
- TCGv tmp = dest_gpr(ctx, rt);
+ unsigned rs = assemble_sr3(insn);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_reg t1 = tcg_temp_new();
- /* ??? We don't implement space registers. */
- tcg_gen_movi_tl(tmp, 0);
- save_gpr(ctx, rt, tmp);
+ load_spr(ctx, t0, rs);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_reg(t1, t0);
+
+ save_gpr(ctx, rt, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free_i64(t0);
cond_free(&ctx->null_cond);
return DISAS_NEXT;
@@ -1656,70 +2055,149 @@ static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
{
unsigned rt = extract32(insn, 0, 5);
unsigned ctl = extract32(insn, 21, 5);
- TCGv tmp;
+ TCGv_reg tmp;
+ DisasJumpType ret;
switch (ctl) {
- case 11: /* SAR */
+ case CR_SAR:
#ifdef TARGET_HPPA64
if (extract32(insn, 14, 1) == 0) {
/* MFSAR without ,W masks low 5 bits. */
tmp = dest_gpr(ctx, rt);
- tcg_gen_andi_tl(tmp, cpu_sar, 31);
+ tcg_gen_andi_reg(tmp, cpu_sar, 31);
save_gpr(ctx, rt, tmp);
- break;
+ goto done;
}
#endif
save_gpr(ctx, rt, cpu_sar);
- break;
- case 16: /* Interval Timer */
+ goto done;
+ case CR_IT: /* Interval Timer */
+ /* FIXME: Respect PSW_S bit. */
+ nullify_over(ctx);
tmp = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(tmp, 0); /* FIXME */
+    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ gen_helper_read_interval_timer(tmp);
+ gen_io_end();
+ ret = DISAS_IAQ_N_STALE;
+ } else {
+ gen_helper_read_interval_timer(tmp);
+ ret = DISAS_NEXT;
+ }
save_gpr(ctx, rt, tmp);
- break;
+ return nullify_end(ctx, ret);
case 26:
- save_gpr(ctx, rt, cpu_cr26);
- break;
case 27:
- save_gpr(ctx, rt, cpu_cr27);
break;
default:
/* All other control registers are privileged. */
- return gen_illegal(ctx);
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+ break;
}
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ save_gpr(ctx, rt, tmp);
+
+ done:
cond_free(&ctx->null_cond);
return DISAS_NEXT;
}
+static DisasJumpType trans_mtsp(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rr = extract32(insn, 16, 5);
+ unsigned rs = assemble_sr3(insn);
+ TCGv_i64 t64;
+
+ if (rs >= 5) {
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+ }
+ nullify_over(ctx);
+
+ t64 = tcg_temp_new_i64();
+ tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
+ tcg_gen_shli_i64(t64, t64, 32);
+
+ if (rs >= 4) {
+ tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
+ ctx->tb_flags &= ~TB_FLAG_SR_SAME;
+ } else {
+ tcg_gen_mov_i64(cpu_sr[rs], t64);
+ }
+ tcg_temp_free_i64(t64);
+
+ return nullify_end(ctx, DISAS_NEXT);
+}
+
static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rin = extract32(insn, 16, 5);
unsigned ctl = extract32(insn, 21, 5);
- TCGv tmp;
+ TCGv_reg reg = load_gpr(ctx, rin);
+ TCGv_reg tmp;
- if (ctl == 11) { /* SAR */
+ if (ctl == CR_SAR) {
tmp = tcg_temp_new();
- tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
+ tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
save_or_nullify(ctx, cpu_sar, tmp);
tcg_temp_free(tmp);
- } else {
- /* All other control registers are privileged or read-only. */
- return gen_illegal(ctx);
+
+ cond_free(&ctx->null_cond);
+ return DISAS_NEXT;
}
- cond_free(&ctx->null_cond);
- return DISAS_NEXT;
+ /* All other control registers are privileged or read-only. */
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ DisasJumpType ret = DISAS_NEXT;
+
+ nullify_over(ctx);
+ switch (ctl) {
+ case CR_IT:
+ gen_helper_write_interval_timer(cpu_env, reg);
+ break;
+ case CR_EIRR:
+ gen_helper_write_eirr(cpu_env, reg);
+ break;
+ case CR_EIEM:
+ gen_helper_write_eiem(cpu_env, reg);
+ ret = DISAS_IAQ_N_STALE_EXIT;
+ break;
+
+ case CR_IIASQ:
+ case CR_IIAOQ:
+ /* FIXME: Respect PSW_Q bit */
+ /* The write advances the queue and stores to the back element. */
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env,
+ offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
+ tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_reg(reg, cpu_env,
+ offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
+ break;
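+        /* E.g. (illustrative): two successive writes here leave the first
+           value in cr[] (queue front) and the second in cr_back[]. */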
+
+ default:
+ tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ break;
+ }
+ return nullify_end(ctx, ret);
+#endif
}
static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rin = extract32(insn, 16, 5);
- TCGv tmp = tcg_temp_new();
+ TCGv_reg tmp = tcg_temp_new();
- tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
- tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
+ tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
+ tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
save_or_nullify(ctx, cpu_sar, tmp);
tcg_temp_free(tmp);
@@ -1731,27 +2209,153 @@ static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
- TCGv dest = dest_gpr(ctx, rt);
+ TCGv_reg dest = dest_gpr(ctx, rt);
- /* Since we don't implement space registers, this returns zero. */
- tcg_gen_movi_tl(dest, 0);
+#ifdef CONFIG_USER_ONLY
+ /* We don't implement space registers in user mode. */
+ tcg_gen_movi_reg(dest, 0);
+#else
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned sp = extract32(insn, 14, 2);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_reg(dest, t0);
+
+ tcg_temp_free_i64(t0);
+#endif
save_gpr(ctx, rt, dest);
cond_free(&ctx->null_cond);
return DISAS_NEXT;
}
+#ifndef CONFIG_USER_ONLY
+/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
+static target_ureg extract_sm_imm(uint32_t insn)
+{
+ target_ureg val = extract32(insn, 16, 10);
+
+ if (val & PSW_SM_E) {
+ val = (val & ~PSW_SM_E) | PSW_E;
+ }
+ if (val & PSW_SM_W) {
+ val = (val & ~PSW_SM_W) | PSW_W;
+ }
+ return val;
+}
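+/* Illustrative mapping (not part of the patch): an SSM/RSM immediate with
+   PSW_SM_E set is rewritten to mask the architectural PSW_E bit instead,
+   and PSW_SM_W likewise maps to PSW_W; all other bits pass through. */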
+
+static DisasJumpType trans_rsm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ target_ureg sm = extract_sm_imm(insn);
+ TCGv_reg tmp;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_andi_reg(tmp, tmp, ~sm);
+ gen_helper_swap_system_mask(tmp, cpu_env, tmp);
+ save_gpr(ctx, rt, tmp);
+
+ /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
+ return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
+}
+
+static DisasJumpType trans_ssm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ target_ureg sm = extract_sm_imm(insn);
+ TCGv_reg tmp;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_ori_reg(tmp, tmp, sm);
+ gen_helper_swap_system_mask(tmp, cpu_env, tmp);
+ save_gpr(ctx, rt, tmp);
+
+ /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
+ return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
+}
+
+static DisasJumpType trans_mtsm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rr = extract32(insn, 16, 5);
+ TCGv_reg tmp, reg;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ reg = load_gpr(ctx, rr);
+ tmp = get_temp(ctx);
+ gen_helper_swap_system_mask(tmp, cpu_env, reg);
+
+ /* Exit the TB to recognize new interrupts. */
+ return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
+}
+
+static DisasJumpType trans_rfi(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned comp = extract32(insn, 5, 4);
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ if (comp == 5) {
+ gen_helper_rfi_r(cpu_env);
+ } else {
+ gen_helper_rfi(cpu_env);
+ }
+ if (ctx->base.singlestep_enabled) {
+ gen_excp_1(EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+
+ /* Exit the TB to recognize new interrupts. */
+ return nullify_end(ctx, DISAS_NORETURN);
+}
+
+static DisasJumpType gen_hlt(DisasContext *ctx, int reset)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+ if (reset) {
+ gen_helper_reset(cpu_env);
+ } else {
+ gen_helper_halt(cpu_env);
+ }
+ return nullify_end(ctx, DISAS_NORETURN);
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const DisasInsn table_system[] = {
{ 0x00000000u, 0xfc001fe0u, trans_break },
- /* We don't implement space register, so MTSP is a nop. */
- { 0x00001820u, 0xffe01fffu, trans_nop },
+ { 0x00001820u, 0xffe01fffu, trans_mtsp },
{ 0x00001840u, 0xfc00ffffu, trans_mtctl },
{ 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
{ 0x000014a0u, 0xffffffe0u, trans_mfia },
{ 0x000004a0u, 0xffff1fe0u, trans_mfsp },
- { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
- { 0x00000400u, 0xffffffffu, trans_sync },
+ { 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
+ { 0x00000400u, 0xffffffffu, trans_sync }, /* sync */
+ { 0x00100400u, 0xffffffffu, trans_sync }, /* syncdma */
{ 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
+#ifndef CONFIG_USER_ONLY
+ { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
+ { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
+ { 0x00001860u, 0xffe0ffffu, trans_mtsm },
+ { 0x00000c00u, 0xfffffe1fu, trans_rfi },
+#endif
};
static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
@@ -1759,12 +2363,12 @@ static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
{
unsigned rb = extract32(insn, 21, 5);
unsigned rx = extract32(insn, 16, 5);
- TCGv dest = dest_gpr(ctx, rb);
- TCGv src1 = load_gpr(ctx, rb);
- TCGv src2 = load_gpr(ctx, rx);
+ TCGv_reg dest = dest_gpr(ctx, rb);
+ TCGv_reg src1 = load_gpr(ctx, rb);
+ TCGv_reg src2 = load_gpr(ctx, rx);
/* The only thing we need to do is the base register modification. */
- tcg_gen_add_tl(dest, src1, src2);
+ tcg_gen_add_reg(dest, src1, src2);
save_gpr(ctx, rb, dest);
cond_free(&ctx->null_cond);
@@ -1775,23 +2379,158 @@ static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
+ unsigned sp = extract32(insn, 14, 2);
+ unsigned rr = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
unsigned is_write = extract32(insn, 6, 1);
- TCGv dest;
+ unsigned is_imm = extract32(insn, 13, 1);
+ TCGv_reg dest, ofs;
+ TCGv_i32 level, want;
+ TCGv_tl addr;
nullify_over(ctx);
- /* ??? Do something with priv level operand. */
dest = dest_gpr(ctx, rt);
- if (is_write) {
- gen_helper_probe_w(dest, load_gpr(ctx, rb));
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
+
+ if (is_imm) {
+ level = tcg_const_i32(extract32(insn, 16, 2));
} else {
- gen_helper_probe_r(dest, load_gpr(ctx, rb));
+ level = tcg_temp_new_i32();
+ tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
+ tcg_gen_andi_i32(level, level, 3);
}
+ want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);
+
+ gen_helper_probe(dest, cpu_env, addr, level, want);
+
+ tcg_temp_free_i32(want);
+ tcg_temp_free_i32(level);
+
save_gpr(ctx, rt, dest);
return nullify_end(ctx, DISAS_NEXT);
}
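/* Note (assumption, for illustration): gen_helper_probe is expected to
   return 1 in DEST when an access of type WANT at privilege LEVEL would
   succeed at ADDR, and 0 otherwise, matching PROBE's architected result. */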
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType trans_ixtlbx(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned sp;
+ unsigned rr = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned is_data = insn & 0x1000;
+ unsigned is_addr = insn & 0x40;
+ TCGv_tl addr;
+ TCGv_reg ofs, reg;
+
+ if (is_data) {
+ sp = extract32(insn, 14, 2);
+ } else {
+ sp = ~assemble_sr3(insn);
+ }
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
+ reg = load_gpr(ctx, rr);
+ if (is_addr) {
+ gen_helper_itlba(cpu_env, addr, reg);
+ } else {
+ gen_helper_itlbp(cpu_env, addr, reg);
+ }
+
+ /* Exit TB for ITLB change if mmu is enabled. This *should* not be
+ the case, since the OS TLB fill handler runs with mmu disabled. */
+ return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
+ ? DISAS_IAQ_N_STALE : DISAS_NEXT);
+}
+
+static DisasJumpType trans_pxtlbx(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sp;
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned is_data = insn & 0x1000;
+ unsigned is_local = insn & 0x40;
+ TCGv_tl addr;
+ TCGv_reg ofs;
+
+ if (is_data) {
+ sp = extract32(insn, 14, 2);
+ } else {
+ sp = ~assemble_sr3(insn);
+ }
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
+ if (m) {
+ save_gpr(ctx, rb, ofs);
+ }
+ if (is_local) {
+ gen_helper_ptlbe(cpu_env);
+ } else {
+ gen_helper_ptlb(cpu_env, addr);
+ }
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
+ ? DISAS_IAQ_N_STALE : DISAS_NEXT);
+}
+
+static DisasJumpType trans_lpa(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sp = extract32(insn, 14, 2);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ TCGv_tl vaddr;
+ TCGv_reg ofs, paddr;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
+
+ paddr = tcg_temp_new();
+ gen_helper_lpa(paddr, cpu_env, vaddr);
+
+    /* Note that the physical address result overrides the base
+       modification. */
+ if (m) {
+ save_gpr(ctx, rb, ofs);
+ }
+ save_gpr(ctx, rt, paddr);
+ tcg_temp_free(paddr);
+
+ return nullify_end(ctx, DISAS_NEXT);
+}
+
+static DisasJumpType trans_lci(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv_reg ci;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* The Coherence Index is an implementation-defined function of the
+ physical address. Two addresses with the same CI have a coherent
+       view of the cache.  Our implementation returns 0 for all
+       addresses, since the entire address space is coherent.  */
+ ci = tcg_const_reg(0);
+ save_gpr(ctx, rt, ci);
+ tcg_temp_free(ci);
+
+ return DISAS_NEXT;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const DisasInsn table_mem_mgmt[] = {
{ 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
{ 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
@@ -1808,6 +2547,18 @@ static const DisasInsn table_mem_mgmt[] = {
{ 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
{ 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
{ 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
+#ifndef CONFIG_USER_ONLY
+ { 0x04000000u, 0xfc001fffu, trans_ixtlbx }, /* iitlbp */
+ { 0x04000040u, 0xfc001fffu, trans_ixtlbx }, /* iitlba */
+ { 0x04001000u, 0xfc001fffu, trans_ixtlbx }, /* idtlbp */
+ { 0x04001040u, 0xfc001fffu, trans_ixtlbx }, /* idtlba */
+ { 0x04000200u, 0xfc001fdfu, trans_pxtlbx }, /* pitlb */
+ { 0x04000240u, 0xfc001fdfu, trans_pxtlbx }, /* pitlbe */
+ { 0x04001200u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlb */
+ { 0x04001240u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlbe */
+ { 0x04001340u, 0xfc003fc0u, trans_lpa },
+ { 0x04001300u, 0xfc003fe0u, trans_lci },
+#endif
};
static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
@@ -1819,7 +2570,7 @@ static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
unsigned ext = extract32(insn, 8, 4);
unsigned shift = extract32(insn, 6, 2);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
bool is_c = false;
bool is_l = false;
bool is_tc = false;
@@ -1862,7 +2613,7 @@ static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
unsigned cf = extract32(insn, 12, 4);
unsigned ext = extract32(insn, 6, 6);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
bool is_b = false;
bool is_tc = false;
bool is_tsv = false;
@@ -1906,7 +2657,7 @@ static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
DisasJumpType ret;
if (cf) {
@@ -1926,8 +2677,8 @@ static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
unsigned rt = extract32(insn, 0, 5);
if (r1 == 0) {
- TCGv dest = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(dest, 0);
+ TCGv_reg dest = dest_gpr(ctx, rt);
+ tcg_gen_movi_reg(dest, 0);
save_gpr(ctx, rt, dest);
} else {
save_gpr(ctx, rt, cpu_gr[r1]);
@@ -1943,7 +2694,7 @@ static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
DisasJumpType ret;
if (cf) {
@@ -1962,7 +2713,7 @@ static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
DisasJumpType ret;
if (cf) {
@@ -1970,7 +2721,7 @@ static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
}
tcg_r1 = load_gpr(ctx, r1);
tcg_r2 = load_gpr(ctx, r2);
- ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
+ ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
return nullify_end(ctx, ret);
}
@@ -1982,7 +2733,7 @@ static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
unsigned cf = extract32(insn, 12, 4);
unsigned is_tc = extract32(insn, 6, 1);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2, tmp;
+ TCGv_reg tcg_r1, tcg_r2, tmp;
DisasJumpType ret;
if (cf) {
@@ -1991,8 +2742,8 @@ static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
tcg_r1 = load_gpr(ctx, r1);
tcg_r2 = load_gpr(ctx, r2);
tmp = get_temp(ctx);
- tcg_gen_not_tl(tmp, tcg_r2);
- ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
+ tcg_gen_not_reg(tmp, tcg_r2);
+ ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
return nullify_end(ctx, ret);
}
@@ -2003,20 +2754,20 @@ static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
unsigned cf = extract32(insn, 12, 4);
unsigned is_i = extract32(insn, 6, 1);
unsigned rt = extract32(insn, 0, 5);
- TCGv tmp;
+ TCGv_reg tmp;
DisasJumpType ret;
nullify_over(ctx);
tmp = get_temp(ctx);
- tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
+ tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
if (!is_i) {
- tcg_gen_not_tl(tmp, tmp);
+ tcg_gen_not_reg(tmp, tmp);
}
- tcg_gen_andi_tl(tmp, tmp, 0x11111111);
- tcg_gen_muli_tl(tmp, tmp, 6);
+ tcg_gen_andi_reg(tmp, tmp, 0x11111111);
+ tcg_gen_muli_reg(tmp, tmp, 6);
ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
- is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
+ is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
return nullify_end(ctx, ret);
}
@@ -2028,7 +2779,7 @@ static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv dest, add1, add2, addc, zero, in1, in2;
+ TCGv_reg dest, add1, add2, addc, zero, in1, in2;
nullify_over(ctx);
@@ -2039,19 +2790,19 @@ static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
add2 = tcg_temp_new();
addc = tcg_temp_new();
dest = tcg_temp_new();
- zero = tcg_const_tl(0);
+ zero = tcg_const_reg(0);
/* Form R1 << 1 | PSW[CB]{8}. */
- tcg_gen_add_tl(add1, in1, in1);
- tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
+ tcg_gen_add_reg(add1, in1, in1);
+ tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
/* Add or subtract R2, depending on PSW[V]. Proper computation of
carry{8} requires that we subtract via + ~R2 + 1, as described in
the manual. By extracting and masking V, we can produce the
proper inputs to the addition without movcond. */
- tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
- tcg_gen_xor_tl(add2, in2, addc);
- tcg_gen_andi_tl(addc, addc, 1);
+ tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
+ tcg_gen_xor_reg(add2, in2, addc);
+ tcg_gen_andi_reg(addc, addc, 1);
/* ??? This is only correct for 32-bit. */
tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
@@ -2063,16 +2814,16 @@ static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
save_gpr(ctx, rt, dest);
/* Write back PSW[CB]. */
- tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
- tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
+ tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
+ tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
/* Write back PSW[V] for the division step. */
- tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
- tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
+ tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
+ tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
/* Install the new nullification. */
if (cf) {
- TCGv sv = NULL;
+ TCGv_reg sv = NULL;
if (cf >> 1 == 6) {
/* ??? The lshift is supposed to contribute to overflow. */
sv = do_add_sv(ctx, dest, add1, add2);
@@ -2087,13 +2838,49 @@ static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
return nullify_end(ctx, DISAS_NEXT);
}
+#ifndef CONFIG_USER_ONLY
+/* These are QEMU extensions and are nops in the real architecture:
+ *
+ * or %r10,%r10,%r10 -- idle loop; wait for interrupt
+ * or %r31,%r31,%r31 -- death loop; offline cpu, currently implemented
+ *                      as idle.
+ */
+static DisasJumpType trans_pause(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ TCGv_i32 tmp;
+
+ /* No need to check for supervisor, as userland can only pause
+ until the next timer interrupt. */
+ nullify_over(ctx);
+
+ /* Advance the instruction queue. */
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+ nullify_set(ctx, 0);
+
+ /* Tell the qemu main loop to halt until this cpu has work. */
+ tmp = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
+ offsetof(CPUState, halted));
+ tcg_temp_free_i32(tmp);
+ gen_excp_1(EXCP_HALTED);
+
+ return nullify_end(ctx, DISAS_NORETURN);
+}
+#endif
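+/* Usage note (illustrative): a guest idle loop assembled as
+   "or %r10,%r10,%r10" (0x094a024a) matches the trans_pause entry below and
+   halts the vcpu until the next interrupt instead of spinning. */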
+
static const DisasInsn table_arith_log[] = {
{ 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
{ 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
- { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
- { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
- { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
- { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
+#ifndef CONFIG_USER_ONLY
+ { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
+ { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
+#endif
+ { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
+ { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
+ { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
+ { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
{ 0x08000880u, 0xfc000fe0u, trans_cmpclr },
{ 0x08000380u, 0xfc000fe0u, trans_uxor },
{ 0x08000980u, 0xfc000fa0u, trans_uaddcm },
@@ -2107,13 +2894,13 @@ static const DisasInsn table_arith_log[] = {
static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
{
- target_long im = low_sextract(insn, 0, 11);
+ target_sreg im = low_sextract(insn, 0, 11);
unsigned e1 = extract32(insn, 11, 1);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
unsigned o1 = extract32(insn, 26, 1);
- TCGv tcg_im, tcg_r2;
+ TCGv_reg tcg_im, tcg_r2;
DisasJumpType ret;
if (cf) {
@@ -2129,12 +2916,12 @@ static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
{
- target_long im = low_sextract(insn, 0, 11);
+ target_sreg im = low_sextract(insn, 0, 11);
unsigned e1 = extract32(insn, 11, 1);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
- TCGv tcg_im, tcg_r2;
+ TCGv_reg tcg_im, tcg_r2;
DisasJumpType ret;
if (cf) {
@@ -2150,11 +2937,11 @@ static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
{
- target_long im = low_sextract(insn, 0, 11);
+ target_sreg im = low_sextract(insn, 0, 11);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
- TCGv tcg_im, tcg_r2;
+ TCGv_reg tcg_im, tcg_r2;
DisasJumpType ret;
if (cf) {
@@ -2175,12 +2962,13 @@ static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned sz = extract32(insn, 6, 2);
unsigned a = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
int disp = low_sextract(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
int modify = (m ? (a ? -1 : 1) : 0);
TCGMemOp mop = MO_TE | sz;
- return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
+ return do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
}
static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
@@ -2190,11 +2978,12 @@ static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned sz = extract32(insn, 6, 2);
unsigned u = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
TCGMemOp mop = MO_TE | sz;
- return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
+ return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
}
static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
@@ -2204,12 +2993,13 @@ static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned sz = extract32(insn, 6, 2);
unsigned a = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rr = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
int modify = (m ? (a ? -1 : 1) : 0);
TCGMemOp mop = MO_TE | sz;
- return do_store(ctx, rr, rb, disp, modify, mop);
+ return do_store(ctx, rr, rb, disp, sp, modify, mop);
}
static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
@@ -2219,16 +3009,16 @@ static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned i = extract32(insn, 12, 1);
unsigned au = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
- TCGv zero, addr, base, dest;
+ TCGv_reg zero, dest, ofs;
+ TCGv_tl addr;
int modify, disp = 0, scale = 0;
nullify_over(ctx);
- /* ??? Share more code with do_load and do_load_{32,64}. */
-
if (i) {
modify = (m ? (au ? -1 : 1) : 0);
disp = low_sextract(rx, 0, 5);
@@ -2240,27 +3030,19 @@ static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
}
}
if (modify) {
- /* Base register modification. Make sure if RT == RB, we see
- the result of the load. */
+ /* Base register modification. Make sure if RT == RB,
+ we see the result of the load. */
dest = get_temp(ctx);
} else {
dest = dest_gpr(ctx, rt);
}
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
- if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
- } else {
- tcg_gen_addi_tl(addr, base, disp);
- }
-
- zero = tcg_const_tl(0);
- tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
- zero, MMU_USER_IDX, mop);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ zero = tcg_const_reg(0);
+ tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
if (modify) {
- save_gpr(ctx, rb, addr);
+ save_gpr(ctx, rb, ofs);
}
save_gpr(ctx, rt, dest);
@@ -2270,23 +3052,20 @@ static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
- target_long disp = low_sextract(insn, 0, 5);
+ target_sreg disp = low_sextract(insn, 0, 5);
unsigned m = extract32(insn, 5, 1);
unsigned a = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rt = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
- TCGv addr, val;
+ TCGv_reg ofs, val;
+ TCGv_tl addr;
nullify_over(ctx);
- addr = tcg_temp_new();
- if (m || disp == 0) {
- tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
- } else {
- tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
- }
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
+ ctx->mmu_idx == MMU_PHYS_IDX);
val = load_gpr(ctx, rt);
-
if (a) {
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
gen_helper_stby_e_parallel(cpu_env, addr, val);
@@ -2302,30 +3081,83 @@ static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
}
if (m) {
- tcg_gen_addi_tl(addr, addr, disp);
- tcg_gen_andi_tl(addr, addr, ~3);
- save_gpr(ctx, rb, addr);
+ tcg_gen_andi_reg(ofs, ofs, ~3);
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
return nullify_end(ctx, DISAS_NEXT);
}
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+ DisasJumpType ret;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* ??? needs fixing for hppa64 -- ldda does not follow the same
+ format wrt the sub-opcode in bits 6:9. */
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ ret = trans_ld_idx_i(ctx, insn, di);
+ ctx->mmu_idx = hold_mmu_idx;
+ return ret;
+}
+
+static DisasJumpType trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+ DisasJumpType ret;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* ??? needs fixing for hppa64 -- ldda does not follow the same
+ format wrt the sub-opcode in bits 6:9. */
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ ret = trans_ld_idx_x(ctx, insn, di);
+ ctx->mmu_idx = hold_mmu_idx;
+ return ret;
+}
+
+static DisasJumpType trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+ DisasJumpType ret;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* ??? needs fixing for hppa64 -- ldda does not follow the same
+ format wrt the sub-opcode in bits 6:9. */
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ ret = trans_st_idx_i(ctx, insn, di);
+ ctx->mmu_idx = hold_mmu_idx;
+ return ret;
+}
+#endif
+
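
The three LDWA/STWA wrappers above repeat the same save/override/restore dance on ctx->mmu_idx. A hypothetical refactoring (not in the patch) that makes the shared pattern explicit, reusing the patch's own types and macros:

    /* Run an ordinary load/store translator with physical addressing.
       Sketch only.  */
    static DisasJumpType do_trans_abs(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di,
                                      DisasJumpType (*trans)(DisasContext *,
                                                             uint32_t,
                                                             const DisasInsn *))
    {
        int hold_mmu_idx = ctx->mmu_idx;
        DisasJumpType ret;

        CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
        ctx->mmu_idx = MMU_PHYS_IDX;
        ret = trans(ctx, insn, di);
        ctx->mmu_idx = hold_mmu_idx;
        return ret;
    }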
static const DisasInsn table_index_mem[] = {
{ 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
{ 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
{ 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
{ 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
{ 0x0c001300u, 0xfc0013c0, trans_stby },
+#ifndef CONFIG_USER_ONLY
+ { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
+ { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
+ { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
+#endif
};
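
Each entry pairs a match value with a mask of the decoded bits; dispatch (see translate_table_int later in this patch) is a linear scan of the table. The test applied to each entry is simply:

    #include <stdbool.h>
    #include <stdint.h>

    /* Minimal sketch of the decode test.  */
    static bool insn_matches(uint32_t insn, uint32_t match, uint32_t mask)
    {
        return (insn & mask) == match;
    }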
static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
{
unsigned rt = extract32(insn, 21, 5);
- target_long i = assemble_21(insn);
- TCGv tcg_rt = dest_gpr(ctx, rt);
+ target_sreg i = assemble_21(insn);
+ TCGv_reg tcg_rt = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(tcg_rt, i);
+ tcg_gen_movi_reg(tcg_rt, i);
save_gpr(ctx, rt, tcg_rt);
cond_free(&ctx->null_cond);
@@ -2335,11 +3167,11 @@ static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
{
unsigned rt = extract32(insn, 21, 5);
- target_long i = assemble_21(insn);
- TCGv tcg_rt = load_gpr(ctx, rt);
- TCGv tcg_r1 = dest_gpr(ctx, 1);
+ target_sreg i = assemble_21(insn);
+ TCGv_reg tcg_rt = load_gpr(ctx, rt);
+ TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
- tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
+ tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
save_gpr(ctx, 1, tcg_r1);
cond_free(&ctx->null_cond);
@@ -2350,15 +3182,15 @@ static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16(insn);
- TCGv tcg_rt = dest_gpr(ctx, rt);
+ target_sreg i = assemble_16(insn);
+ TCGv_reg tcg_rt = dest_gpr(ctx, rt);
/* Special case rb == 0, for the LDI pseudo-op.
The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
if (rb == 0) {
- tcg_gen_movi_tl(tcg_rt, i);
+ tcg_gen_movi_reg(tcg_rt, i);
} else {
- tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
+ tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
}
save_gpr(ctx, rt, tcg_rt);
cond_free(&ctx->null_cond);
@@ -2371,27 +3203,30 @@ static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16(insn);
+ unsigned sp = extract32(insn, 14, 2);
+ target_sreg i = assemble_16(insn);
- return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
+ return do_load(ctx, rt, rb, 0, 0, i, sp,
+ is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}
static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16a(insn);
+ unsigned sp = extract32(insn, 14, 2);
+ target_sreg i = assemble_16a(insn);
unsigned ext2 = extract32(insn, 1, 2);
switch (ext2) {
case 0:
case 1:
/* FLDW without modification. */
- return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
+ return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
case 2:
/* LDW with modification. Note that the sign of I selects
post-dec vs pre-inc. */
- return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
+ return do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
default:
return gen_illegal(ctx);
}
@@ -2399,14 +3234,15 @@ static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
{
- target_long i = assemble_16a(insn);
+ target_sreg i = assemble_16a(insn);
unsigned t1 = extract32(insn, 1, 1);
unsigned a = extract32(insn, 2, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned t0 = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
/* FLDW with modification. */
- return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
+ return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
}
static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
@@ -2414,26 +3250,28 @@ static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16(insn);
+ unsigned sp = extract32(insn, 14, 2);
+ target_sreg i = assemble_16(insn);
- return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
+ return do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}
static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16a(insn);
+ unsigned sp = extract32(insn, 14, 2);
+ target_sreg i = assemble_16a(insn);
unsigned ext2 = extract32(insn, 1, 2);
switch (ext2) {
case 0:
case 1:
/* FSTW without modification. */
- return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
+ return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
case 2:
- /* LDW with modification. */
- return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
+ /* STW with modification. */
+ return do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
default:
return gen_illegal(ctx);
}
@@ -2441,14 +3279,15 @@ static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
{
- target_long i = assemble_16a(insn);
+ target_sreg i = assemble_16a(insn);
unsigned t1 = extract32(insn, 1, 1);
unsigned a = extract32(insn, 2, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned t0 = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
/* FSTW with modification. */
- return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
+ return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
}
static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
@@ -2460,6 +3299,7 @@ static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
/* unsigned cc = extract32(insn, 10, 2); */
unsigned i = extract32(insn, 12, 1);
unsigned ua = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
unsigned rt = t1 * 32 + t0;
@@ -2479,9 +3319,9 @@ static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
switch (ext3) {
case 0: /* FLDW */
- return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
+ return do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
case 4: /* FSTW */
- return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
+ return do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
}
return gen_illegal(ctx);
}
@@ -2494,6 +3334,7 @@ static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
/* unsigned cc = extract32(insn, 10, 2); */
unsigned i = extract32(insn, 12, 1);
unsigned ua = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
int modify = (m ? (ua ? -1 : 1) : 0);
@@ -2512,9 +3353,9 @@ static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
switch (ext4) {
case 0: /* FLDD */
- return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
+ return do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
case 8: /* FSTD */
- return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
+ return do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
default:
return gen_illegal(ctx);
}
@@ -2523,12 +3364,12 @@ static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
bool is_true, bool is_imm, bool is_dw)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 13, 3);
unsigned r = extract32(insn, 21, 5);
unsigned cf = c * 2 + !is_true;
- TCGv dest, in1, in2, sv;
+ TCGv_reg dest, in1, in2, sv;
DisasCond cond;
nullify_over(ctx);
@@ -2541,7 +3382,7 @@ static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
in2 = load_gpr(ctx, r);
dest = get_temp(ctx);
- tcg_gen_sub_tl(dest, in1, in2);
+ tcg_gen_sub_reg(dest, in1, in2);
sv = NULL;
if (c == 6) {
@@ -2555,12 +3396,12 @@ static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
bool is_true, bool is_imm)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 13, 3);
unsigned r = extract32(insn, 21, 5);
unsigned cf = c * 2 + !is_true;
- TCGv dest, in1, in2, sv, cb_msb;
+ TCGv_reg dest, in1, in2, sv, cb_msb;
DisasCond cond;
nullify_over(ctx);
@@ -2577,15 +3418,15 @@ static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
switch (c) {
default:
- tcg_gen_add_tl(dest, in1, in2);
+ tcg_gen_add_reg(dest, in1, in2);
break;
case 4: case 5:
cb_msb = get_temp(ctx);
- tcg_gen_movi_tl(cb_msb, 0);
- tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ tcg_gen_movi_reg(cb_msb, 0);
+ tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
break;
case 6:
- tcg_gen_add_tl(dest, in1, in2);
+ tcg_gen_add_reg(dest, in1, in2);
sv = do_add_sv(ctx, dest, in1, in2);
break;
}
@@ -2596,13 +3437,13 @@ static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 15, 1);
unsigned r = extract32(insn, 16, 5);
unsigned p = extract32(insn, 21, 5);
unsigned i = extract32(insn, 26, 1);
- TCGv tmp, tcg_r;
+ TCGv_reg tmp, tcg_r;
DisasCond cond;
nullify_over(ctx);
@@ -2610,9 +3451,9 @@ static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
tmp = tcg_temp_new();
tcg_r = load_gpr(ctx, r);
if (i) {
- tcg_gen_shli_tl(tmp, tcg_r, p);
+ tcg_gen_shli_reg(tmp, tcg_r, p);
} else {
- tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
+ tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
}
cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
@@ -2622,23 +3463,23 @@ static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 13, 3);
unsigned t = extract32(insn, 16, 5);
unsigned r = extract32(insn, 21, 5);
- TCGv dest;
+ TCGv_reg dest;
DisasCond cond;
nullify_over(ctx);
dest = dest_gpr(ctx, r);
if (is_imm) {
- tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
+ tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
} else if (t == 0) {
- tcg_gen_movi_tl(dest, 0);
+ tcg_gen_movi_reg(dest, 0);
} else {
- tcg_gen_mov_tl(dest, cpu_gr[t]);
+ tcg_gen_mov_reg(dest, cpu_gr[t]);
}
cond = do_sed_cond(c, dest);
@@ -2652,7 +3493,7 @@ static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
unsigned c = extract32(insn, 13, 3);
unsigned r1 = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
- TCGv dest;
+ TCGv_reg dest;
if (c) {
nullify_over(ctx);
@@ -2660,22 +3501,22 @@ static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
dest = dest_gpr(ctx, rt);
if (r1 == 0) {
- tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
- tcg_gen_shr_tl(dest, dest, cpu_sar);
+ tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
+ tcg_gen_shr_reg(dest, dest, cpu_sar);
} else if (r1 == r2) {
TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
+ tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
tcg_gen_rotr_i32(t32, t32, cpu_sar);
- tcg_gen_extu_i32_tl(dest, t32);
+ tcg_gen_extu_i32_reg(dest, t32);
tcg_temp_free_i32(t32);
} else {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 s = tcg_temp_new_i64();
- tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
- tcg_gen_extu_tl_i64(s, cpu_sar);
+ tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
+ tcg_gen_extu_reg_i64(s, cpu_sar);
tcg_gen_shr_i64(t, t, s);
- tcg_gen_trunc_i64_tl(dest, t);
+ tcg_gen_trunc_i64_reg(dest, t);
tcg_temp_free_i64(t);
tcg_temp_free_i64(s);
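
For the general r1 != r2 case, SHRPW concatenates the two registers and shifts the 64-bit pair right by SAR. A scalar model of what the TCG above computes, assuming the 32-bit (TARGET_REGISTER_BITS == 32) configuration:

    #include <stdint.h>

    static uint32_t shrpw(uint32_t r1, uint32_t r2, unsigned sar)
    {
        uint64_t pair = ((uint64_t)r1 << 32) | r2;   /* r1 is the high half */
        return (uint32_t)(pair >> (sar & 31));
    }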
@@ -2699,7 +3540,7 @@ static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
unsigned r1 = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
unsigned sa = 31 - cpos;
- TCGv dest, t2;
+ TCGv_reg dest, t2;
if (c) {
nullify_over(ctx);
@@ -2709,16 +3550,16 @@ static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
t2 = load_gpr(ctx, r2);
if (r1 == r2) {
TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t32, t2);
+ tcg_gen_trunc_reg_i32(t32, t2);
tcg_gen_rotri_i32(t32, t32, sa);
- tcg_gen_extu_i32_tl(dest, t32);
+ tcg_gen_extu_i32_reg(dest, t32);
tcg_temp_free_i32(t32);
} else if (r1 == 0) {
- tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
+ tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
} else {
- TCGv t0 = tcg_temp_new();
- tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
- tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
+ TCGv_reg t0 = tcg_temp_new();
+ tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
+ tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
tcg_temp_free(t0);
}
save_gpr(ctx, rt, dest);
@@ -2740,7 +3581,7 @@ static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
unsigned rt = extract32(insn, 16, 5);
unsigned rr = extract32(insn, 21, 5);
unsigned len = 32 - clen;
- TCGv dest, src, tmp;
+ TCGv_reg dest, src, tmp;
if (c) {
nullify_over(ctx);
@@ -2751,13 +3592,13 @@ static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
tmp = tcg_temp_new();
/* Recall that SAR is using big-endian bit numbering. */
- tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
+ tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
if (is_se) {
- tcg_gen_sar_tl(dest, src, tmp);
- tcg_gen_sextract_tl(dest, dest, 0, len);
+ tcg_gen_sar_reg(dest, src, tmp);
+ tcg_gen_sextract_reg(dest, dest, 0, len);
} else {
- tcg_gen_shr_tl(dest, src, tmp);
- tcg_gen_extract_tl(dest, dest, 0, len);
+ tcg_gen_shr_reg(dest, src, tmp);
+ tcg_gen_extract_reg(dest, dest, 0, len);
}
tcg_temp_free(tmp);
save_gpr(ctx, rt, dest);
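
SAR counts bits big-endian (bit 0 is the most significant), so xoring with TARGET_REGISTER_BITS - 1 turns it into an ordinary right-shift amount. A sketch of the unsigned 32-bit case:

    #include <stdint.h>

    /* len is 1..32, so build the mask in 64-bit arithmetic.  */
    static uint32_t extrw_u(uint32_t src, unsigned sar, unsigned len)
    {
        unsigned sa = sar ^ 31;   /* equals 31 - sar for sar in 0..31 */
        return (uint32_t)((src >> sa) & (((uint64_t)1 << len) - 1));
    }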
@@ -2781,7 +3622,7 @@ static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
unsigned rr = extract32(insn, 21, 5);
unsigned len = 32 - clen;
unsigned cpos = 31 - pos;
- TCGv dest, src;
+ TCGv_reg dest, src;
if (c) {
nullify_over(ctx);
@@ -2790,9 +3631,9 @@ static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
dest = dest_gpr(ctx, rt);
src = load_gpr(ctx, rr);
if (is_se) {
- tcg_gen_sextract_tl(dest, src, cpos, len);
+ tcg_gen_sextract_reg(dest, src, cpos, len);
} else {
- tcg_gen_extract_tl(dest, src, cpos, len);
+ tcg_gen_extract_reg(dest, src, cpos, len);
}
save_gpr(ctx, rt, dest);
@@ -2818,11 +3659,11 @@ static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
unsigned cpos = extract32(insn, 5, 5);
unsigned nz = extract32(insn, 10, 1);
unsigned c = extract32(insn, 13, 3);
- target_long val = low_sextract(insn, 16, 5);
+ target_sreg val = low_sextract(insn, 16, 5);
unsigned rt = extract32(insn, 21, 5);
unsigned len = 32 - clen;
- target_long mask0, mask1;
- TCGv dest;
+ target_sreg mask0, mask1;
+ TCGv_reg dest;
if (c) {
nullify_over(ctx);
@@ -2836,14 +3677,14 @@ static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
mask1 = deposit64(-1, cpos, len, val);
if (nz) {
- TCGv src = load_gpr(ctx, rt);
+ TCGv_reg src = load_gpr(ctx, rt);
if (mask1 != -1) {
- tcg_gen_andi_tl(dest, src, mask1);
+ tcg_gen_andi_reg(dest, src, mask1);
src = dest;
}
- tcg_gen_ori_tl(dest, src, mask0);
+ tcg_gen_ori_reg(dest, src, mask0);
} else {
- tcg_gen_movi_tl(dest, mask0);
+ tcg_gen_movi_reg(dest, mask0);
}
save_gpr(ctx, rt, dest);
@@ -2866,7 +3707,7 @@ static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
unsigned rt = extract32(insn, 21, 5);
unsigned rs = nz ? rt : 0;
unsigned len = 32 - clen;
- TCGv dest, val;
+ TCGv_reg dest, val;
if (c) {
nullify_over(ctx);
@@ -2878,9 +3719,9 @@ static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
dest = dest_gpr(ctx, rt);
val = load_gpr(ctx, rr);
if (rs == 0) {
- tcg_gen_deposit_z_tl(dest, val, cpos, len);
+ tcg_gen_deposit_z_reg(dest, val, cpos, len);
} else {
- tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
+ tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
}
save_gpr(ctx, rt, dest);
@@ -2902,7 +3743,7 @@ static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
unsigned rt = extract32(insn, 21, 5);
unsigned rs = nz ? rt : 0;
unsigned len = 32 - clen;
- TCGv val, mask, tmp, shift, dest;
+ TCGv_reg val, mask, tmp, shift, dest;
unsigned msb = 1U << (len - 1);
if (c) {
@@ -2919,17 +3760,17 @@ static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
tmp = tcg_temp_new();
/* Convert big-endian bit numbering in SAR to left-shift. */
- tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
+ tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
- mask = tcg_const_tl(msb + (msb - 1));
- tcg_gen_and_tl(tmp, val, mask);
+ mask = tcg_const_reg(msb + (msb - 1));
+ tcg_gen_and_reg(tmp, val, mask);
if (rs) {
- tcg_gen_shl_tl(mask, mask, shift);
- tcg_gen_shl_tl(tmp, tmp, shift);
- tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
- tcg_gen_or_tl(dest, dest, tmp);
+ tcg_gen_shl_reg(mask, mask, shift);
+ tcg_gen_shl_reg(tmp, tmp, shift);
+ tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
+ tcg_gen_or_reg(dest, dest, tmp);
} else {
- tcg_gen_shl_tl(dest, tmp, shift);
+ tcg_gen_shl_reg(dest, tmp, shift);
}
tcg_temp_free(shift);
tcg_temp_free(mask);
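
The variable deposit builds a mask of the low len bits, shifts mask and value left by 31 - SAR, and merges the result into the old register. A scalar model of the rs != 0 path above (the rs == 0 path is just the shift):

    #include <stdint.h>

    static uint32_t depw_sar(uint32_t rs, uint32_t val,
                             unsigned sar, unsigned len)
    {
        unsigned shift = sar ^ 31;
        uint32_t mask = (uint32_t)(((uint64_t)1 << len) - 1) << shift;
        return (rs & ~mask) | ((val << shift) & mask);
    }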
@@ -2954,25 +3795,58 @@ static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
{
unsigned n = extract32(insn, 1, 1);
unsigned b = extract32(insn, 21, 5);
- target_long disp = assemble_17(insn);
+ target_sreg disp = assemble_17(insn);
+ TCGv_reg tmp;
- /* unsigned s = low_uextract(insn, 13, 3); */
+#ifdef CONFIG_USER_ONLY
/* ??? It seems like there should be a good way of using
"be disp(sr2, r0)", the canonical gateway entry mechanism
to our advantage. But that appears to be inconvenient to
manage alongside branch delay slots. Therefore we handle
entry into the gateway page via absolute address. */
-
/* Since we don't implement spaces, just branch. Do notice the special
case of "be disp(*,r0)" using a direct branch to disp, so that we can
goto_tb to the TB containing the syscall. */
if (b == 0) {
return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
+ }
+#else
+ int sp = assemble_sr3(insn);
+ nullify_over(ctx);
+#endif
+
+ tmp = get_temp(ctx);
+ tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
+ tmp = do_ibranch_priv(ctx, tmp);
+
+#ifdef CONFIG_USER_ONLY
+ return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
+#else
+ TCGv_i64 new_spc = tcg_temp_new_i64();
+
+ load_spr(ctx, new_spc, sp);
+ if (is_l) {
+ copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
+ tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
+ }
+ if (n && use_nullify_skip(ctx)) {
+ tcg_gen_mov_reg(cpu_iaoq_f, tmp);
+ tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
+ tcg_gen_mov_i64(cpu_iasq_f, new_spc);
+ tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
} else {
- TCGv tmp = get_temp(ctx);
- tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
- return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+ }
+ tcg_gen_mov_reg(cpu_iaoq_b, tmp);
+ tcg_gen_mov_i64(cpu_iasq_b, new_spc);
+ nullify_set(ctx, n);
}
+ tcg_temp_free_i64(new_spc);
+ tcg_gen_lookup_and_goto_ptr();
+ return nullify_end(ctx, DISAS_NORETURN);
+#endif
}
static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
@@ -2980,16 +3854,63 @@ static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
{
unsigned n = extract32(insn, 1, 1);
unsigned link = extract32(insn, 21, 5);
- target_long disp = assemble_17(insn);
+ target_sreg disp = assemble_17(insn);
return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
}
+static DisasJumpType trans_b_gate(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned link = extract32(insn, 21, 5);
+ target_sreg disp = assemble_17(insn);
+ target_ureg dest = iaoq_dest(ctx, disp);
+
+ /* Make sure the caller hasn't done something weird with the queue.
+ * ??? This is not quite the same as the PSW[B] bit, which would be
+ * expensive to track. Real hardware will trap for
+ * b gateway
+ * b gateway+4 (in delay slot of first branch)
+ * However, checking for a non-sequential instruction queue *will*
+ * diagnose the security hole
+ * b gateway
+ * b evil
+ * in which instructions at evil would run with increased privilege.
+ */
+ if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
+ return gen_illegal(ctx);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb_flags & PSW_C) {
+ CPUHPPAState *env = ctx->cs->env_ptr;
+ int type = hppa_artype_for_page(env, ctx->base.pc_next);
+ /* If we could not find a TLB entry, then we need to generate an
+ ITLB miss exception so the kernel will provide it.
+ The resulting TLB fill operation will invalidate this TB and
+ we will re-translate, at which point we *will* be able to find
+ the TLB entry and determine if this is in fact a gateway page. */
+ if (type < 0) {
+ return gen_excp(ctx, EXCP_ITLB_MISS);
+ }
+ /* No change for non-gateway pages or for priv decrease. */
+ if (type >= 4 && type - 4 < ctx->privilege) {
+ dest = deposit32(dest, 0, 2, type - 4);
+ }
+ } else {
+ dest &= -4; /* priv = 0 */
+ }
+#endif
+
+ return do_dbranch(ctx, dest, link, n);
+}
+
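
On PA-RISC the privilege level lives in the low two bits of the instruction address, so a gateway page of access type 4..7 promotes simply by depositing into those bits, and only when that raises privilege (a smaller number is more privileged). A scalar model of the check above:

    #include <stdint.h>

    /* Hedged sketch; 'type' is the value from hppa_artype_for_page.  */
    static uint32_t gateway_dest(uint32_t dest, int type, int cur_priv)
    {
        if (type >= 4 && type - 4 < cur_priv) {
            dest = (dest & ~3u) | (type - 4);   /* raise privilege */
        }
        return dest;
    }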
static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned n = extract32(insn, 1, 1);
- target_long disp = assemble_22(insn);
+ target_sreg disp = assemble_22(insn);
return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
}
@@ -3000,10 +3921,11 @@ static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
unsigned n = extract32(insn, 1, 1);
unsigned rx = extract32(insn, 16, 5);
unsigned link = extract32(insn, 21, 5);
- TCGv tmp = get_temp(ctx);
+ TCGv_reg tmp = get_temp(ctx);
- tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
- tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
+ tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
+ tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
+ /* The computation here never changes privilege level. */
return do_ibranch(ctx, tmp, link, n);
}
@@ -3013,15 +3935,16 @@ static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
unsigned n = extract32(insn, 1, 1);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
- TCGv dest;
+ TCGv_reg dest;
if (rx == 0) {
dest = load_gpr(ctx, rb);
} else {
dest = get_temp(ctx);
- tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
- tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
+ tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
+ tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
}
+ dest = do_ibranch_priv(ctx, dest);
return do_ibranch(ctx, dest, 0, n);
}
@@ -3031,8 +3954,28 @@ static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
unsigned n = extract32(insn, 1, 1);
unsigned rb = extract32(insn, 21, 5);
unsigned link = extract32(insn, 13, 1) ? 2 : 0;
+ TCGv_reg dest;
+
+#ifdef CONFIG_USER_ONLY
+ dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
+ return do_ibranch(ctx, dest, link, n);
+#else
+ nullify_over(ctx);
+ dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
- return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+ }
+ copy_iaoq_entry(cpu_iaoq_b, -1, dest);
+ tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
+ if (link) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ nullify_set(ctx, n);
+ tcg_gen_lookup_and_goto_ptr();
+ return nullify_end(ctx, DISAS_NORETURN);
+#endif
}
static const DisasInsn table_branch[] = {
@@ -3041,6 +3984,7 @@ static const DisasInsn table_branch[] = {
{ 0xe8004000u, 0xfc00fffdu, trans_blr },
{ 0xe800c000u, 0xfc00fffdu, trans_bv },
{ 0xe800d000u, 0xfc00dffcu, trans_bve },
+ { 0xe8002000u, 0xfc00e000u, trans_b_gate },
};
static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
@@ -3240,13 +4184,13 @@ static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
{
unsigned y = extract32(insn, 13, 3);
unsigned cbit = (y ^ 1) - 1;
- TCGv t;
+ TCGv_reg t;
nullify_over(ctx);
t = tcg_temp_new();
- tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
- tcg_gen_extract_tl(t, t, 21 - cbit, 1);
+ tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+ tcg_gen_extract_reg(t, t, 21 - cbit, 1);
ctx->null_cond = cond_make_0(TCG_COND_NE, t);
tcg_temp_free(t);
@@ -3259,16 +4203,16 @@ static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
unsigned c = extract32(insn, 0, 5);
int mask;
bool inv = false;
- TCGv t;
+ TCGv_reg t;
nullify_over(ctx);
t = tcg_temp_new();
- tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+ tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
switch (c) {
case 0: /* simple */
- tcg_gen_andi_tl(t, t, 0x4000000);
+ tcg_gen_andi_reg(t, t, 0x4000000);
ctx->null_cond = cond_make_0(TCG_COND_NE, t);
goto done;
case 2: /* rej */
@@ -3296,11 +4240,11 @@ static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
return gen_illegal(ctx);
}
if (inv) {
- TCGv c = load_const(ctx, mask);
- tcg_gen_or_tl(t, t, c);
+ TCGv_reg c = load_const(ctx, mask);
+ tcg_gen_or_reg(t, t, c);
ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
} else {
- tcg_gen_andi_tl(t, t, mask);
+ tcg_gen_andi_reg(t, t, mask);
ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
}
done:
@@ -3449,34 +4393,34 @@ static const DisasInsn table_float_0e[] = {
/* floating point class one */
/* float/float */
{ 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
- { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
+ { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
/* int/float */
- { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
+ { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
{ 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
{ 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
{ 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
/* float/int */
- { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
+ { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
{ 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
{ 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
{ 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
/* float/int truncate */
- { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
+ { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
{ 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
{ 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
{ 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
/* uint/float */
- { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
+ { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
{ 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
{ 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
{ 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
/* float/uint */
- { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
+ { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
{ 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
{ 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
{ 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
/* float/uint truncate */
- { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
+ { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
{ 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
{ 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
{ 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
@@ -3601,6 +4545,8 @@ static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
return table[i].trans(ctx, insn, &table[i]);
}
}
+ qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
+ insn, ctx->base.pc_next);
return gen_illegal(ctx);
}
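
With the LOG_UNIMP diagnostic added above, undecoded opcodes can be spotted from the command line via QEMU's logging switch, e.g. (machine options elided):

    qemu-system-hppa -d unimp ...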
@@ -3721,7 +4667,18 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x15: /* unassigned */
case 0x1D: /* unassigned */
case 0x37: /* unassigned */
- case 0x3F: /* unassigned */
+ break;
+ case 0x3F:
+#ifndef CONFIG_USER_ONLY
+ /* Unassigned, but used here for system halt and reset. */
+ if (insn == 0xfffdead0) {
+ return gen_hlt(ctx, 0); /* halt system */
+ }
+ if (insn == 0xfffdead1) {
+ return gen_hlt(ctx, 1); /* reset system */
+ }
+#endif
+ break;
default:
break;
}
@@ -3732,30 +4689,51 @@ static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
CPUState *cs, int max_insns)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- TranslationBlock *tb = ctx->base.tb;
int bound;
ctx->cs = cs;
- ctx->iaoq_f = tb->pc;
- ctx->iaoq_b = tb->cs_base;
+ ctx->tb_flags = ctx->base.tb->flags;
+
+#ifdef CONFIG_USER_ONLY
+ ctx->privilege = MMU_USER_IDX;
+ ctx->mmu_idx = MMU_USER_IDX;
+ ctx->iaoq_f = ctx->base.pc_first;
+ ctx->iaoq_b = ctx->base.tb->cs_base;
+#else
+ ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
+ ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
+
+ /* Recover the IAOQ values from the GVA + PRIV. */
+ uint64_t cs_base = ctx->base.tb->cs_base;
+ uint64_t iasq_f = cs_base & ~0xffffffffull;
+ int32_t diff = cs_base;
+
+ ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
+ ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
+#endif
ctx->iaoq_n = -1;
ctx->iaoq_n_var = NULL;
- ctx->ntemps = 0;
- memset(ctx->temps, 0, sizeof(ctx->temps));
+ /* Bound the number of instructions by those left on the page. */
+ bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
+ bound = MIN(max_insns, bound);
- bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
- return MIN(max_insns, bound);
+ ctx->ntempr = 0;
+ ctx->ntempl = 0;
+ memset(ctx->tempr, 0, sizeof(ctx->tempr));
+ memset(ctx->templ, 0, sizeof(ctx->templ));
+
+ return bound;
}
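
The recovery above assumes cs_base packs the front space into its high 32 bits and the signed IAOQ_F-to-IAOQ_B offset into its low 32 bits (0 meaning the back of the queue is unknown). A hypothetical packer matching that unpacking:

    #include <stdint.h>

    static uint64_t pack_cs_base(uint64_t iasq_f, int32_t iaoq_diff)
    {
        return (iasq_f & ~(uint64_t)0xffffffff) | (uint32_t)iaoq_diff;
    }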
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
+ /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
ctx->null_cond = cond_make_f();
ctx->psw_n_nonzero = false;
- if (ctx->base.tb->flags & 1) {
+ if (ctx->tb_flags & PSW_N) {
ctx->null_cond.c = TCG_COND_ALWAYS;
ctx->psw_n_nonzero = true;
}
@@ -3775,7 +4753,7 @@ static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
- ctx->base.pc_next = ctx->iaoq_f + 4;
+ ctx->base.pc_next += 4;
return true;
}
@@ -3787,20 +4765,23 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
int i, n;
/* Execute one insn. */
- if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
+#ifdef CONFIG_USER_ONLY
+ if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
ret = do_page_zero(ctx);
assert(ret != DISAS_NEXT);
- } else {
+ } else
+#endif
+ {
/* Always fetch the insn, even if nullified, so that we check
the page permissions for execute. */
- uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
+ uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
/* Set up the IA queue for the next insn.
This will be overwritten by a branch. */
if (ctx->iaoq_b == -1) {
ctx->iaoq_n = -1;
ctx->iaoq_n_var = get_temp(ctx);
- tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
} else {
ctx->iaoq_n = ctx->iaoq_b + 4;
ctx->iaoq_n_var = NULL;
@@ -3810,58 +4791,70 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->null_cond.c = TCG_COND_NEVER;
ret = DISAS_NEXT;
} else {
+ ctx->insn = insn;
ret = translate_one(ctx, insn);
assert(ctx->null_lab == NULL);
}
}
/* Free any temporaries allocated. */
- for (i = 0, n = ctx->ntemps; i < n; ++i) {
- tcg_temp_free(ctx->temps[i]);
- ctx->temps[i] = NULL;
+ for (i = 0, n = ctx->ntempr; i < n; ++i) {
+ tcg_temp_free(ctx->tempr[i]);
+ ctx->tempr[i] = NULL;
}
- ctx->ntemps = 0;
+ for (i = 0, n = ctx->ntempl; i < n; ++i) {
+ tcg_temp_free_tl(ctx->templ[i]);
+ ctx->templ[i] = NULL;
+ }
+ ctx->ntempr = 0;
+ ctx->ntempl = 0;
- /* Advance the insn queue. */
- /* ??? The non-linear instruction restriction is purely due to
- the debugging dump. Otherwise we *could* follow unconditional
- branches within the same page. */
+ /* Advance the insn queue. Note that this check also detects
+ a privilege change within the instruction queue. */
if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
- if (ctx->null_cond.c == TCG_COND_NEVER
- || ctx->null_cond.c == TCG_COND_ALWAYS) {
+ if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
+ && use_goto_tb(ctx, ctx->iaoq_b)
+ && (ctx->null_cond.c == TCG_COND_NEVER
+ || ctx->null_cond.c == TCG_COND_ALWAYS)) {
nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
ret = DISAS_NORETURN;
} else {
ret = DISAS_IAQ_N_STALE;
- }
+ }
}
ctx->iaoq_f = ctx->iaoq_b;
ctx->iaoq_b = ctx->iaoq_n;
ctx->base.is_jmp = ret;
+ ctx->base.pc_next += 4;
if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
return;
}
if (ctx->iaoq_f == -1) {
- tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
+ tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+#endif
nullify_save(ctx);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
} else if (ctx->iaoq_b == -1) {
- tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
+ tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
}
}
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ DisasJumpType is_jmp = ctx->base.is_jmp;
- switch (ctx->base.is_jmp) {
+ switch (is_jmp) {
case DISAS_NORETURN:
break;
case DISAS_TOO_MANY:
case DISAS_IAQ_N_STALE:
+ case DISAS_IAQ_N_STALE_EXIT:
copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
nullify_save(ctx);
@@ -3869,6 +4862,8 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
case DISAS_IAQ_N_UPDATED:
if (ctx->base.singlestep_enabled) {
gen_excp_1(EXCP_DEBUG);
+ } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
+ tcg_gen_exit_tb(0);
} else {
tcg_gen_lookup_and_goto_ptr();
}
@@ -3876,34 +4871,31 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
default:
g_assert_not_reached();
}
-
- /* We don't actually use this during normal translation,
- but we should interact with the generic main loop. */
- ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
}
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
- TranslationBlock *tb = dcbase->tb;
+ target_ulong pc = dcbase->pc_first;
- switch (tb->pc) {
+#ifdef CONFIG_USER_ONLY
+ switch (pc) {
case 0x00:
qemu_log("IN:\n0x00000000: (null)\n");
- break;
+ return;
case 0xb0:
qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
- break;
+ return;
case 0xe0:
qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
- break;
+ return;
case 0x100:
qemu_log("IN:\n0x00000100: syscall\n");
- break;
- default:
- qemu_log("IN: %s\n", lookup_symbol(tb->pc));
- log_target_disas(cs, tb->pc, tb->size);
- break;
+ return;
}
+#endif
+
+ qemu_log("IN: %s\n", lookup_symbol(pc));
+ log_target_disas(cs, pc, dcbase->tb->size);
}
static const TranslatorOps hppa_tr_ops = {
@@ -3927,7 +4919,7 @@ void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
target_ulong *data)
{
env->iaoq_f = data[0];
- if (data[1] != -1) {
+ if (data[1] != (target_ureg)-1) {
env->iaoq_b = data[1];
}
/* Since we were executing the instruction at IAOQ_F, and took some