aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitlab-ci.d/windows.yml1
-rw-r--r--.mailmap1
-rw-r--r--MAINTAINERS2
-rw-r--r--hw/intc/loongarch_pic_kvm.c1
-rw-r--r--hw/loongarch/virt-acpi-build.c1
-rw-r--r--hw/loongarch/virt-fdt-build.c1
-rw-r--r--hw/loongarch/virt.c3
-rw-r--r--include/hw/intc/loongarch_pic_common.h2
-rw-r--r--include/hw/loongarch/virt.h75
-rw-r--r--include/hw/pci-host/ls7a.h39
-rw-r--r--linux-user/hexagon/signal.c184
-rw-r--r--linux-user/microblaze/elfload.c3
-rw-r--r--python/qemu/machine/README.rst2
-rw-r--r--python/qemu/utils/README.rst2
-rwxr-xr-xscripts/clean_functional_cache.py45
-rw-r--r--target/alpha/helper.h1
-rw-r--r--target/alpha/machine.c34
-rw-r--r--target/alpha/sys_helper.c5
-rw-r--r--target/alpha/translate.c11
-rw-r--r--target/arm/tcg/translate-a64.c8
-rw-r--r--target/arm/tcg/translate.c8
-rw-r--r--target/avr/translate.c7
-rw-r--r--target/hexagon/decode.c4
-rwxr-xr-xtarget/hexagon/gen_helper_funcs.py2
-rw-r--r--target/hexagon/genptr.c3
-rwxr-xr-xtarget/hexagon/hex_common.py22
-rw-r--r--target/hexagon/idef-parser/README.rst2
-rw-r--r--target/hexagon/idef-parser/parser-helpers.c4
-rwxr-xr-xtarget/hexagon/idef-parser/prepare24
-rw-r--r--target/hexagon/insn.h4
-rw-r--r--target/hexagon/macros.h8
-rw-r--r--target/hexagon/meson.build5
-rw-r--r--target/hexagon/op_helper.c4
-rw-r--r--target/hexagon/translate.c17
-rw-r--r--target/hppa/cpu.h10
-rw-r--r--target/hppa/helper.c4
-rw-r--r--target/hppa/insns.decode8
-rw-r--r--target/hppa/mem_helper.c2
-rw-r--r--target/hppa/trace-events6
-rw-r--r--target/hppa/translate.c24
-rw-r--r--target/i386/monitor.c134
-rw-r--r--target/loongarch/cpu-mmu.h2
-rw-r--r--target/loongarch/cpu_helper.c2
-rw-r--r--target/loongarch/gdbstub.c2
-rw-r--r--target/loongarch/machine.c4
-rw-r--r--target/loongarch/tcg/helper.h2
-rw-r--r--target/loongarch/tcg/insn_trans/trans_privileged.c.inc2
-rw-r--r--target/loongarch/tcg/tlb_helper.c26
-rw-r--r--target/loongarch/tcg/translate.c6
-rw-r--r--target/m68k/translate.c17
-rw-r--r--target/microblaze/cpu.h2
-rw-r--r--target/microblaze/helper.c3
-rw-r--r--target/microblaze/helper.h2
-rw-r--r--target/microblaze/machine.c6
-rw-r--r--target/microblaze/mmu.c4
-rw-r--r--target/microblaze/mmu.h2
-rw-r--r--target/microblaze/op_helper.c4
-rw-r--r--target/microblaze/translate.c83
-rw-r--r--target/mips/tcg/translate.c7
-rw-r--r--target/openrisc/cpu.h31
-rw-r--r--target/openrisc/fpu_helper.c8
-rw-r--r--target/openrisc/helper.h8
-rw-r--r--target/openrisc/machine.c20
-rw-r--r--target/openrisc/mmu.c7
-rw-r--r--target/openrisc/sys_helper.c7
-rw-r--r--target/openrisc/translate.c463
-rw-r--r--target/ppc/translate.c7
-rw-r--r--target/riscv/cpu.h4
-rw-r--r--target/riscv/insn_trans/trans_rvv.c.inc16
-rw-r--r--target/riscv/kvm/kvm-cpu.c6
-rw-r--r--target/riscv/monitor.c12
-rw-r--r--target/riscv/translate.c9
-rw-r--r--target/riscv/vector_helper.c32
-rw-r--r--target/rx/translate.c462
-rw-r--r--target/s390x/mmu_helper.c17
-rw-r--r--target/s390x/tcg/translate.c6
-rw-r--r--target/s390x/tcg/translate_vx.c.inc6
-rw-r--r--target/sh4/cpu.h6
-rw-r--r--target/sh4/helper.c34
-rw-r--r--target/sh4/translate.c10
-rw-r--r--target/sparc/cpu.h1
-rw-r--r--target/sparc/helper.c1
-rw-r--r--target/sparc/int64_helper.c1
-rw-r--r--target/sparc/translate.c6
-rw-r--r--target/tricore/cpu.c2
-rw-r--r--target/tricore/helper.c4
-rw-r--r--target/tricore/op_helper.c219
-rw-r--r--target/tricore/translate.c3978
-rw-r--r--target/xtensa/cpu.c6
-rw-r--r--target/xtensa/translate.c2
-rw-r--r--target/xtensa/xtensa-semi.c11
-rw-r--r--tests/Makefile.include1
-rwxr-xr-xtests/functional/aarch64/test_sbsaref_alpine.py6
-rwxr-xr-xtests/functional/alpha/test_clipper.py1
-rw-r--r--tests/functional/qemu_test/asset.py13
-rw-r--r--tests/functional/reverse_debugging.py65
-rw-r--r--tests/tcg/hexagon/signal_context.c23
97 files changed, 3233 insertions, 3177 deletions
diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml
index 1e6a01b..6e1135d 100644
--- a/.gitlab-ci.d/windows.yml
+++ b/.gitlab-ci.d/windows.yml
@@ -87,6 +87,7 @@ msys2-64bit:
mingw-w64-x86_64-pkgconf
mingw-w64-x86_64-python
mingw-w64-x86_64-zstd"
+ - .\msys64\usr\bin\bash -lc "pacman -Sc --noconfirm"
- Write-Output "Running build at $(Get-Date -Format u)"
- $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc)
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
diff --git a/.mailmap b/.mailmap
index e727185..15bec72 100644
--- a/.mailmap
+++ b/.mailmap
@@ -136,6 +136,7 @@ Chen Gang <gang.chen.5i5j@gmail.com>
Chen Gang <gang.chen@sunrus.com.cn>
Chen Wei-Ren <chenwj@iis.sinica.edu.tw>
Christophe Lyon <christophe.lyon@st.com>
+Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>
Collin L. Walling <walling@linux.ibm.com>
Daniel P. Berrangé <berrange@redhat.com>
Eduardo Otubo <otubo@redhat.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 84cfd85..667acd9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1308,7 +1308,6 @@ F: include/hw/intc/loongarch_*.h
F: include/hw/intc/loongson_ipi_common.h
F: hw/intc/loongarch_*.c
F: hw/intc/loongson_ipi_common.c
-F: include/hw/pci-host/ls7a.h
F: hw/rtc/ls7a_rtc.c
F: gdb-xml/loongarch*.xml
@@ -4398,6 +4397,7 @@ M: Thomas Huth <thuth@redhat.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Daniel P. Berrange <berrange@redhat.com>
F: docs/devel/testing/functional.rst
+F: scripts/clean_functional_cache.py
F: tests/functional/qemu_test/
Windows Hosted Continuous Integration
diff --git a/hw/intc/loongarch_pic_kvm.c b/hw/intc/loongarch_pic_kvm.c
index dd504ec..6cfddf4 100644
--- a/hw/intc/loongarch_pic_kvm.c
+++ b/hw/intc/loongarch_pic_kvm.c
@@ -10,7 +10,6 @@
#include "hw/boards.h"
#include "hw/intc/loongarch_pch_pic.h"
#include "hw/loongarch/virt.h"
-#include "hw/pci-host/ls7a.h"
#include "system/kvm.h"
static void kvm_pch_pic_access_reg(int fd, uint64_t addr, void *val, bool write)
diff --git a/hw/loongarch/virt-acpi-build.c b/hw/loongarch/virt-acpi-build.c
index 8c2228a..3694c98 100644
--- a/hw/loongarch/virt-acpi-build.c
+++ b/hw/loongarch/virt-acpi-build.c
@@ -21,7 +21,6 @@
#include "system/reset.h"
/* Supported chipsets: */
-#include "hw/pci-host/ls7a.h"
#include "hw/loongarch/virt.h"
#include "hw/acpi/utils.h"
diff --git a/hw/loongarch/virt-fdt-build.c b/hw/loongarch/virt-fdt-build.c
index 728ce46..1f0ba01 100644
--- a/hw/loongarch/virt-fdt-build.c
+++ b/hw/loongarch/virt-fdt-build.c
@@ -12,7 +12,6 @@
#include "hw/loader.h"
#include "hw/loongarch/virt.h"
#include "hw/pci-host/gpex.h"
-#include "hw/pci-host/ls7a.h"
#include "system/device_tree.h"
#include "system/reset.h"
#include "target/loongarch/cpu.h"
diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c
index c176042..49434ad 100644
--- a/hw/loongarch/virt.c
+++ b/hw/loongarch/virt.c
@@ -29,7 +29,6 @@
#include "hw/intc/loongarch_pch_pic.h"
#include "hw/intc/loongarch_pch_msi.h"
#include "hw/intc/loongarch_dintc.h"
-#include "hw/pci-host/ls7a.h"
#include "hw/pci-host/gpex.h"
#include "hw/misc/unimp.h"
#include "hw/loongarch/fw_cfg.h"
@@ -521,7 +520,7 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms)
}
/* PCH_PIC memory region */
- memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE,
+ memory_region_add_subregion(get_system_memory(), VIRT_PCH_REG_BASE,
sysbus_mmio_get_region(SYS_BUS_DEVICE(pch_pic), 0));
/* Connect pch_pic irqs to extioi */
diff --git a/include/hw/intc/loongarch_pic_common.h b/include/hw/intc/loongarch_pic_common.h
index f774c97..675ba96 100644
--- a/include/hw/intc/loongarch_pic_common.h
+++ b/include/hw/intc/loongarch_pic_common.h
@@ -7,7 +7,7 @@
#ifndef HW_LOONGARCH_PIC_COMMON_H
#define HW_LOONGARCH_PIC_COMMON_H
-#include "hw/pci-host/ls7a.h"
+#include "hw/loongarch/virt.h"
#include "hw/sysbus.h"
#define PCH_PIC_INT_ID 0x00
diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h
index 76fa57c..27b1755 100644
--- a/include/hw/loongarch/virt.h
+++ b/include/hw/loongarch/virt.h
@@ -13,49 +13,84 @@
#include "hw/block/flash.h"
#include "hw/loongarch/boot.h"
-#define IOCSRF_TEMP 0
-#define IOCSRF_NODECNT 1
-#define IOCSRF_MSI 2
-#define IOCSRF_EXTIOI 3
-#define IOCSRF_CSRIPI 4
-#define IOCSRF_FREQCSR 5
-#define IOCSRF_FREQSCALE 6
-#define IOCSRF_DVFSV1 7
-#define IOCSRF_GMOD 9
-#define IOCSRF_VM 11
-#define IOCSRF_DMSI 15
-
+/* IOCSR region */
#define VERSION_REG 0x0
#define FEATURE_REG 0x8
+#define IOCSRF_TEMP 0
+#define IOCSRF_NODECNT 1
+#define IOCSRF_MSI 2
+#define IOCSRF_EXTIOI 3
+#define IOCSRF_CSRIPI 4
+#define IOCSRF_FREQCSR 5
+#define IOCSRF_FREQSCALE 6
+#define IOCSRF_DVFSV1 7
+#define IOCSRF_GMOD 9
+#define IOCSRF_VM 11
+#define IOCSRF_DMSI 15
#define VENDOR_REG 0x10
#define CPUNAME_REG 0x20
#define MISC_FUNC_REG 0x420
-#define IOCSRM_EXTIOI_EN 48
-#define IOCSRM_EXTIOI_INT_ENCODE 49
-#define IOCSRM_DMSI_EN 51
+#define IOCSRM_EXTIOI_EN 48
+#define IOCSRM_EXTIOI_INT_ENCODE 49
+#define IOCSRM_DMSI_EN 51
#define LOONGARCH_MAX_CPUS 256
-#define VIRT_FWCFG_BASE 0x1e020000UL
+/* MMIO memory region */
+#define VIRT_PCH_REG_BASE 0x10000000UL
+#define VIRT_PCH_REG_SIZE 0x400
+#define VIRT_RTC_REG_BASE 0x100d0100UL
+#define VIRT_RTC_LEN 0x100
+#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000UL
+#define VIRT_PLATFORM_BUS_SIZE 0x02000000
+#define VIRT_PCI_IO_BASE 0x18004000UL
+#define VIRT_PCI_IO_OFFSET 0x4000
+#define VIRT_PCI_IO_SIZE 0xC000
#define VIRT_BIOS_BASE 0x1c000000UL
-#define VIRT_BIOS_SIZE (16 * MiB)
+#define VIRT_BIOS_SIZE 0x01000000UL
#define VIRT_FLASH_SECTOR_SIZE (256 * KiB)
#define VIRT_FLASH0_BASE VIRT_BIOS_BASE
#define VIRT_FLASH0_SIZE VIRT_BIOS_SIZE
#define VIRT_FLASH1_BASE 0x1d000000UL
-#define VIRT_FLASH1_SIZE (16 * MiB)
+#define VIRT_FLASH1_SIZE 0x01000000UL
+#define VIRT_FWCFG_BASE 0x1e020000UL
+#define VIRT_UART_BASE 0x1fe001e0UL
+#define VIRT_UART_SIZE 0x100
+#define VIRT_PCI_CFG_BASE 0x20000000UL
+#define VIRT_PCI_CFG_SIZE 0x08000000UL
+#define VIRT_DINTC_BASE 0x2FE00000UL
+#define VIRT_DINTC_SIZE 0x00100000UL
+#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL
+#define VIRT_PCH_MSI_SIZE 0x8
+#define VIRT_PCI_MEM_BASE 0x40000000UL
+#define VIRT_PCI_MEM_SIZE 0x40000000UL
#define VIRT_LOWMEM_BASE 0
#define VIRT_LOWMEM_SIZE 0x10000000
+#define FDT_BASE 0x100000
#define VIRT_HIGHMEM_BASE 0x80000000
#define VIRT_GED_EVT_ADDR 0x100e0000
#define VIRT_GED_MEM_ADDR QEMU_ALIGN_UP(VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN, 4)
#define VIRT_GED_REG_ADDR QEMU_ALIGN_UP(VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN, 4)
#define VIRT_GED_CPUHP_ADDR QEMU_ALIGN_UP(VIRT_GED_REG_ADDR + ACPI_GED_REG_COUNT, 4)
-#define COMMAND_LINE_SIZE 512
+/*
+ * GSI_BASE is hard-coded with 64 in linux kernel, else kernel fails to boot
+ * 0 - 15 GSI for ISA devices even if there is no ISA devices
+ * 16 - 63 GSI for CPU devices such as timers/perf monitor etc
+ * 64 - GSI for external devices
+ */
+#define VIRT_PCH_PIC_IRQ_NUM 32
+#define VIRT_GSI_BASE 64
+#define VIRT_DEVICE_IRQS 16
+#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2)
+#define VIRT_UART_COUNT 4
+#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 6)
+#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 7)
+#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 8)
+#define VIRT_PLATFORM_BUS_NUM_IRQS 2
-#define FDT_BASE 0x100000
+#define COMMAND_LINE_SIZE 512
struct LoongArchVirtMachineState {
/*< private >*/
diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h
index bfdbfe3..33e7942 100644
--- a/include/hw/pci-host/ls7a.h
+++ b/include/hw/pci-host/ls7a.h
@@ -13,43 +13,4 @@
#include "qemu/range.h"
#include "qom/object.h"
-#define VIRT_PCI_MEM_BASE 0x40000000UL
-#define VIRT_PCI_MEM_SIZE 0x40000000UL
-#define VIRT_PCI_IO_OFFSET 0x4000
-#define VIRT_PCI_CFG_BASE 0x20000000
-#define VIRT_PCI_CFG_SIZE 0x08000000
-#define VIRT_PCI_IO_BASE 0x18004000UL
-#define VIRT_PCI_IO_SIZE 0xC000
-
-#define VIRT_PCH_REG_BASE 0x10000000UL
-#define VIRT_IOAPIC_REG_BASE (VIRT_PCH_REG_BASE)
-#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL
-#define VIRT_DINTC_SIZE 0x100000UL
-#define VIRT_DINTC_BASE 0x2FE00000UL
-#define VIRT_PCH_REG_SIZE 0x400
-#define VIRT_PCH_MSI_SIZE 0x8
-
-/*
- * GSI_BASE is hard-coded with 64 in linux kernel, else kernel fails to boot
- * 0 - 15 GSI for ISA devices even if there is no ISA devices
- * 16 - 63 GSI for CPU devices such as timers/perf monitor etc
- * 64 - GSI for external devices
- */
-#define VIRT_PCH_PIC_IRQ_NUM 32
-#define VIRT_GSI_BASE 64
-#define VIRT_DEVICE_IRQS 16
-#define VIRT_UART_COUNT 4
-#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2)
-#define VIRT_UART_BASE 0x1fe001e0
-#define VIRT_UART_SIZE 0x100
-#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 6)
-#define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000)
-#define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100)
-#define VIRT_RTC_LEN 0x100
-#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 7)
-
-#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000
-#define VIRT_PLATFORM_BUS_SIZE 0x2000000
-#define VIRT_PLATFORM_BUS_NUM_IRQS 2
-#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 8)
#endif
diff --git a/linux-user/hexagon/signal.c b/linux-user/hexagon/signal.c
index 492b51f..183ecfa 100644
--- a/linux-user/hexagon/signal.c
+++ b/linux-user/hexagon/signal.c
@@ -23,30 +23,32 @@
#include "signal-common.h"
#include "linux-user/trace.h"
-struct target_sigcontext {
- target_ulong r0, r1, r2, r3;
- target_ulong r4, r5, r6, r7;
- target_ulong r8, r9, r10, r11;
- target_ulong r12, r13, r14, r15;
- target_ulong r16, r17, r18, r19;
- target_ulong r20, r21, r22, r23;
- target_ulong r24, r25, r26, r27;
- target_ulong r28, r29, r30, r31;
- target_ulong sa0;
- target_ulong lc0;
- target_ulong sa1;
- target_ulong lc1;
- target_ulong m0;
- target_ulong m1;
- target_ulong usr;
- target_ulong gp;
- target_ulong ugp;
- target_ulong pc;
- target_ulong cause;
- target_ulong badva;
- target_ulong pred[NUM_PREGS];
+struct target_user_regs_struct {
+ abi_ulong gpr[32];
+ abi_ulong sa0;
+ abi_ulong lc0;
+ abi_ulong sa1;
+ abi_ulong lc1;
+ abi_ulong m0;
+ abi_ulong m1;
+ abi_ulong usr;
+ abi_ulong p3_0;
+ abi_ulong gp;
+ abi_ulong ugp;
+ abi_ulong pc;
+ abi_ulong cause;
+ abi_ulong badva;
+ abi_ulong cs0;
+ abi_ulong cs1;
+ abi_ulong pad1; /* pad to 48 words */
};
+QEMU_BUILD_BUG_ON(sizeof(struct target_user_regs_struct) != 48 * 4);
+
+struct target_sigcontext {
+ struct target_user_regs_struct sc_regs;
+} QEMU_ALIGNED(8);
+
struct target_ucontext {
unsigned long uc_flags;
target_ulong uc_link; /* target pointer */
@@ -76,53 +78,34 @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
static void setup_sigcontext(struct target_sigcontext *sc, CPUHexagonState *env)
{
- __put_user(env->gpr[HEX_REG_R00], &sc->r0);
- __put_user(env->gpr[HEX_REG_R01], &sc->r1);
- __put_user(env->gpr[HEX_REG_R02], &sc->r2);
- __put_user(env->gpr[HEX_REG_R03], &sc->r3);
- __put_user(env->gpr[HEX_REG_R04], &sc->r4);
- __put_user(env->gpr[HEX_REG_R05], &sc->r5);
- __put_user(env->gpr[HEX_REG_R06], &sc->r6);
- __put_user(env->gpr[HEX_REG_R07], &sc->r7);
- __put_user(env->gpr[HEX_REG_R08], &sc->r8);
- __put_user(env->gpr[HEX_REG_R09], &sc->r9);
- __put_user(env->gpr[HEX_REG_R10], &sc->r10);
- __put_user(env->gpr[HEX_REG_R11], &sc->r11);
- __put_user(env->gpr[HEX_REG_R12], &sc->r12);
- __put_user(env->gpr[HEX_REG_R13], &sc->r13);
- __put_user(env->gpr[HEX_REG_R14], &sc->r14);
- __put_user(env->gpr[HEX_REG_R15], &sc->r15);
- __put_user(env->gpr[HEX_REG_R16], &sc->r16);
- __put_user(env->gpr[HEX_REG_R17], &sc->r17);
- __put_user(env->gpr[HEX_REG_R18], &sc->r18);
- __put_user(env->gpr[HEX_REG_R19], &sc->r19);
- __put_user(env->gpr[HEX_REG_R20], &sc->r20);
- __put_user(env->gpr[HEX_REG_R21], &sc->r21);
- __put_user(env->gpr[HEX_REG_R22], &sc->r22);
- __put_user(env->gpr[HEX_REG_R23], &sc->r23);
- __put_user(env->gpr[HEX_REG_R24], &sc->r24);
- __put_user(env->gpr[HEX_REG_R25], &sc->r25);
- __put_user(env->gpr[HEX_REG_R26], &sc->r26);
- __put_user(env->gpr[HEX_REG_R27], &sc->r27);
- __put_user(env->gpr[HEX_REG_R28], &sc->r28);
- __put_user(env->gpr[HEX_REG_R29], &sc->r29);
- __put_user(env->gpr[HEX_REG_R30], &sc->r30);
- __put_user(env->gpr[HEX_REG_R31], &sc->r31);
- __put_user(env->gpr[HEX_REG_SA0], &sc->sa0);
- __put_user(env->gpr[HEX_REG_LC0], &sc->lc0);
- __put_user(env->gpr[HEX_REG_SA1], &sc->sa1);
- __put_user(env->gpr[HEX_REG_LC1], &sc->lc1);
- __put_user(env->gpr[HEX_REG_M0], &sc->m0);
- __put_user(env->gpr[HEX_REG_M1], &sc->m1);
- __put_user(env->gpr[HEX_REG_USR], &sc->usr);
- __put_user(env->gpr[HEX_REG_GP], &sc->gp);
- __put_user(env->gpr[HEX_REG_UGP], &sc->ugp);
- __put_user(env->gpr[HEX_REG_PC], &sc->pc);
+ abi_ulong preds = 0;
- int i;
- for (i = 0; i < NUM_PREGS; i++) {
- __put_user(env->pred[i], &(sc->pred[i]));
+ for (int i = 0; i < 32; i++) {
+ __put_user(env->gpr[HEX_REG_R00 + i], &sc->sc_regs.gpr[i]);
+ }
+ __put_user(env->gpr[HEX_REG_SA0], &sc->sc_regs.sa0);
+ __put_user(env->gpr[HEX_REG_LC0], &sc->sc_regs.lc0);
+ __put_user(env->gpr[HEX_REG_SA1], &sc->sc_regs.sa1);
+ __put_user(env->gpr[HEX_REG_LC1], &sc->sc_regs.lc1);
+ __put_user(env->gpr[HEX_REG_M0], &sc->sc_regs.m0);
+ __put_user(env->gpr[HEX_REG_M1], &sc->sc_regs.m1);
+ __put_user(env->gpr[HEX_REG_USR], &sc->sc_regs.usr);
+ __put_user(env->gpr[HEX_REG_GP], &sc->sc_regs.gp);
+ __put_user(env->gpr[HEX_REG_UGP], &sc->sc_regs.ugp);
+ __put_user(env->gpr[HEX_REG_PC], &sc->sc_regs.pc);
+
+ /* Consolidate predicates into p3_0 */
+ for (int i = 0; i < NUM_PREGS; i++) {
+ preds |= (env->pred[i] & 0xff) << (i * 8);
}
+ __put_user(preds, &sc->sc_regs.p3_0);
+
+ /* Set cause and badva to 0 - these are set by kernel on exceptions */
+ __put_user(0, &sc->sc_regs.cause);
+ __put_user(0, &sc->sc_regs.badva);
+
+ __put_user(env->gpr[HEX_REG_CS0], &sc->sc_regs.cs0);
+ __put_user(env->gpr[HEX_REG_CS1], &sc->sc_regs.cs1);
}
static void setup_ucontext(struct target_ucontext *uc,
@@ -192,53 +175,30 @@ badframe:
static void restore_sigcontext(CPUHexagonState *env,
struct target_sigcontext *sc)
{
- __get_user(env->gpr[HEX_REG_R00], &sc->r0);
- __get_user(env->gpr[HEX_REG_R01], &sc->r1);
- __get_user(env->gpr[HEX_REG_R02], &sc->r2);
- __get_user(env->gpr[HEX_REG_R03], &sc->r3);
- __get_user(env->gpr[HEX_REG_R04], &sc->r4);
- __get_user(env->gpr[HEX_REG_R05], &sc->r5);
- __get_user(env->gpr[HEX_REG_R06], &sc->r6);
- __get_user(env->gpr[HEX_REG_R07], &sc->r7);
- __get_user(env->gpr[HEX_REG_R08], &sc->r8);
- __get_user(env->gpr[HEX_REG_R09], &sc->r9);
- __get_user(env->gpr[HEX_REG_R10], &sc->r10);
- __get_user(env->gpr[HEX_REG_R11], &sc->r11);
- __get_user(env->gpr[HEX_REG_R12], &sc->r12);
- __get_user(env->gpr[HEX_REG_R13], &sc->r13);
- __get_user(env->gpr[HEX_REG_R14], &sc->r14);
- __get_user(env->gpr[HEX_REG_R15], &sc->r15);
- __get_user(env->gpr[HEX_REG_R16], &sc->r16);
- __get_user(env->gpr[HEX_REG_R17], &sc->r17);
- __get_user(env->gpr[HEX_REG_R18], &sc->r18);
- __get_user(env->gpr[HEX_REG_R19], &sc->r19);
- __get_user(env->gpr[HEX_REG_R20], &sc->r20);
- __get_user(env->gpr[HEX_REG_R21], &sc->r21);
- __get_user(env->gpr[HEX_REG_R22], &sc->r22);
- __get_user(env->gpr[HEX_REG_R23], &sc->r23);
- __get_user(env->gpr[HEX_REG_R24], &sc->r24);
- __get_user(env->gpr[HEX_REG_R25], &sc->r25);
- __get_user(env->gpr[HEX_REG_R26], &sc->r26);
- __get_user(env->gpr[HEX_REG_R27], &sc->r27);
- __get_user(env->gpr[HEX_REG_R28], &sc->r28);
- __get_user(env->gpr[HEX_REG_R29], &sc->r29);
- __get_user(env->gpr[HEX_REG_R30], &sc->r30);
- __get_user(env->gpr[HEX_REG_R31], &sc->r31);
- __get_user(env->gpr[HEX_REG_SA0], &sc->sa0);
- __get_user(env->gpr[HEX_REG_LC0], &sc->lc0);
- __get_user(env->gpr[HEX_REG_SA1], &sc->sa1);
- __get_user(env->gpr[HEX_REG_LC1], &sc->lc1);
- __get_user(env->gpr[HEX_REG_M0], &sc->m0);
- __get_user(env->gpr[HEX_REG_M1], &sc->m1);
- __get_user(env->gpr[HEX_REG_USR], &sc->usr);
- __get_user(env->gpr[HEX_REG_GP], &sc->gp);
- __get_user(env->gpr[HEX_REG_UGP], &sc->ugp);
- __get_user(env->gpr[HEX_REG_PC], &sc->pc);
+ abi_ulong preds;
- int i;
- for (i = 0; i < NUM_PREGS; i++) {
- __get_user(env->pred[i], &(sc->pred[i]));
+ for (int i = 0; i < 32; i++) {
+ __get_user(env->gpr[HEX_REG_R00 + i], &sc->sc_regs.gpr[i]);
}
+ __get_user(env->gpr[HEX_REG_SA0], &sc->sc_regs.sa0);
+ __get_user(env->gpr[HEX_REG_LC0], &sc->sc_regs.lc0);
+ __get_user(env->gpr[HEX_REG_SA1], &sc->sc_regs.sa1);
+ __get_user(env->gpr[HEX_REG_LC1], &sc->sc_regs.lc1);
+ __get_user(env->gpr[HEX_REG_M0], &sc->sc_regs.m0);
+ __get_user(env->gpr[HEX_REG_M1], &sc->sc_regs.m1);
+ __get_user(env->gpr[HEX_REG_USR], &sc->sc_regs.usr);
+ __get_user(env->gpr[HEX_REG_GP], &sc->sc_regs.gp);
+ __get_user(env->gpr[HEX_REG_UGP], &sc->sc_regs.ugp);
+ __get_user(env->gpr[HEX_REG_PC], &sc->sc_regs.pc);
+
+ /* Restore predicates from p3_0 */
+ __get_user(preds, &sc->sc_regs.p3_0);
+ for (int i = 0; i < NUM_PREGS; i++) {
+ env->pred[i] = (preds >> (i * 8)) & 0xff;
+ }
+
+ __get_user(env->gpr[HEX_REG_CS0], &sc->sc_regs.cs0);
+ __get_user(env->gpr[HEX_REG_CS1], &sc->sc_regs.cs1);
}
static void restore_ucontext(CPUHexagonState *env, struct target_ucontext *uc)
diff --git a/linux-user/microblaze/elfload.c b/linux-user/microblaze/elfload.c
index 7eb1b26..bdc0a95 100644
--- a/linux-user/microblaze/elfload.c
+++ b/linux-user/microblaze/elfload.c
@@ -8,7 +8,8 @@
const char *get_elf_cpu_model(uint32_t eflags)
{
- return "any";
+ return TARGET_BIG_ENDIAN ? "any,little-endian=off"
+ : "any,little-endian=on";
}
void elf_core_copy_regs(target_elf_gregset_t *r, const CPUMBState *env)
diff --git a/python/qemu/machine/README.rst b/python/qemu/machine/README.rst
index 8de2c3d..6554c69 100644
--- a/python/qemu/machine/README.rst
+++ b/python/qemu/machine/README.rst
@@ -2,7 +2,7 @@ qemu.machine package
====================
This package provides core utilities used for testing and debugging
-QEMU. It is used by the iotests, vm tests, avocado tests, and several
+QEMU. It is used by the iotests, vm tests, functional tests, and several
other utilities in the ./scripts directory. It is not a fully-fledged
SDK and it is subject to change at any time.
diff --git a/python/qemu/utils/README.rst b/python/qemu/utils/README.rst
index d5f2da1..5027f0b 100644
--- a/python/qemu/utils/README.rst
+++ b/python/qemu/utils/README.rst
@@ -2,6 +2,6 @@ qemu.utils package
==================
This package provides miscellaneous utilities used for testing and
-debugging QEMU. It is used primarily by the vm and avocado tests.
+debugging QEMU. It is used primarily by the vm and functional tests.
See the documentation in ``__init__.py`` for more information.
diff --git a/scripts/clean_functional_cache.py b/scripts/clean_functional_cache.py
new file mode 100755
index 0000000..c3370ff
--- /dev/null
+++ b/scripts/clean_functional_cache.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+"""Delete stale assets from the download cache of the functional tests"""
+
+import os
+import stat
+import sys
+import time
+from pathlib import Path
+
+
+cache_dir_env = os.getenv('QEMU_TEST_CACHE_DIR')
+if cache_dir_env:
+ cache_dir = Path(cache_dir_env, "download")
+else:
+ cache_dir = Path(Path("~").expanduser(), ".cache", "qemu", "download")
+
+if not cache_dir.exists():
+ print(f"Cache dir {cache_dir} does not exist!", file=sys.stderr)
+ sys.exit(1)
+
+os.chdir(cache_dir)
+
+for file in cache_dir.iterdir():
+ # Only consider the files that use a sha256 as filename:
+ if len(file.name) != 64:
+ continue
+
+ try:
+ timestamp = int(file.with_suffix(".stamp").read_text())
+ except FileNotFoundError:
+ # Assume it's an old file that was already in the cache before we
+ # added the code for evicting stale assets. Use the release date
+ # of QEMU v10.1 as a default timestamp.
+ timestamp = time.mktime((2025, 8, 26, 0, 0, 0, 0, 0, 0))
+
+ age = time.time() - timestamp
+
+ # Delete files older than half of a year (183 days * 24h * 60m * 60s)
+ if age > 15811200:
+ print(f"Removing {cache_dir}/{file.name}.")
+ file.chmod(stat.S_IWRITE)
+ file.unlink()
diff --git a/target/alpha/helper.h b/target/alpha/helper.h
index 788d2fb..954a5c8 100644
--- a/target/alpha/helper.h
+++ b/target/alpha/helper.h
@@ -92,6 +92,7 @@ DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_1(halt, void, i64)
+DEF_HELPER_1(whami, i64, env)
DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64)
DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64)
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
index 5f302b1..6828b12 100644
--- a/target/alpha/machine.c
+++ b/target/alpha/machine.c
@@ -25,8 +25,8 @@ static const VMStateInfo vmstate_fpcr = {
};
static const VMStateField vmstate_env_fields[] = {
- VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31),
- VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31),
+ VMSTATE_UINT64_ARRAY(ir, CPUAlphaState, 31),
+ VMSTATE_UINT64_ARRAY(fir, CPUAlphaState, 31),
/* Save the architecture value of the fpcr, not the internally
expanded version. Since this architecture value does not
exist in memory to be stored, this requires a but of hoop
@@ -41,27 +41,27 @@ static const VMStateField vmstate_env_fields[] = {
.flags = VMS_SINGLE,
.offset = 0
},
- VMSTATE_UINTTL(pc, CPUAlphaState),
- VMSTATE_UINTTL(unique, CPUAlphaState),
- VMSTATE_UINTTL(lock_addr, CPUAlphaState),
- VMSTATE_UINTTL(lock_value, CPUAlphaState),
+ VMSTATE_UINT64(pc, CPUAlphaState),
+ VMSTATE_UINT64(unique, CPUAlphaState),
+ VMSTATE_UINT64(lock_addr, CPUAlphaState),
+ VMSTATE_UINT64(lock_value, CPUAlphaState),
VMSTATE_UINT32(flags, CPUAlphaState),
VMSTATE_UINT32(pcc_ofs, CPUAlphaState),
- VMSTATE_UINTTL(trap_arg0, CPUAlphaState),
- VMSTATE_UINTTL(trap_arg1, CPUAlphaState),
- VMSTATE_UINTTL(trap_arg2, CPUAlphaState),
+ VMSTATE_UINT64(trap_arg0, CPUAlphaState),
+ VMSTATE_UINT64(trap_arg1, CPUAlphaState),
+ VMSTATE_UINT64(trap_arg2, CPUAlphaState),
- VMSTATE_UINTTL(exc_addr, CPUAlphaState),
- VMSTATE_UINTTL(palbr, CPUAlphaState),
- VMSTATE_UINTTL(ptbr, CPUAlphaState),
- VMSTATE_UINTTL(vptptr, CPUAlphaState),
- VMSTATE_UINTTL(sysval, CPUAlphaState),
- VMSTATE_UINTTL(usp, CPUAlphaState),
+ VMSTATE_UINT64(exc_addr, CPUAlphaState),
+ VMSTATE_UINT64(palbr, CPUAlphaState),
+ VMSTATE_UINT64(ptbr, CPUAlphaState),
+ VMSTATE_UINT64(vptptr, CPUAlphaState),
+ VMSTATE_UINT64(sysval, CPUAlphaState),
+ VMSTATE_UINT64(usp, CPUAlphaState),
- VMSTATE_UINTTL_ARRAY(shadow, CPUAlphaState, 8),
- VMSTATE_UINTTL_ARRAY(scratch, CPUAlphaState, 24),
+ VMSTATE_UINT64_ARRAY(shadow, CPUAlphaState, 8),
+ VMSTATE_UINT64_ARRAY(scratch, CPUAlphaState, 24),
VMSTATE_END_OF_LIST()
};
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
index 87e3760..0e0a619 100644
--- a/target/alpha/sys_helper.c
+++ b/target/alpha/sys_helper.c
@@ -67,3 +67,8 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
timer_del(cpu->alarm_timer);
}
}
+
+uint64_t HELPER(whami)(CPUAlphaState *env)
+{
+ return env_cpu(env)->cpu_index;
+}
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index f11b382..b1d8a4e 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -436,18 +436,18 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
return DISAS_NEXT;
}
-static void gen_goto_tb(DisasContext *ctx, int idx, int32_t disp)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, int32_t disp)
{
if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) {
/* With PCREL, PC must always be up-to-date. */
if (ctx->pcrel) {
gen_pc_disp(ctx, cpu_pc, disp);
- tcg_gen_goto_tb(idx);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(idx);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_pc_disp(ctx, cpu_pc, disp);
}
- tcg_gen_exit_tb(ctx->base.tb, idx);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
gen_pc_disp(ctx, cpu_pc, disp);
tcg_gen_lookup_and_goto_ptr();
@@ -1126,8 +1126,7 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x3C:
/* WHAMI */
- tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
- -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
+ gen_helper_whami(ctx->ir[IR_V0], tcg_env);
break;
case 0x3E:
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 918d5ed..3292d7c 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -528,7 +528,7 @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
return translator_use_goto_tb(&s->base, dest);
}
-static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
+static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, int64_t diff)
{
if (use_goto_tb(s, s->pc_curr + diff)) {
/*
@@ -541,12 +541,12 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
*/
if (tb_cflags(s->base.tb) & CF_PCREL) {
gen_a64_update_pc(s, diff);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_a64_update_pc(s, diff);
}
- tcg_gen_exit_tb(s->base.tb, n);
+ tcg_gen_exit_tb(s->base.tb, tb_slot_idx);
s->base.is_jmp = DISAS_NORETURN;
} else {
gen_a64_update_pc(s, diff);
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 3df0bbc..5f64fed 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -1310,7 +1310,7 @@ static void gen_goto_ptr(void)
* cpu_loop_exec. Any live exit_requests will be processed as we
* enter the next TB.
*/
-static void gen_goto_tb(DisasContext *s, int n, target_long diff)
+static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, target_long diff)
{
if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
/*
@@ -1323,12 +1323,12 @@ static void gen_goto_tb(DisasContext *s, int n, target_long diff)
*/
if (tb_cflags(s->base.tb) & CF_PCREL) {
gen_update_pc(s, diff);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_update_pc(s, diff);
}
- tcg_gen_exit_tb(s->base.tb, n);
+ tcg_gen_exit_tb(s->base.tb, tb_slot_idx);
} else {
gen_update_pc(s, diff);
gen_goto_ptr();
diff --git a/target/avr/translate.c b/target/avr/translate.c
index 804b0b2..ef6f655 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -981,14 +981,15 @@ static void gen_pop_ret(DisasContext *ctx, TCGv ret)
}
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
const TranslationBlock *tb = ctx->base.tb;
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(tb, n);
+ tcg_gen_exit_tb(tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c
index 23deba2..b5ece60 100644
--- a/target/hexagon/decode.c
+++ b/target/hexagon/decode.c
@@ -236,9 +236,9 @@ static void decode_set_insn_attr_fields(Packet *pkt)
if (GET_ATTRIB(opcode, A_SCALAR_STORE) &&
!GET_ATTRIB(opcode, A_MEMSIZE_0B)) {
if (pkt->insn[i].slot == 0) {
- pkt->pkt_has_store_s0 = true;
+ pkt->pkt_has_scalar_store_s0 = true;
} else {
- pkt->pkt_has_store_s1 = true;
+ pkt->pkt_has_scalar_store_s1 = true;
}
}
}
diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py
index c1f806a..a9c0e27 100755
--- a/target/hexagon/gen_helper_funcs.py
+++ b/target/hexagon/gen_helper_funcs.py
@@ -69,7 +69,7 @@ def gen_helper_function(f, tag, tagregs, tagimms):
if hex_common.need_slot(tag):
if "A_LOAD" in hex_common.attribdict[tag]:
f.write(hex_common.code_fmt(f"""\
- bool pkt_has_store_s1 = slotval & 0x1;
+ bool pkt_has_scalar_store_s1 = slotval & 0x1;
"""))
f.write(hex_common.code_fmt(f"""\
uint32_t slot = slotval >> 1;
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
index 08fc541..cecaece 100644
--- a/target/hexagon/genptr.c
+++ b/target/hexagon/genptr.c
@@ -395,7 +395,8 @@ static inline void gen_store_conditional8(DisasContext *ctx,
#ifndef CONFIG_HEXAGON_IDEF_PARSER
static TCGv gen_slotval(DisasContext *ctx)
{
- int slotval = (ctx->pkt->pkt_has_store_s1 & 1) | (ctx->insn->slot << 1);
+ int slotval =
+ (ctx->pkt->pkt_has_scalar_store_s1 & 1) | (ctx->insn->slot << 1);
return tcg_constant_tl(slotval);
}
#endif
diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py
index 758e5fd..6803908 100755
--- a/target/hexagon/hex_common.py
+++ b/target/hexagon/hex_common.py
@@ -350,6 +350,7 @@ class Register:
f"{self.helper_arg_type()} {self.helper_arg_name()}"
)
+
#
# Every register is either Single or Pair or Hvx
#
@@ -1070,11 +1071,22 @@ def init_registers():
for reg in new_regs:
new_registers[f"{reg.regtype}{reg.regid}"] = reg
-def get_register(tag, regtype, regid):
- if f"{regtype}{regid}V" in semdict[tag]:
- return registers[f"{regtype}{regid}"]
- else:
- return new_registers[f"{regtype}{regid}"]
+def is_new_reg(tag, regid):
+ if regid[0] in "NO":
+ return True
+ return regid[0] == "P" and \
+ f"{regid}N" in semdict[tag] and \
+ f"{regid}V" not in semdict[tag]
+
+def get_register(tag, regtype, regid, subtype=""):
+ regid = f"{regtype}{regid}"
+ is_new = is_new_reg(tag, regid)
+ try:
+ reg = new_registers[regid] if is_new else registers[regid]
+ except KeyError:
+ raise Exception(f"Unknown {'new ' if is_new else ''}register {regid}" +\
+ f"from '{tag}' with syntax '{semdict[tag]}'") from None
+ return reg
def helper_ret_type(tag, regs):
## If there is a scalar result, it is the return type
diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst
index 7199177..235e3de 100644
--- a/target/hexagon/idef-parser/README.rst
+++ b/target/hexagon/idef-parser/README.rst
@@ -637,7 +637,7 @@ tinycode for the Hexagon ``add`` instruction
::
---- 00021094
- mov_i32 pkt_has_store_s1,$0x0
+ mov_i32 pkt_has_scalar_store_s1,$0x0
add_i32 tmp0,r2,r2
mov_i32 loc2,tmp0
mov_i32 new_r1,loc2
diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c
index 542af8d..1dc52b4 100644
--- a/target/hexagon/idef-parser/parser-helpers.c
+++ b/target/hexagon/idef-parser/parser-helpers.c
@@ -1725,7 +1725,7 @@ void gen_cancel(Context *c, YYLTYPE *locp)
void gen_load_cancel(Context *c, YYLTYPE *locp)
{
- OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
+ OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {\n");
OUT(c, locp, "ctx->s1_store_processed = false;\n");
OUT(c, locp, "process_store(ctx, 1);\n");
OUT(c, locp, "}\n");
@@ -1750,7 +1750,7 @@ void gen_load(Context *c, YYLTYPE *locp, HexValue *width,
/* Lookup the effective address EA */
find_variable(c, locp, ea, ea);
- OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
+ OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {\n");
OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n");
OUT(c, locp, "process_store(ctx, 1);\n");
OUT(c, locp, "}\n");
diff --git a/target/hexagon/idef-parser/prepare b/target/hexagon/idef-parser/prepare
deleted file mode 100755
index cb3622d..0000000
--- a/target/hexagon/idef-parser/prepare
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# Copyright(c) 2019-2021 rev.ng Labs Srl. All Rights Reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
-#
-
-set -e
-set -o pipefail
-
-# Run the preprocessor and drop comments
-cpp "$@"
diff --git a/target/hexagon/insn.h b/target/hexagon/insn.h
index 24dcf7f..5d59430 100644
--- a/target/hexagon/insn.h
+++ b/target/hexagon/insn.h
@@ -66,8 +66,8 @@ struct Packet {
bool pkt_has_dczeroa;
- bool pkt_has_store_s0;
- bool pkt_has_store_s1;
+ bool pkt_has_scalar_store_s0;
+ bool pkt_has_scalar_store_s1;
bool pkt_has_hvx;
Insn *vhist_insn;
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index 9ba9be4..088e596 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -83,7 +83,7 @@
*/
#define CHECK_NOSHUF(VA, SIZE) \
do { \
- if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \
+ if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \
probe_noshuf_load(VA, SIZE, ctx->mem_idx); \
process_store(ctx, 1); \
} \
@@ -94,11 +94,11 @@
TCGLabel *noshuf_label = gen_new_label(); \
tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, noshuf_label); \
GET_EA; \
- if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \
+ if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \
probe_noshuf_load(EA, SIZE, ctx->mem_idx); \
} \
gen_set_label(noshuf_label); \
- if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \
+ if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \
process_store(ctx, 1); \
} \
} while (0)
@@ -525,7 +525,7 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift)
#define fLOAD(NUM, SIZE, SIGN, EA, DST) \
do { \
- check_noshuf(env, pkt_has_store_s1, slot, EA, SIZE, GETPC()); \
+ check_noshuf(env, pkt_has_scalar_store_s1, slot, EA, SIZE, GETPC()); \
DST = (size##SIZE##SIGN##_t)MEM_LOAD##SIZE(env, EA, GETPC()); \
} while (0)
#endif
diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build
index bb4ebaa..d26787a 100644
--- a/target/hexagon/meson.build
+++ b/target/hexagon/meson.build
@@ -280,12 +280,13 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
command: [python, files('gen_idef_parser_funcs.py'), semantics_generated, '@OUTPUT@'],
)
+ compiler = meson.get_compiler('c').get_id()
preprocessed_idef_parser_input_generated = custom_target(
'idef_parser_input.preprocessed.h.inc',
output: 'idef_parser_input.preprocessed.h.inc',
input: idef_parser_input_generated,
depend_files: [idef_parser_dir / 'macros.h.inc'],
- command: [idef_parser_dir / 'prepare', '@INPUT@', '-I' + idef_parser_dir, '-o', '@OUTPUT@'],
+ command: [compiler, '-x', 'c', '-E', '-I', idef_parser_dir, '-o', '@OUTPUT@', '@INPUT@'],
)
flex = generator(
@@ -323,7 +324,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs
)
indent = find_program('indent', required: false)
- if indent.found()
+ if indent.found() and host_os == 'linux'
idef_generated_tcg_c = custom_target(
'indent',
input: idef_generated_tcg[0],
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index 444799d..e2e80ca 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -463,11 +463,11 @@ void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask)
* If the load is in slot 0 and there is a store in slot1 (that
* wasn't cancelled), we have to do the store first.
*/
-static void check_noshuf(CPUHexagonState *env, bool pkt_has_store_s1,
+static void check_noshuf(CPUHexagonState *env, bool pkt_has_scalar_store_s1,
uint32_t slot, target_ulong vaddr, int size,
uintptr_t ra)
{
- if (slot == 0 && pkt_has_store_s1 &&
+ if (slot == 0 && pkt_has_scalar_store_s1 &&
((env->slot_cancelled & (1 << 1)) == 0)) {
probe_read(env, vaddr, size, MMU_USER_IDX, ra);
commit_store(env, 1, ra);
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 02fd40c..8fce219 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -133,15 +133,15 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
return translator_use_goto_tb(&ctx->base, dest);
}
-static void gen_goto_tb(DisasContext *ctx, int idx, target_ulong dest, bool
- move_to_pc)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest, bool move_to_pc)
{
if (use_goto_tb(ctx, dest)) {
- tcg_gen_goto_tb(idx);
+ tcg_gen_goto_tb(tb_slot_idx);
if (move_to_pc) {
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest);
}
- tcg_gen_exit_tb(ctx->base.tb, idx);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
if (move_to_pc) {
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest);
@@ -693,11 +693,11 @@ static void process_store_log(DisasContext *ctx)
* the memory accesses overlap.
*/
Packet *pkt = ctx->pkt;
- if (pkt->pkt_has_store_s1) {
+ if (pkt->pkt_has_scalar_store_s1) {
g_assert(!pkt->pkt_has_dczeroa);
process_store(ctx, 1);
}
- if (pkt->pkt_has_store_s0) {
+ if (pkt->pkt_has_scalar_store_s0) {
g_assert(!pkt->pkt_has_dczeroa);
process_store(ctx, 0);
}
@@ -822,8 +822,9 @@ static void gen_commit_packet(DisasContext *ctx)
* involved in committing the packet.
*/
Packet *pkt = ctx->pkt;
- bool has_store_s0 = pkt->pkt_has_store_s0;
- bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed);
+ bool has_store_s0 = pkt->pkt_has_scalar_store_s0;
+ bool has_store_s1 =
+ (pkt->pkt_has_scalar_store_s1 && !ctx->s1_store_processed);
bool has_hvx_store = pkt_has_hvx_store(pkt);
if (pkt->pkt_has_dczeroa) {
/*
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 672ab37..c652ef9 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -187,7 +187,7 @@ typedef struct HPPATLBEntry {
struct HPPATLBEntry *unused_next;
};
- target_ulong pa;
+ hwaddr pa;
unsigned entry_valid : 1;
@@ -320,8 +320,8 @@ void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
-static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask,
- uint64_t spc, target_ulong off)
+static inline vaddr hppa_form_gva_mask(uint64_t gva_offset_mask,
+ uint64_t spc, target_ulong off)
{
#ifdef CONFIG_USER_ONLY
return off & gva_offset_mask;
@@ -330,8 +330,8 @@ static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask,
#endif
}
-static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
- target_ulong off)
+static inline vaddr hppa_form_gva(CPUHPPAState *env, uint64_t spc,
+ target_ulong off)
{
return hppa_form_gva_mask(env->gva_offset_mask, spc, off);
}
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index d7f8495..edcd2bf 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -148,8 +148,8 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
m = UINT32_MAX;
}
- qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n"
- "IA_B %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n",
+ qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (0x%" VADDR_PRIx ")\n"
+ "IA_B %08" PRIx64 ":%0*" PRIx64 " (0x%" VADDR_PRIx ")\n",
env->iasq_f >> 32, w, m & env->iaoq_f,
hppa_form_gva_mask(env->gva_offset_mask, env->iasq_f,
env->iaoq_f),
diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode
index 4eaac75..13c6a55 100644
--- a/target/hppa/insns.decode
+++ b/target/hppa/insns.decode
@@ -365,10 +365,10 @@ fstd 011100 ..... ..... .. ............1. @ldstim11
&mpyadd rm1 rm2 ta ra tm
@mpyadd ...... rm1:5 rm2:5 ta:5 ra:5 . tm:5 &mpyadd
-fmpyadd_f 000110 ..... ..... ..... ..... 0 ..... @mpyadd
-fmpyadd_d 000110 ..... ..... ..... ..... 1 ..... @mpyadd
-fmpysub_f 100110 ..... ..... ..... ..... 0 ..... @mpyadd
-fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpyadd_f 000110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpyadd_d 000110 ..... ..... ..... ..... 0 ..... @mpyadd
+fmpysub_f 100110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpysub_d 100110 ..... ..... ..... ..... 0 ..... @mpyadd
####
# Conditional Branches
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 9bdd0a6..cce82e6 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -803,7 +803,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
- uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
+ vaddr gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
HPPATLBEntry *ent = hppa_find_tlb(env, gva);
if (ent == NULL) {
diff --git a/target/hppa/trace-events b/target/hppa/trace-events
index a10ba73..01761a4 100644
--- a/target/hppa/trace-events
+++ b/target/hppa/trace-events
@@ -1,13 +1,13 @@
# See docs/devel/tracing.rst for syntax documentation.
# mem_helper.c
-disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
-disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64
+disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64
disable hppa_tlb_find_entry_not_found(void *env, uint64_t addr) "env=%p addr=%08lx"
disable hppa_tlb_get_physical_address(void *env, int ret, int prot, uint64_t addr, uint64_t phys) "env=%p ret=%d prot=%d addr=0x%lx phys=0x%lx"
disable hppa_tlb_fill_excp(void *env, uint64_t addr, int size, int type, int mmu_idx) "env=%p addr=0x%lx size=%d type=%d mmu_idx=%d"
disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size, int type, int mmu_idx) "env=%p addr=0x%lx phys=0x%lx size=%d type=%d mmu_idx=%d"
-disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64
disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d"
disable hppa_tlb_ptlb(void *env) "env=%p"
disable hppa_tlb_ptlb_local(void *env) "env=%p"
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 7a81cfc..853cba2 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -104,6 +104,12 @@ typedef struct DisasContext {
#define MMU_DISABLED(C) MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif
+static inline MemOp mo_endian(DisasContext *ctx)
+{
+ /* The PSW_E bit sets the (little) endianness, but we don't implement it. */
+ return MO_BE;
+}
+
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
@@ -1599,6 +1605,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1617,6 +1624,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1635,6 +1643,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1653,6 +1662,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1691,7 +1701,7 @@ static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i32();
- do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
+ do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL);
save_frw_i32(rt, tmp);
if (rt == 0) {
@@ -1716,7 +1726,7 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ);
save_frd(rt, tmp);
if (rt == 0) {
@@ -1750,7 +1760,7 @@ static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frw_i32(rt);
- do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL);
return nullify_end(ctx);
}
@@ -1770,7 +1780,7 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ);
return nullify_end(ctx);
}
@@ -3302,7 +3312,7 @@ static bool trans_ld(DisasContext *ctx, arg_ldst *a)
return gen_illegal(ctx);
}
return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
- a->disp, a->sp, a->m, a->size | MO_TE);
+ a->disp, a->sp, a->m, a->size);
}
static bool trans_st(DisasContext *ctx, arg_ldst *a)
@@ -3311,12 +3321,12 @@ static bool trans_st(DisasContext *ctx, arg_ldst *a)
if (!ctx->is_pa20 && a->size > MO_32) {
return gen_illegal(ctx);
}
- return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
+ return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size);
}
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
- MemOp mop = MO_TE | MO_ALIGN | a->size;
+ MemOp mop = mo_endian(ctx) | MO_ALIGN | a->size;
TCGv_i64 dest, ofs;
TCGv_i64 addr;
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 3c9b6ca..d2bb873 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -30,6 +30,7 @@
#include "qobject/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
+#include "system/memory.h"
/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
@@ -68,23 +69,23 @@ static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
pte & PG_RW_MASK ? 'W' : '-');
}
-static void tlb_info_32(Monitor *mon, CPUArchState *env)
+static void tlb_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2;
uint32_t pgd, pde, pte;
pgd = env->cr[3] & ~0xfff;
for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
+ pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL);
if (pde & PG_PRESENT_MASK) {
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
/* 4M pages */
print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
} else {
for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
+ pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 22) + (l2 << 12),
pte & ~PG_PSE_MASK,
@@ -96,21 +97,20 @@ static void tlb_info_32(Monitor *mon, CPUArchState *env)
}
}
-static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2, l3;
uint64_t pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
pdp_addr = env->cr[3] & ~0x1f;
for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL);
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
/* 2M pages with PAE, CR4.PSE is ignored */
@@ -119,8 +119,8 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l3 * 8,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l1 << 30) + (l2 << 21)
+ (l3 << 12),
@@ -136,24 +136,23 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
}
#ifdef TARGET_X86_64
-static void tlb_info_la48(Monitor *mon, CPUArchState *env,
+static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as,
uint64_t l0, uint64_t pml4_addr)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
uint64_t pdp_addr, pd_addr, pt_addr;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
if (!(pml4e & PG_PRESENT_MASK)) {
continue;
}
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
if (!(pdpe & PG_PRESENT_MASK)) {
continue;
}
@@ -167,8 +166,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8, attrs, NULL);
if (!(pde & PG_PRESENT_MASK)) {
continue;
}
@@ -182,10 +180,8 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l4 * 8,
+ attrs, NULL);
if (pte & PG_PRESENT_MASK) {
print_pte(mon, env, (l0 << 48) + (l1 << 39) +
(l2 << 30) + (l3 << 21) + (l4 << 12),
@@ -197,18 +193,18 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env,
}
}
-static void tlb_info_la57(Monitor *mon, CPUArchState *env)
+static void tlb_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
uint64_t l0;
uint64_t pml5e;
uint64_t pml5_addr;
pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL);
if (pml5e & PG_PRESENT_MASK) {
- tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
+ tlb_info_la48(mon, env, as, l0, pml5e & 0x3fffffffff000ULL);
}
}
}
@@ -217,6 +213,7 @@ static void tlb_info_la57(Monitor *mon, CPUArchState *env)
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
CPUArchState *env;
+ AddressSpace *as;
env = mon_get_cpu_env(mon);
if (!env) {
@@ -228,21 +225,22 @@ void hmp_info_tlb(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "PG disabled\n");
return;
}
+ as = cpu_get_address_space(env_cpu(env), X86ASIdx_MEM);
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
- tlb_info_la57(mon, env);
+ tlb_info_la57(mon, env, as);
} else {
- tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
+ tlb_info_la48(mon, env, as, 0, env->cr[3] & 0x3fffffffff000ULL);
}
} else
#endif
{
- tlb_info_pae32(mon, env);
+ tlb_info_pae32(mon, env, as);
}
} else {
- tlb_info_32(mon, env);
+ tlb_info_32(mon, env, as);
}
}
@@ -271,8 +269,9 @@ static void mem_print(Monitor *mon, CPUArchState *env,
}
}
-static void mem_info_32(Monitor *mon, CPUArchState *env)
+static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2;
int prot, last_prot;
uint32_t pgd, pde, pte;
@@ -282,8 +281,7 @@ static void mem_info_32(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
+ pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL);
end = l1 << 22;
if (pde & PG_PRESENT_MASK) {
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -291,8 +289,8 @@ static void mem_info_32(Monitor *mon, CPUArchState *env)
mem_print(mon, env, &start, &last_prot, end, prot);
} else {
for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
+ pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4,
+ attrs, NULL);
end = (l1 << 22) + (l2 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde &
@@ -312,8 +310,9 @@ static void mem_info_32(Monitor *mon, CPUArchState *env)
mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
-static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
unsigned int l1, l2, l3;
int prot, last_prot;
uint64_t pdpe, pde, pte;
@@ -324,14 +323,12 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL);
end = l1 << 30;
if (pdpe & PG_PRESENT_MASK) {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL);
end = (l1 << 30) + (l2 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -341,8 +338,8 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l3 * 8,
+ attrs, NULL);
end = (l1 << 30) + (l2 << 21) + (l3 << 12);
if (pte & PG_PRESENT_MASK) {
prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
@@ -369,8 +366,9 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env)
#ifdef TARGET_X86_64
-static void mem_info_la48(Monitor *mon, CPUArchState *env)
+static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int prot, last_prot;
uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte;
@@ -380,14 +378,12 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
end = l1 << 39;
if (pml4e & PG_PRESENT_MASK) {
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
end = (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
if (pdpe & PG_PSE_MASK) {
@@ -398,8 +394,8 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
} else {
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8,
+ attrs, NULL);
end = (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
if (pde & PG_PSE_MASK) {
@@ -411,10 +407,10 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
} else {
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as,
+ pt_addr
+ + l4 * 8,
+ attrs, NULL);
end = (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
@@ -449,8 +445,9 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env)
mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
-static void mem_info_la57(Monitor *mon, CPUArchState *env)
+static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int prot, last_prot;
uint64_t l0, l1, l2, l3, l4;
uint64_t pml5e, pml4e, pdpe, pde, pte;
@@ -460,8 +457,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
last_prot = 0;
start = -1;
for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
+ pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL);
end = l0 << 48;
if (!(pml5e & PG_PRESENT_MASK)) {
prot = 0;
@@ -471,8 +467,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pml4_addr = pml5e & 0x3fffffffff000ULL;
for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
+ pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL);
end = (l0 << 48) + (l1 << 39);
if (!(pml4e & PG_PRESENT_MASK)) {
prot = 0;
@@ -482,8 +477,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pdp_addr = pml4e & 0x3fffffffff000ULL;
for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
+ pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30);
if (pdpe & PG_PRESENT_MASK) {
prot = 0;
@@ -501,8 +495,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pd_addr = pdpe & 0x3fffffffff000ULL;
for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
+ pde = address_space_ldq_le(as, pd_addr + l3 * 8,
+ attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
if (pde & PG_PRESENT_MASK) {
prot = 0;
@@ -520,8 +514,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
pt_addr = pde & 0x3fffffffff000ULL;
for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
+ pte = address_space_ldq_le(as, pt_addr + l4 * 8,
+ attrs, NULL);
end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
(l3 << 21) + (l4 << 12);
if (pte & PG_PRESENT_MASK) {
@@ -545,6 +539,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env)
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
CPUArchState *env;
+ AddressSpace *as;
env = mon_get_cpu_env(mon);
if (!env) {
@@ -556,21 +551,22 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "PG disabled\n");
return;
}
+ as = cpu_get_address_space(env_cpu(env), X86ASIdx_MEM);
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
- mem_info_la57(mon, env);
+ mem_info_la57(mon, env, as);
} else {
- mem_info_la48(mon, env);
+ mem_info_la48(mon, env, as);
}
} else
#endif
{
- mem_info_pae32(mon, env);
+ mem_info_pae32(mon, env, as);
}
} else {
- mem_info_32(mon, env);
+ mem_info_32(mon, env, as);
}
}
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 0068d22..dbc69c7 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -34,7 +34,7 @@ TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
MMUAccessType access_type, int mmu_idx,
int is_debug);
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
- uint64_t *dir_width, target_ulong level);
+ uint64_t *dir_width, unsigned int level);
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
#endif /* LOONGARCH_CPU_MMU_H */
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 4a9db3e..867e7c8 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -17,7 +17,7 @@
#include "tcg/tcg_loongarch.h"
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
- uint64_t *dir_width, target_ulong level)
+ uint64_t *dir_width, unsigned int level)
{
switch (level) {
case 1:
diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c
index 471eda2..23a5eec 100644
--- a/target/loongarch/gdbstub.c
+++ b/target/loongarch/gdbstub.c
@@ -62,7 +62,7 @@ int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
CPULoongArchState *env = cpu_env(cs);
- target_ulong tmp;
+ uint64_t tmp;
int length = 0;
if (n < 0 || n > 34) {
diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c
index 73190fb..0366a50 100644
--- a/target/loongarch/machine.c
+++ b/target/loongarch/machine.c
@@ -191,8 +191,8 @@ const VMStateDescription vmstate_loongarch_cpu = {
.version_id = 4,
.minimum_version_id = 4,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32),
- VMSTATE_UINTTL(env.pc, LoongArchCPU),
+ VMSTATE_UINT64_ARRAY(env.gpr, LoongArchCPU, 32),
+ VMSTATE_UINT64(env.pc, LoongArchCPU),
/* Remaining CSRs */
VMSTATE_UINT64(env.CSR_CRMD, LoongArchCPU),
diff --git a/target/loongarch/tcg/helper.h b/target/loongarch/tcg/helper.h
index db57dbf..7e508c5 100644
--- a/target/loongarch/tcg/helper.h
+++ b/target/loongarch/tcg/helper.h
@@ -129,7 +129,7 @@ DEF_HELPER_2(invtlb_all_asid, void, env, tl)
DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl)
DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl)
-DEF_HELPER_4(lddir, tl, env, tl, tl, i32)
+DEF_HELPER_4(lddir, tl, env, tl, i32, i32)
DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
DEF_HELPER_1(ertn, void, env)
DEF_HELPER_1(idle, void, env)
diff --git a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
index a407ab5..64e53a4 100644
--- a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
@@ -380,7 +380,7 @@ static bool trans_lddir(DisasContext *ctx, arg_lddir *a)
if (check_plv(ctx)) {
return false;
}
- gen_helper_lddir(dest, tcg_env, src, tcg_constant_tl(a->imm), mem_idx);
+ gen_helper_lddir(dest, tcg_env, src, tcg_constant_i32(a->imm), mem_idx);
return true;
}
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 8cfce48..e119f78 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -117,13 +117,7 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
- uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
- if (!tlb_e) {
- return;
- }
-
- tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
mask = MAKE_64BIT_MASK(0, tlb_ps + 1);
@@ -145,11 +139,19 @@ static void invalidate_tlb(CPULoongArchState *env, int index)
{
LoongArchTLB *tlb;
uint16_t csr_asid, tlb_asid, tlb_g;
+ uint8_t tlb_e;
csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
tlb = &env->tlb[index];
+ tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
+ if (!tlb_e) {
+ return;
+ }
+
+ tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
+ /* QEMU TLB is flushed when asid is changed */
if (tlb_g == 0 && tlb_asid != csr_asid) {
return;
}
@@ -369,7 +371,7 @@ void helper_tlbfill(CPULoongArchState *env)
uint16_t pagesize, stlb_ps;
uint16_t asid, tlb_asid;
LoongArchTLB *tlb;
- uint8_t tlb_e;
+ uint8_t tlb_e, tlb_g;
if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
entryhi = env->CSR_TLBREHI;
@@ -398,7 +400,8 @@ void helper_tlbfill(CPULoongArchState *env)
}
tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
- if (asid != tlb_asid) {
+ tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
+ if (tlb_g == 0 && asid != tlb_asid) {
set = i;
}
}
@@ -421,7 +424,8 @@ void helper_tlbfill(CPULoongArchState *env)
}
tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
- if (asid != tlb_asid) {
+ tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
+ if (tlb_g == 0 && asid != tlb_asid) {
index = i;
}
}
@@ -595,7 +599,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
- target_ulong level, uint32_t mem_idx)
+ uint32_t level, uint32_t mem_idx)
{
CPUState *cs = env_cpu(env);
target_ulong badvaddr, index, phys;
@@ -603,7 +607,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
if (unlikely((level == 0) || (level > 4))) {
qemu_log_mask(LOG_GUEST_ERROR,
- "Attepted LDDIR with level %"PRId64"\n", level);
+                      "Attempted LDDIR with level %u\n", level);
return base;
}
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index 53a0b4c..055f6fb 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -99,16 +99,16 @@ void generate_exception(DisasContext *ctx, int excp)
ctx->base.is_jmp = DISAS_NORETURN;
}
-static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, vaddr dest)
{
if (ctx->va32) {
dest = (uint32_t) dest;
}
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_tl(cpu_pc, dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 97afceb..eb1ba15 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -44,9 +44,6 @@
#undef DEFO32
#undef DEFO64
-static TCGv_i32 cpu_halted;
-static TCGv_i32 cpu_exception_index;
-
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
@@ -78,14 +75,6 @@ void m68k_tcg_init(void)
#undef DEFO32
#undef DEFO64
- cpu_halted = tcg_global_mem_new_i32(tcg_env,
- -offsetof(M68kCPU, env) +
- offsetof(CPUState, halted), "HALTED");
- cpu_exception_index = tcg_global_mem_new_i32(tcg_env,
- -offsetof(M68kCPU, env) +
- offsetof(CPUState, exception_index),
- "EXCEPTION");
-
p = cpu_reg_names;
for (i = 0; i < 8; i++) {
sprintf(p, "D%d", i);
@@ -4512,7 +4501,8 @@ DISAS_INSN(halt)
gen_exception(s, s->pc, EXCP_SEMIHOSTING);
return;
}
- tcg_gen_movi_i32(cpu_halted, 1);
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
+ offsetof(CPUState, halted) - offsetof(M68kCPU, env));
gen_exception(s, s->pc, EXCP_HLT);
}
@@ -4528,7 +4518,8 @@ DISAS_INSN(stop)
ext = read_im16(env, s);
gen_set_sr_im(s, ext, 0);
- tcg_gen_movi_i32(cpu_halted, 1);
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
+ offsetof(CPUState, halted) - offsetof(M68kCPU, env));
gen_exception(s, s->pc, EXCP_HLT);
}
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 3ce28b3..14b1078 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -259,7 +259,7 @@ struct CPUArchState {
/* lwx/swx reserved address */
#define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */
- target_ulong res_addr;
+ uint32_t res_addr;
uint32_t res_val;
/* Internal flags. */
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index ef0e2f9..cf577a7 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -274,7 +274,8 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
MemTxAttrs *attrs)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- target_ulong vaddr, paddr = 0;
+ vaddr vaddr;
+ hwaddr paddr = 0;
MicroBlazeMMULookup lu;
int mmu_idx = cpu_mmu_index(cs, false);
unsigned int hit;
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index ef4fad9..01eba59 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -20,7 +20,7 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c
index a4cf38d..48efa54 100644
--- a/target/microblaze/machine.c
+++ b/target/microblaze/machine.c
@@ -78,7 +78,7 @@ static const VMStateField vmstate_env_fields[] = {
VMSTATE_UINT32(iflags, CPUMBState),
VMSTATE_UINT32(res_val, CPUMBState),
- VMSTATE_UINTTL(res_addr, CPUMBState),
+ VMSTATE_UINT32(res_addr, CPUMBState),
VMSTATE_STRUCT(mmu, CPUMBState, 0, vmstate_mmu, MicroBlazeMMU),
@@ -87,8 +87,8 @@ static const VMStateField vmstate_env_fields[] = {
static const VMStateDescription vmstate_env = {
.name = "env",
- .version_id = 0,
- .minimum_version_id = 0,
+ .version_id = 1,
+ .minimum_version_id = 1,
.fields = vmstate_env_fields,
};
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 8703ff5..db24cb3 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -78,7 +78,7 @@ static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
/* rw - 0 = read, 1 = write, 2 = fetch. */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
- target_ulong vaddr, MMUAccessType rw, int mmu_idx)
+ vaddr vaddr, MMUAccessType rw, int mmu_idx)
{
MicroBlazeMMU *mmu = &cpu->env.mmu;
unsigned int i, hit = 0;
@@ -172,7 +172,7 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
}
done:
qemu_log_mask(CPU_LOG_MMU,
- "MMU vaddr=0x" TARGET_FMT_lx
+ "MMU vaddr=0x%" VADDR_PRIx
" rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
vaddr, rw, tlb_wr, tlb_ex, hit);
return hit;
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 1068bd2..2aca39c 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -86,7 +86,7 @@ typedef struct {
} MicroBlazeMMULookup;
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
- target_ulong vaddr, MMUAccessType rw, int mmu_idx);
+ vaddr vaddr, MMUAccessType rw, int mmu_idx);
uint32_t mmu_read(CPUMBState *env, bool ea, uint32_t rn);
void mmu_write(CPUMBState *env, bool ea, uint32_t rn, uint32_t v);
void mmu_init(MicroBlazeMMU *mmu);
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index b8365b3..df93c42 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -365,13 +365,13 @@ uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
return 0;
}
-void helper_stackprot(CPUMBState *env, target_ulong addr)
+void helper_stackprot(CPUMBState *env, uint32_t addr)
{
if (addr < env->slr || addr > env->shr) {
CPUState *cs = env_cpu(env);
qemu_log_mask(CPU_LOG_INT, "Stack protector violation at "
- TARGET_FMT_lx " %x %x\n",
+ "0x%x 0x%x 0x%x\n",
addr, env->slr, env->shr);
env->ear = addr;
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 5098a1d..6442a25 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -55,7 +55,7 @@ static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
-static TCGv cpu_res_addr;
+static TCGv_i32 cpu_res_addr;
static TCGv_i32 cpu_res_val;
/* This is the state at translation time. */
@@ -116,12 +116,12 @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
{
if (translator_use_goto_tb(&dc->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(dc->base.tb, n);
+ tcg_gen_exit_tb(dc->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
@@ -604,9 +604,9 @@ static bool trans_wdic(DisasContext *dc, arg_wdic *a)
DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
-static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
+static TCGv_i32 compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
- TCGv ret;
+ TCGv_i32 ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && rb) {
@@ -626,9 +626,9 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
return ret;
}
-static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
+static TCGv_i32 compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
- TCGv ret;
+ TCGv_i32 ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && imm) {
@@ -708,7 +708,7 @@ static inline MemOp mo_endian(DisasContext *dc)
return dc->cfg->endi ? MO_LE : MO_BE;
}
-static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+static bool do_load(DisasContext *dc, int rd, TCGv_i32 addr, MemOp mop,
int mem_index, bool rev)
{
MemOp size = mop & MO_SIZE;
@@ -726,7 +726,7 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
mop ^= MO_BSWAP;
}
if (size < MO_32) {
- tcg_gen_xori_tl(addr, addr, 3 - size);
+ tcg_gen_xori_i32(addr, addr, 3 - size);
}
}
@@ -750,13 +750,13 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}
@@ -776,19 +776,19 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}
@@ -810,19 +810,19 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}
@@ -844,20 +844,20 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg)
static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
/* lwx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_andi_i32(addr, addr, ~3);
tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
mo_endian(dc) | MO_UL);
- tcg_gen_mov_tl(cpu_res_addr, addr);
+ tcg_gen_mov_i32(cpu_res_addr, addr);
if (arg->rd) {
tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
@@ -868,7 +868,7 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg)
return true;
}
-static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+static bool do_store(DisasContext *dc, int rd, TCGv_i32 addr, MemOp mop,
int mem_index, bool rev)
{
MemOp size = mop & MO_SIZE;
@@ -886,7 +886,7 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
mop ^= MO_BSWAP;
}
if (size < MO_32) {
- tcg_gen_xori_tl(addr, addr, 3 - size);
+ tcg_gen_xori_i32(addr, addr, 3 - size);
}
}
@@ -910,13 +910,13 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}
@@ -936,19 +936,19 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg)
static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}
@@ -970,19 +970,19 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg)
static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}
@@ -1004,19 +1004,19 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg)
static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
TCGLabel *swx_done = gen_new_label();
TCGLabel *swx_fail = gen_new_label();
TCGv_i32 tval;
/* swx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_andi_i32(addr, addr, ~3);
/*
* Compare the address vs the one we used during lwx.
@@ -1024,7 +1024,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg)
* branch, but we know we can use the equal version in the global.
* In either case, addr is no longer needed.
*/
- tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
+ tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
/*
* Compare the value loaded during lwx with current contents of
@@ -1052,7 +1052,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg)
* Prevent the saved address from working again without another ldx.
* Akin to the pseudocode setting reservation = 0.
*/
- tcg_gen_movi_tl(cpu_res_addr, -1);
+ tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE);
return true;
}
@@ -1173,7 +1173,7 @@ static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
}
tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
- tcg_gen_movi_tl(cpu_res_addr, -1);
+ tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE);
dc->base.is_jmp = DISAS_EXIT;
return true;
@@ -1194,7 +1194,7 @@ static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
if (arg->rd) {
tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
}
- tcg_gen_movi_tl(cpu_res_addr, -1);
+ tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE);
#ifdef CONFIG_USER_ONLY
switch (imm) {
@@ -1885,6 +1885,7 @@ void mb_tcg_init(void)
tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
}
- cpu_res_addr =
- tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
+ cpu_res_addr = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUMBState, res_addr),
+ "res_addr");
}
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index d91d6ef..54849e9 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -4362,12 +4362,13 @@ static void gen_trap(DisasContext *ctx, uint32_t opc,
}
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_save_pc(dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
gen_save_pc(dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index f4bcf00..c8e2827 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -220,33 +220,24 @@ typedef struct OpenRISCTLBEntry {
typedef struct CPUOpenRISCTLBContext {
OpenRISCTLBEntry itlb[TLB_SIZE];
OpenRISCTLBEntry dtlb[TLB_SIZE];
-
- int (*cpu_openrisc_map_address_code)(OpenRISCCPU *cpu,
- hwaddr *physical,
- int *prot,
- target_ulong address, int rw);
- int (*cpu_openrisc_map_address_data)(OpenRISCCPU *cpu,
- hwaddr *physical,
- int *prot,
- target_ulong address, int rw);
} CPUOpenRISCTLBContext;
#endif
typedef struct CPUArchState {
- target_ulong shadow_gpr[16][32]; /* Shadow registers */
+ uint32_t shadow_gpr[16][32]; /* Shadow registers */
- target_ulong pc; /* Program counter */
- target_ulong ppc; /* Prev PC */
- target_ulong jmp_pc; /* Jump PC */
+ uint32_t pc; /* Program counter */
+ uint32_t ppc; /* Prev PC */
+ uint32_t jmp_pc; /* Jump PC */
uint64_t mac; /* Multiply registers MACHI:MACLO */
- target_ulong epcr; /* Exception PC register */
- target_ulong eear; /* Exception EA register */
+ uint32_t epcr; /* Exception PC register */
+ uint32_t eear; /* Exception EA register */
- target_ulong sr_f; /* the SR_F bit, values 0, 1. */
- target_ulong sr_cy; /* the SR_CY bit, values 0, 1. */
- target_long sr_ov; /* the SR_OV bit (in the sign bit only) */
+ uint32_t sr_f; /* the SR_F bit, values 0, 1. */
+ uint32_t sr_cy; /* the SR_CY bit, values 0, 1. */
+ int32_t sr_ov; /* the SR_OV bit (in the sign bit only) */
uint32_t sr; /* Supervisor register, without SR_{F,CY,OV} */
uint32_t esr; /* Exception supervisor register */
uint32_t evbar; /* Exception vector base address register */
@@ -254,8 +245,8 @@ typedef struct CPUArchState {
uint32_t fpcsr; /* Float register */
float_status fp_status;
- target_ulong lock_addr;
- target_ulong lock_value;
+ uint32_t lock_addr;
+ uint32_t lock_value;
uint32_t dflag; /* In delay slot (boolean) */
diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c
index dba9972..39b6195 100644
--- a/target/openrisc/fpu_helper.c
+++ b/target/openrisc/fpu_helper.c
@@ -146,10 +146,10 @@ uint32_t helper_float_madd_s(CPUOpenRISCState *env, uint32_t a,
#define FLOAT_CMP(name, impl) \
-target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
uint64_t fdt0, uint64_t fdt1) \
{ return float64_ ## impl(fdt0, fdt1, &env->fp_status); } \
-target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
uint32_t fdt0, uint32_t fdt1) \
{ return float32_ ## impl(fdt0, fdt1, &env->fp_status); }
@@ -160,13 +160,13 @@ FLOAT_CMP(un, unordered_quiet)
#undef FLOAT_CMP
#define FLOAT_UCMP(name, expr) \
-target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
uint64_t fdt0, uint64_t fdt1) \
{ \
FloatRelation r = float64_compare_quiet(fdt0, fdt1, &env->fp_status); \
return expr; \
} \
-target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
uint32_t fdt0, uint32_t fdt1) \
{ \
FloatRelation r = float32_compare_quiet(fdt0, fdt1, &env->fp_status); \
diff --git a/target/openrisc/helper.h b/target/openrisc/helper.h
index d847814..e0a8d40 100644
--- a/target/openrisc/helper.h
+++ b/target/openrisc/helper.h
@@ -47,8 +47,8 @@ FOP_CALC(rem)
#undef FOP_CALC
#define FOP_CMP(op) \
-DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, tl, env, i32, i32) \
-DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, i32, env, i64, i64)
FOP_CMP(eq)
FOP_CMP(lt)
FOP_CMP(le)
@@ -62,5 +62,5 @@ FOP_CMP(ult)
DEF_HELPER_FLAGS_1(rfe, 0, void, env)
/* sys */
-DEF_HELPER_FLAGS_3(mtspr, 0, void, env, tl, tl)
-DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, tl, env, tl, tl)
+DEF_HELPER_FLAGS_3(mtspr, 0, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, i32, env, i32, i32)
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
index 081c706..f285367 100644
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -26,8 +26,8 @@ static const VMStateDescription vmstate_tlb_entry = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL(mr, OpenRISCTLBEntry),
- VMSTATE_UINTTL(tr, OpenRISCTLBEntry),
+ VMSTATE_UINT32(mr, OpenRISCTLBEntry),
+ VMSTATE_UINT32(tr, OpenRISCTLBEntry),
VMSTATE_END_OF_LIST()
}
};
@@ -72,14 +72,14 @@ static const VMStateDescription vmstate_env = {
.version_id = 6,
.minimum_version_id = 6,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32),
- VMSTATE_UINTTL(pc, CPUOpenRISCState),
- VMSTATE_UINTTL(ppc, CPUOpenRISCState),
- VMSTATE_UINTTL(jmp_pc, CPUOpenRISCState),
- VMSTATE_UINTTL(lock_addr, CPUOpenRISCState),
- VMSTATE_UINTTL(lock_value, CPUOpenRISCState),
- VMSTATE_UINTTL(epcr, CPUOpenRISCState),
- VMSTATE_UINTTL(eear, CPUOpenRISCState),
+ VMSTATE_UINT32_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32),
+ VMSTATE_UINT32(pc, CPUOpenRISCState),
+ VMSTATE_UINT32(ppc, CPUOpenRISCState),
+ VMSTATE_UINT32(jmp_pc, CPUOpenRISCState),
+ VMSTATE_UINT32(lock_addr, CPUOpenRISCState),
+ VMSTATE_UINT32(lock_value, CPUOpenRISCState),
+ VMSTATE_UINT32(epcr, CPUOpenRISCState),
+ VMSTATE_UINT32(eear, CPUOpenRISCState),
/* Save the architecture value of the SR, not the internally
expanded version. Since this architecture value does not
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index acea50c..ffb732e 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -28,15 +28,14 @@
#include "qemu/host-utils.h"
#include "hw/loader.h"
-static inline void get_phys_nommu(hwaddr *phys_addr, int *prot,
- target_ulong address)
+static void get_phys_nommu(hwaddr *phys_addr, int *prot, vaddr address)
{
*phys_addr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
- target_ulong addr, int need, bool super)
+ vaddr addr, int need, bool super)
{
int idx = (addr >> TARGET_PAGE_BITS) & TLB_MASK;
uint32_t imr = cpu->env.tlb.itlb[idx].mr;
@@ -95,7 +94,7 @@ static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
}
}
-static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
+static void raise_mmu_exception(OpenRISCCPU *cpu, vaddr address,
int exception)
{
CPUState *cs = CPU(cpu);
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index b091a9c..7ad908b 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -40,12 +40,12 @@ static inline bool is_user(CPUOpenRISCState *env)
#endif
}
-void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
+void HELPER(mtspr)(CPUOpenRISCState *env, uint32_t spr, uint32_t rb)
{
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
CPUState *cs = env_cpu(env);
- target_ulong mr;
+ uint32_t mr;
int idx;
#endif
@@ -213,8 +213,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
#endif
}
-target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
- target_ulong spr)
+uint32_t HELPER(mfspr)(CPUOpenRISCState *env, uint32_t rd, uint32_t spr)
{
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 5ab3bc7..6fa4d6c 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -51,14 +51,20 @@ typedef struct DisasContext {
uint32_t avr;
/* If not -1, jmp_pc contains this value and so is a direct jump. */
- target_ulong jmp_pc_imm;
+ vaddr jmp_pc_imm;
/* The temporary corresponding to register 0 for this compilation. */
- TCGv R0;
+ TCGv_i32 R0;
/* The constant zero. */
- TCGv zero;
+ TCGv_i32 zero;
} DisasContext;
+static inline MemOp mo_endian(DisasContext *dc)
+{
+ /* The SR_LEE bit sets the (little) endianness, but we don't implement it. */
+ return MO_BE;
+}
+
static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
@@ -71,16 +77,16 @@ static inline bool is_user(DisasContext *dc)
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"
-static TCGv cpu_sr;
-static TCGv cpu_regs[32];
-static TCGv cpu_pc;
-static TCGv jmp_pc; /* l.jr/l.jalr temp pc */
-static TCGv cpu_ppc;
-static TCGv cpu_sr_f; /* bf/bnf, F flag taken */
-static TCGv cpu_sr_cy; /* carry (unsigned overflow) */
-static TCGv cpu_sr_ov; /* signed overflow */
-static TCGv cpu_lock_addr;
-static TCGv cpu_lock_value;
+static TCGv_i32 cpu_sr;
+static TCGv_i32 cpu_regs[32];
+static TCGv_i32 cpu_pc;
+static TCGv_i32 jmp_pc; /* l.jr/l.jalr temp pc */
+static TCGv_i32 cpu_ppc;
+static TCGv_i32 cpu_sr_f; /* bf/bnf, F flag taken */
+static TCGv_i32 cpu_sr_cy; /* carry (unsigned overflow) */
+static TCGv_i32 cpu_sr_ov; /* signed overflow */
+static TCGv_i32 cpu_lock_addr;
+static TCGv_i32 cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac; /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;
@@ -95,27 +101,27 @@ void openrisc_translate_init(void)
};
int i;
- cpu_sr = tcg_global_mem_new(tcg_env,
+ cpu_sr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr), "sr");
cpu_dflag = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, dflag),
"dflag");
- cpu_pc = tcg_global_mem_new(tcg_env,
+ cpu_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, pc), "pc");
- cpu_ppc = tcg_global_mem_new(tcg_env,
+ cpu_ppc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, ppc), "ppc");
- jmp_pc = tcg_global_mem_new(tcg_env,
+ jmp_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
- cpu_sr_f = tcg_global_mem_new(tcg_env,
+ cpu_sr_f = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr_f), "sr_f");
- cpu_sr_cy = tcg_global_mem_new(tcg_env,
+ cpu_sr_cy = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
- cpu_sr_ov = tcg_global_mem_new(tcg_env,
+ cpu_sr_ov = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
- cpu_lock_addr = tcg_global_mem_new(tcg_env,
+ cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, lock_addr),
"lock_addr");
- cpu_lock_value = tcg_global_mem_new(tcg_env,
+ cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, lock_value),
"lock_value");
fpcsr = tcg_global_mem_new_i32(tcg_env,
@@ -125,7 +131,7 @@ void openrisc_translate_init(void)
offsetof(CPUOpenRISCState, mac),
"mac");
for (i = 0; i < 32; i++) {
- cpu_regs[i] = tcg_global_mem_new(tcg_env,
+ cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState,
shadow_gpr[0][i]),
regnames[i]);
@@ -139,7 +145,7 @@ static void gen_exception(DisasContext *dc, unsigned int excp)
static void gen_illegal_exception(DisasContext *dc)
{
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_ILLEGAL);
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -159,7 +165,7 @@ static bool check_of64a32s(DisasContext *dc)
return dc->cpucfgr & CPUCFGR_OF64A32S;
}
-static TCGv cpu_R(DisasContext *dc, int reg)
+static TCGv_i32 cpu_R(DisasContext *dc, int reg)
{
if (reg == 0) {
return dc->R0;
@@ -200,147 +206,133 @@ static void gen_ove_cyov(DisasContext *dc)
}
}
-static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_add(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
- TCGv res = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 res = tcg_temp_new_i32();
- tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
- tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
- tcg_gen_xor_tl(t0, res, srcb);
- tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
+ tcg_gen_add2_i32(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
+ tcg_gen_xor_i32(cpu_sr_ov, srca, srcb);
+ tcg_gen_xor_i32(t0, res, srcb);
+ tcg_gen_andc_i32(cpu_sr_ov, t0, cpu_sr_ov);
- tcg_gen_mov_tl(dest, res);
+ tcg_gen_mov_i32(dest, res);
gen_ove_cyov(dc);
}
-static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_addc(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
- TCGv res = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 res = tcg_temp_new_i32();
- tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
- tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
- tcg_gen_xor_tl(t0, res, srcb);
- tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
+ tcg_gen_addcio_i32(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
+ tcg_gen_xor_i32(cpu_sr_ov, srca, srcb);
+ tcg_gen_xor_i32(t0, res, srcb);
+ tcg_gen_andc_i32(cpu_sr_ov, t0, cpu_sr_ov);
- tcg_gen_mov_tl(dest, res);
+ tcg_gen_mov_i32(dest, res);
gen_ove_cyov(dc);
}
-static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_sub(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv res = tcg_temp_new();
+ TCGv_i32 res = tcg_temp_new_i32();
- tcg_gen_sub_tl(res, srca, srcb);
- tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
- tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
- tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
+ tcg_gen_sub_i32(res, srca, srcb);
+ tcg_gen_xor_i32(cpu_sr_cy, srca, srcb);
+ tcg_gen_xor_i32(cpu_sr_ov, res, srcb);
+ tcg_gen_and_i32(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
- tcg_gen_mov_tl(dest, res);
+ tcg_gen_mov_i32(dest, res);
gen_ove_cyov(dc);
}
-static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_mul(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
- tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
- tcg_gen_negsetcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
+ tcg_gen_muls2_i32(dest, cpu_sr_ov, srca, srcb);
+ tcg_gen_sari_i32(t0, dest, TARGET_LONG_BITS - 1);
+ tcg_gen_negsetcond_i32(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
gen_ove_ov(dc);
}
-static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_mulu(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
+ tcg_gen_muls2_i32(dest, cpu_sr_cy, srca, srcb);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
gen_ove_cy(dc);
}
-static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_div(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
/* The result of divide-by-zero is undefined.
Suppress the host-side exception by dividing by 1. */
- tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
- tcg_gen_div_tl(dest, srca, t0);
+ tcg_gen_or_i32(t0, srcb, cpu_sr_ov);
+ tcg_gen_div_i32(dest, srca, t0);
- tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
+ tcg_gen_neg_i32(cpu_sr_ov, cpu_sr_ov);
gen_ove_ov(dc);
}
-static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_divu(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
/* The result of divide-by-zero is undefined.
Suppress the host-side exception by dividing by 1. */
- tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
- tcg_gen_divu_tl(dest, srca, t0);
+ tcg_gen_or_i32(t0, srcb, cpu_sr_cy);
+ tcg_gen_divu_i32(dest, srca, t0);
gen_ove_cy(dc);
}
-static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_muld(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_ext_tl_i64(t1, srca);
- tcg_gen_ext_tl_i64(t2, srcb);
- if (TARGET_LONG_BITS == 32) {
- tcg_gen_mul_i64(cpu_mac, t1, t2);
- tcg_gen_movi_tl(cpu_sr_ov, 0);
- } else {
- TCGv_i64 high = tcg_temp_new_i64();
-
- tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
- tcg_gen_sari_i64(t1, cpu_mac, 63);
- tcg_gen_negsetcond_i64(TCG_COND_NE, t1, t1, high);
- tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
-
- gen_ove_ov(dc);
- }
+ tcg_gen_ext_i32_i64(t1, srca);
+ tcg_gen_ext_i32_i64(t2, srcb);
+ tcg_gen_mul_i64(cpu_mac, t1, t2);
+ tcg_gen_movi_i32(cpu_sr_ov, 0);
}
-static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_muldu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(t1, srca);
- tcg_gen_extu_tl_i64(t2, srcb);
- if (TARGET_LONG_BITS == 32) {
- tcg_gen_mul_i64(cpu_mac, t1, t2);
- tcg_gen_movi_tl(cpu_sr_cy, 0);
- } else {
- TCGv_i64 high = tcg_temp_new_i64();
-
- tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
- tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
- tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
-
- gen_ove_cy(dc);
- }
+ tcg_gen_extu_i32_i64(t1, srca);
+ tcg_gen_extu_i32_i64(t2, srcb);
+ tcg_gen_mul_i64(cpu_mac, t1, t2);
+ tcg_gen_movi_i32(cpu_sr_cy, 0);
}
-static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_mac(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_ext_tl_i64(t1, srca);
- tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_ext_i32_i64(t1, srca);
+ tcg_gen_ext_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during addition stage. */
@@ -349,39 +341,35 @@ static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
tcg_gen_xor_i64(t1, t1, cpu_mac);
tcg_gen_andc_i64(t1, t1, t2);
-#if TARGET_LONG_BITS == 32
tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
-#else
- tcg_gen_mov_i64(cpu_sr_ov, t1);
-#endif
gen_ove_ov(dc);
}
-static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_macu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(t1, srca);
- tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_extu_i32_i64(t1, srca);
+ tcg_gen_extu_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during addition stage. */
tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
- tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
+ tcg_gen_extrl_i64_i32(cpu_sr_cy, t1);
gen_ove_cy(dc);
}
-static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_msb(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_ext_tl_i64(t1, srca);
- tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_ext_i32_i64(t1, srca);
+ tcg_gen_ext_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during subtraction stage. */
@@ -399,19 +387,19 @@ static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
gen_ove_ov(dc);
}
-static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_msbu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(t1, srca);
- tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_extu_i32_i64(t1, srca);
+ tcg_gen_extu_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during subtraction stage. */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
- tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
+ tcg_gen_extrl_i64_i32(cpu_sr_cy, t2);
gen_ove_cy(dc);
}
@@ -440,84 +428,84 @@ static bool trans_l_sub(DisasContext *dc, arg_dab *a)
static bool trans_l_and(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_and_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_or(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_or_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_xor(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_xor_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sll(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_shl_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_srl(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_shr_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sra(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_sar_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_ror(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_rotr_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_exths(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext16s_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_extbs(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext8s_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_exthz(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext16u_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_extbz(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext8u_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
@@ -525,16 +513,16 @@ static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
static bool trans_l_ff1(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
- tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
+ tcg_gen_ctzi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
+ tcg_gen_addi_i32(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
return true;
}
static bool trans_l_fl1(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
- tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
+ tcg_gen_clzi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
+ tcg_gen_subfi_i32(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
return true;
}
@@ -580,9 +568,9 @@ static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
- target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4;
- tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ tcg_gen_movi_i32(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
dc->delayed_branch = 2;
return true;
@@ -590,13 +578,13 @@ static bool trans_l_j(DisasContext *dc, arg_l_j *a)
static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
- target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
- target_ulong ret_pc = dc->base.pc_next + 8;
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4;
+ vaddr ret_pc = dc->base.pc_next + 8;
- tcg_gen_movi_tl(cpu_regs[9], ret_pc);
+ tcg_gen_movi_i32(cpu_regs[9], ret_pc);
/* Optimize jal being used to load the PC for PIC. */
if (tmp_pc != ret_pc) {
- tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ tcg_gen_movi_i32(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
dc->delayed_branch = 2;
}
@@ -605,11 +593,11 @@ static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
- target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
- TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8);
- TCGv t_true = tcg_constant_tl(tmp_pc);
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4;
+ TCGv_i32 t_next = tcg_constant_i32(dc->base.pc_next + 8);
+ TCGv_i32 t_true = tcg_constant_i32(tmp_pc);
- tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
+ tcg_gen_movcond_i32(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
dc->delayed_branch = 2;
}
@@ -627,51 +615,54 @@ static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
{
- tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
+ tcg_gen_mov_i32(jmp_pc, cpu_R(dc, a->b));
dc->delayed_branch = 2;
return true;
}
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
- tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
- tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8);
+ tcg_gen_mov_i32(jmp_pc, cpu_R(dc, a->b));
+ tcg_gen_movi_i32(cpu_regs[9], dc->base.pc_next + 8);
dc->delayed_branch = 2;
return true;
}
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
- TCGv ea;
+ TCGv_i32 ea;
check_r0_write(dc, a->d);
- ea = tcg_temp_new();
- tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL);
- tcg_gen_mov_tl(cpu_lock_addr, ea);
- tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
+ ea = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_ld_i32(cpu_R(dc, a->d), ea, dc->mem_idx,
+ mo_endian(dc) | MO_UL);
+ tcg_gen_mov_i32(cpu_lock_addr, ea);
+ tcg_gen_mov_i32(cpu_lock_value, cpu_R(dc, a->d));
return true;
}
static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
- TCGv ea;
+ TCGv_i32 ea;
+
+ mop |= mo_endian(dc);
check_r0_write(dc, a->d);
- ea = tcg_temp_new();
- tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
+ ea = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_ld_i32(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
}
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TEUL);
+ do_load(dc, a, MO_UL);
return true;
}
static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TESL);
+ do_load(dc, a, MO_SL);
return true;
}
@@ -689,53 +680,57 @@ static bool trans_l_lbs(DisasContext *dc, arg_load *a)
static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TEUW);
+ do_load(dc, a, MO_UW);
return true;
}
static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TESW);
+ do_load(dc, a, MO_SW);
return true;
}
static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
- TCGv ea, val;
+ TCGv_i32 ea, val;
TCGLabel *lab_fail, *lab_done;
- ea = tcg_temp_new();
- tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
+ ea = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i);
lab_fail = gen_new_label();
lab_done = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
+ tcg_gen_brcond_i32(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
- val = tcg_temp_new();
- tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
- cpu_R(dc, a->b), dc->mem_idx, MO_TEUL);
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
+ val = tcg_temp_new_i32();
+ tcg_gen_atomic_cmpxchg_i32(val, cpu_lock_addr, cpu_lock_value,
+ cpu_R(dc, a->b), dc->mem_idx,
+ mo_endian(dc) | MO_UL);
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
tcg_gen_br(lab_done);
gen_set_label(lab_fail);
- tcg_gen_movi_tl(cpu_sr_f, 0);
+ tcg_gen_movi_i32(cpu_sr_f, 0);
gen_set_label(lab_done);
- tcg_gen_movi_tl(cpu_lock_addr, -1);
+ tcg_gen_movi_i32(cpu_lock_addr, -1);
return true;
}
static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
- TCGv t0 = tcg_temp_new();
- tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ mop |= mo_endian(dc);
+
+ tcg_gen_addi_i32(t0, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_st_i32(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
}
static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
- do_store(dc, a, MO_TEUL);
+ do_store(dc, a, MO_UL);
return true;
}
@@ -747,7 +742,7 @@ static bool trans_l_sb(DisasContext *dc, arg_store *a)
static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
- do_store(dc, a, MO_TEUW);
+ do_store(dc, a, MO_UW);
return true;
}
@@ -772,75 +767,75 @@ static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a)
static bool trans_l_addi(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_addic(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_muli(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
{
- gen_mac(dc, cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_mac(dc, cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
{
check_r0_write(dc, a->d);
- tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
+ tcg_gen_andi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
return true;
}
static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
+ tcg_gen_ori_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
return true;
}
static bool trans_l_xori(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
+ tcg_gen_xori_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
- TCGv spr = tcg_temp_new();
+ TCGv_i32 spr = tcg_temp_new_i32();
check_r0_write(dc, a->d);
if (translator_io_start(&dc->base)) {
if (dc->delayed_branch) {
- tcg_gen_mov_tl(cpu_pc, jmp_pc);
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_mov_i32(cpu_pc, jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
} else {
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
}
dc->base.is_jmp = DISAS_EXIT;
}
- tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
+ tcg_gen_ori_i32(spr, cpu_R(dc, a->a), a->k);
gen_helper_mfspr(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d), spr);
return true;
}
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
- TCGv spr = tcg_temp_new();
+ TCGv_i32 spr = tcg_temp_new_i32();
translator_io_start(&dc->base);
@@ -851,14 +846,14 @@ static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
* of the cpu state first, allowing it to be overwritten.
*/
if (dc->delayed_branch) {
- tcg_gen_mov_tl(cpu_pc, jmp_pc);
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_mov_i32(cpu_pc, jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
} else {
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
}
dc->base.is_jmp = DISAS_EXIT;
- tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
+ tcg_gen_ori_i32(spr, cpu_R(dc, a->a), a->k);
gen_helper_mtspr(tcg_env, spr, cpu_R(dc, a->b));
return true;
}
@@ -890,7 +885,7 @@ static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
static bool trans_l_slli(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_shli_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -898,7 +893,7 @@ static bool trans_l_slli(DisasContext *dc, arg_dal *a)
static bool trans_l_srli(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_shri_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -906,7 +901,7 @@ static bool trans_l_srli(DisasContext *dc, arg_dal *a)
static bool trans_l_srai(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_sari_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -914,7 +909,7 @@ static bool trans_l_srai(DisasContext *dc, arg_dal *a)
static bool trans_l_rori(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_rotri_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -922,151 +917,151 @@ static bool trans_l_rori(DisasContext *dc, arg_dal *a)
static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
{
check_r0_write(dc, a->d);
- tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16);
+ tcg_gen_movi_i32(cpu_R(dc, a->d), a->k << 16);
return true;
}
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
check_r0_write(dc, a->d);
- tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac);
+ tcg_gen_extrl_i64_i32(cpu_R(dc, a->d), cpu_mac);
tcg_gen_movi_i64(cpu_mac, 0);
return true;
}
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_LEU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_LT, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LE,
+ tcg_gen_setcond_i32(TCG_COND_LE,
cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_SYSCALL);
dc->base.is_jmp = DISAS_NORETURN;
return true;
@@ -1074,7 +1069,7 @@ static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_TRAP);
dc->base.is_jmp = DISAS_NORETURN;
return true;
@@ -1108,7 +1103,7 @@ static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
}
static bool do_fp2(DisasContext *dc, arg_da *a,
- void (*fn)(TCGv, TCGv_env, TCGv))
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i32))
{
if (!check_of32s(dc)) {
return false;
@@ -1120,7 +1115,7 @@ static bool do_fp2(DisasContext *dc, arg_da *a,
}
static bool do_fp3(DisasContext *dc, arg_dab *a,
- void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
if (!check_of32s(dc)) {
return false;
@@ -1132,7 +1127,7 @@ static bool do_fp3(DisasContext *dc, arg_dab *a,
}
static bool do_fpcmp(DisasContext *dc, arg_ab *a,
- void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32),
bool inv, bool swap)
{
if (!check_of32s(dc)) {
@@ -1144,7 +1139,7 @@ static bool do_fpcmp(DisasContext *dc, arg_ab *a,
fn(cpu_sr_f, tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
}
if (inv) {
- tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
+ tcg_gen_xori_i32(cpu_sr_f, cpu_sr_f, 1);
}
gen_helper_update_fpcsr(tcg_env);
return true;
@@ -1337,7 +1332,7 @@ static bool do_dp2(DisasContext *dc, arg_da_pair *a,
}
static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
- void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64),
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i64, TCGv_i64),
bool inv, bool swap)
{
TCGv_i64 t0, t1;
@@ -1359,7 +1354,7 @@ static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
}
if (inv) {
- tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
+ tcg_gen_xori_i32(cpu_sr_f, cpu_sr_f, 1);
}
gen_helper_update_fpcsr(tcg_env);
return true;
@@ -1544,7 +1539,7 @@ static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
/* Allow the TCG optimizer to see that R0 == 0,
when it's true, which is the common case. */
- dc->zero = tcg_constant_tl(0);
+ dc->zero = tcg_constant_i32(0);
if (dc->tb_flags & TB_FLAGS_R0_0) {
dc->R0 = dc->zero;
} else {
@@ -1586,7 +1581,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- target_ulong jmp_dest;
+ vaddr jmp_dest;
/* If we have already exited the TB, nothing following has effect. */
if (dc->base.is_jmp == DISAS_NORETURN) {
@@ -1600,32 +1595,32 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
/* For DISAS_TOO_MANY, jump to the next insn. */
jmp_dest = dc->base.pc_next;
- tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);
+ tcg_gen_movi_i32(cpu_ppc, jmp_dest - 4);
switch (dc->base.is_jmp) {
case DISAS_JUMP:
jmp_dest = dc->jmp_pc_imm;
if (jmp_dest == -1) {
/* The jump destination is indirect/computed; use jmp_pc. */
- tcg_gen_mov_tl(cpu_pc, jmp_pc);
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_mov_i32(cpu_pc, jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
tcg_gen_lookup_and_goto_ptr();
break;
}
/* The jump destination is direct; use jmp_pc_imm.
However, we will have stored into jmp_pc as well;
we know now that it wasn't needed. */
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
/* fallthru */
case DISAS_TOO_MANY:
if (translator_use_goto_tb(&dc->base, jmp_dest)) {
tcg_gen_goto_tb(0);
- tcg_gen_movi_tl(cpu_pc, jmp_dest);
+ tcg_gen_movi_i32(cpu_pc, jmp_dest);
tcg_gen_exit_tb(dc->base.tb, 0);
break;
}
- tcg_gen_movi_tl(cpu_pc, jmp_dest);
+ tcg_gen_movi_i32(cpu_pc, jmp_dest);
tcg_gen_lookup_and_goto_ptr();
break;
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index d422789..17e6d07 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3668,16 +3668,17 @@ static void gen_lookup_and_goto_ptr(DisasContext *ctx)
}
/*** Branch ***/
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (NARROW_MODE(ctx)) {
dest = (uint32_t) dest;
}
if (use_goto_tb(ctx, dest)) {
pmu_count_insns(ctx);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
gen_lookup_and_goto_ptr(ctx);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 2c22664..4c13012 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -50,7 +50,7 @@ typedef struct CPUArchState CPURISCVState;
*/
#define RISCV_UW2_ALWAYS_STORE_AMO 1
-#define RV(x) ((target_ulong)1 << (x - 'A'))
+#define RV(x) BIT(x - 'A')
/*
* Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
@@ -582,7 +582,7 @@ struct RISCVCPUClass {
RISCVCPUDef *def;
};
-static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
+static inline int riscv_has_ext(CPURISCVState *env, uint32_t ext)
{
return (env->misa_ext & ext) != 0;
}
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index f4b5460..2a48717 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3351,19 +3351,19 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base,
/* offset of the idx element with base register r */
static uint32_t endian_ofs(DisasContext *s, int r, int idx)
{
-#if HOST_BIG_ENDIAN
- return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
-#else
- return vreg_ofs(s, r) + (idx << s->sew);
-#endif
+ if (HOST_BIG_ENDIAN) {
+ return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
+ } else {
+ return vreg_ofs(s, r) + (idx << s->sew);
+ }
}
/* adjust the index according to the endian */
static void endian_adjust(TCGv_i32 ofs, int sew)
{
-#if HOST_BIG_ENDIAN
- tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
-#endif
+ if (HOST_BIG_ENDIAN) {
+ tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
+ }
}
/* Load idx >= VLMAX ? 0 : vreg[idx] */
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 75ca3fb..0dd0d59 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -36,6 +36,7 @@
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "system/address-spaces.h"
+#include "system/memory.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
@@ -1564,6 +1565,7 @@ bool kvm_arch_stop_on_emulation_error(CPUState *cs)
static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
g_autofree uint8_t *buf = NULL;
RISCVCPU *cpu = RISCV_CPU(cs);
target_ulong num_bytes;
@@ -1602,9 +1604,9 @@ static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
exit(1);
}
- cpu_physical_memory_write(addr, buf, ret);
+ address_space_write(cs->as, addr, attrs, buf, ret);
} else {
- cpu_physical_memory_read(addr, buf, num_bytes);
+ address_space_read(cs->as, addr, attrs, buf, num_bytes);
ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
if (ret < 0) {
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
index 100005e..8a77476 100644
--- a/target/riscv/monitor.c
+++ b/target/riscv/monitor.c
@@ -23,6 +23,7 @@
#include "cpu_bits.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
+#include "system/memory.h"
#ifdef TARGET_RISCV64
#define PTE_HEADER_FIELDS "vaddr paddr "\
@@ -77,11 +78,13 @@ static void print_pte(Monitor *mon, int va_bits, target_ulong vaddr,
attr & PTE_D ? 'd' : '-');
}
-static void walk_pte(Monitor *mon, hwaddr base, target_ulong start,
+static void walk_pte(Monitor *mon, AddressSpace *as,
+ hwaddr base, target_ulong start,
int level, int ptidxbits, int ptesize, int va_bits,
target_ulong *vbase, hwaddr *pbase, hwaddr *last_paddr,
target_ulong *last_size, int *last_attr)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
hwaddr pte_addr;
hwaddr paddr;
target_ulong last_start = -1;
@@ -100,7 +103,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start,
for (idx = 0; idx < (1UL << ptidxbits); idx++) {
pte_addr = base + idx * ptesize;
- cpu_physical_memory_read(pte_addr, &pte, ptesize);
+ address_space_read(as, pte_addr, attrs, &pte, ptesize);
paddr = (hwaddr)(pte >> PTE_PPN_SHIFT) << PGSHIFT;
attr = pte & 0xff;
@@ -132,7 +135,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start,
*last_size = pgsize;
} else {
/* pointer to the next level of the page table */
- walk_pte(mon, paddr, start, level - 1, ptidxbits, ptesize,
+ walk_pte(mon, as, paddr, start, level - 1, ptidxbits, ptesize,
va_bits, vbase, pbase, last_paddr,
last_size, last_attr);
}
@@ -145,6 +148,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start,
static void mem_info_svxx(Monitor *mon, CPUArchState *env)
{
+ AddressSpace *as = env_cpu(env)->as;
int levels, ptidxbits, ptesize, vm, va_bits;
hwaddr base;
target_ulong vbase;
@@ -199,7 +203,7 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env)
last_attr = 0;
/* walk page tables, starting from address 0 */
- walk_pte(mon, base, 0, levels - 1, ptidxbits, ptesize, va_bits,
+ walk_pte(mon, as, base, 0, levels - 1, ptidxbits, ptesize, va_bits,
&vbase, &pbase, &last_paddr, &last_size, &last_attr);
/* don't forget the last one */
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 6fc06c7..9a53aec 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -286,7 +286,8 @@ static void exit_tb(DisasContext *ctx)
tcg_gen_exit_tb(NULL, 0);
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_long diff)
{
target_ulong dest = ctx->base.pc_next + diff;
@@ -305,12 +306,12 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
*/
if (tb_cflags(ctx->base.tb) & CF_PCREL) {
gen_update_pc(ctx, diff);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_update_pc(ctx, diff);
}
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
gen_update_pc(ctx, diff);
lookup_and_goto_ptr(ctx);
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 41ea223..2de3358 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -235,26 +235,26 @@ vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
void *vd, uint32_t evl, uint32_t reg_start, void *host,
uint32_t esz, bool is_load)
{
-#if HOST_BIG_ENDIAN
- for (; reg_start < evl; reg_start++, host += esz) {
- ldst_host(vd, reg_start, host);
- }
-#else
- if (esz == 1) {
- uint32_t byte_offset = reg_start * esz;
- uint32_t size = (evl - reg_start) * esz;
-
- if (is_load) {
- memcpy(vd + byte_offset, host, size);
- } else {
- memcpy(host, vd + byte_offset, size);
- }
- } else {
+ if (HOST_BIG_ENDIAN) {
for (; reg_start < evl; reg_start++, host += esz) {
ldst_host(vd, reg_start, host);
}
+ } else {
+ if (esz == 1) {
+ uint32_t byte_offset = reg_start * esz;
+ uint32_t size = (evl - reg_start) * esz;
+
+ if (is_load) {
+ memcpy(vd + byte_offset, host, size);
+ } else {
+ memcpy(host, vd + byte_offset, size);
+ }
+ } else {
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+ }
}
-#endif
}
static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 19a9584..ef865f1 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -40,8 +40,8 @@ typedef struct DisasContext {
} DisasContext;
typedef struct DisasCompare {
- TCGv value;
- TCGv temp;
+ TCGv_i32 value;
+ TCGv_i32 temp;
TCGCond cond;
} DisasCompare;
@@ -63,15 +63,20 @@ const char *rx_crname(uint8_t cr)
#define DISAS_EXIT DISAS_TARGET_2
/* global register indexes */
-static TCGv cpu_regs[16];
-static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
-static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
-static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
-static TCGv cpu_fintv, cpu_intb, cpu_pc;
+static TCGv_i32 cpu_regs[16];
+static TCGv_i32 cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
+static TCGv_i32 cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
+static TCGv_i32 cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
+static TCGv_i32 cpu_fintv, cpu_intb, cpu_pc;
static TCGv_i64 cpu_acc;
#define cpu_sp cpu_regs[0]
+static inline MemOp mo_endian(DisasContext *dc)
+{
+ return MO_LE;
+}
+
/* decoder helper */
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
int i, int n)
@@ -85,7 +90,7 @@ static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
static uint32_t li(DisasContext *ctx, int sz)
{
- target_ulong addr;
+ vaddr addr;
uint32_t tmp;
CPURXState *env = ctx->env;
addr = ctx->base.pc_next;
@@ -147,12 +152,12 @@ void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
{
if (translator_use_goto_tb(&dc->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(dc->base.tb, n);
+ tcg_gen_exit_tb(dc->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
@@ -161,34 +166,34 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
}
/* generic load wrapper */
-static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
+static void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem)
{
- tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
+ tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | mo_endian(ctx));
}
/* unsigned load wrapper */
-static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
+static void rx_gen_ldu(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem)
{
- tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
+ tcg_gen_qemu_ld_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* generic store wrapper */
-static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
+static void rx_gen_st(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem)
{
- tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
+ tcg_gen_qemu_st_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* [ri, rb] */
-static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
- int size, int ri, int rb)
+static void rx_gen_regindex(DisasContext *ctx, TCGv_i32 mem,
+ int size, int ri, int rb)
{
tcg_gen_shli_i32(mem, cpu_regs[ri], size);
tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
/* dsp[reg] */
-static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
- int ld, int size, int reg)
+static TCGv_i32 rx_index_addr(DisasContext *ctx, TCGv_i32 mem,
+ int ld, int size, int reg)
{
uint32_t dsp;
@@ -218,15 +223,15 @@ static inline MemOp mi_to_mop(unsigned mi)
}
/* load source operand */
-static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
- int ld, int mi, int rs)
+static TCGv_i32 rx_load_source(DisasContext *ctx, TCGv_i32 mem,
+ int ld, int mi, int rs)
{
- TCGv addr;
+ TCGv_i32 addr;
MemOp mop;
if (ld < 3) {
mop = mi_to_mop(mi);
addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
- tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
+ tcg_gen_qemu_ld_i32(mem, addr, 0, mop | mo_endian(ctx));
return mem;
} else {
return cpu_regs[rs];
@@ -315,7 +320,7 @@ static void psw_cond(DisasCompare *dc, uint32_t cond)
}
}
-static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
+static void move_from_cr(DisasContext *ctx, TCGv_i32 ret, int cr, uint32_t pc)
{
switch (cr) {
case 0: /* PSW */
@@ -361,7 +366,7 @@ static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
}
}
-static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
+static void move_to_cr(DisasContext *ctx, TCGv_i32 val, int cr)
{
if (cr >= 8 && !is_privileged(ctx, 0)) {
/* Some control registers can only be written in privileged mode. */
@@ -414,35 +419,35 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
}
}
-static void push(TCGv val)
+static void push(DisasContext *ctx, TCGv_i32 val)
{
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
- rx_gen_st(MO_32, val, cpu_sp);
+ rx_gen_st(ctx, MO_32, val, cpu_sp);
}
-static void pop(TCGv ret)
+static void pop(DisasContext *ctx, TCGv_i32 ret)
{
- rx_gen_ld(MO_32, ret, cpu_sp);
+ rx_gen_ld(ctx, MO_32, ret, cpu_sp);
tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
/* mov.<bwl> rs,dsp5[rd] */
static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
- rx_gen_st(a->sz, cpu_regs[a->rs], mem);
+ rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem);
return true;
}
/* mov.<bwl> dsp5[rs],rd */
static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
- rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -459,31 +464,31 @@ static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
/* mov.<bwl> #imm, dsp[rd] */
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
- TCGv imm, mem;
+ TCGv_i32 imm, mem;
imm = tcg_constant_i32(a->imm);
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
- rx_gen_st(a->sz, imm, mem);
+ rx_gen_st(ctx, a->sz, imm, mem);
return true;
}
/* mov.<bwl> [ri,rb],rd */
static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
- rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
/* mov.<bwl> rd,[ri,rb] */
static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
- rx_gen_st(a->sz, cpu_regs[a->rs], mem);
+ rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem);
return true;
}
@@ -493,7 +498,7 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
- TCGv tmp, mem, addr;
+ TCGv_i32 tmp, mem, addr;
if (a->lds == 3 && a->ldd == 3) {
/* mov.<bwl> rs,rd */
@@ -501,22 +506,22 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
return true;
}
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
if (a->lds == 3) {
/* mov.<bwl> rs,dsp[rd] */
addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
- rx_gen_st(a->sz, cpu_regs[a->rd], addr);
+ rx_gen_st(ctx, a->sz, cpu_regs[a->rd], addr);
} else if (a->ldd == 3) {
/* mov.<bwl> dsp[rs],rd */
addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
- rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
+ rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], addr);
} else {
/* mov.<bwl> dsp[rs],dsp[rd] */
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i32();
addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
- rx_gen_ld(a->sz, tmp, addr);
+ rx_gen_ld(ctx, a->sz, tmp, addr);
addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
- rx_gen_st(a->sz, tmp, addr);
+ rx_gen_st(ctx, a->sz, tmp, addr);
}
return true;
}
@@ -525,13 +530,13 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
/* mov.<bwl> rs,[-rd] */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
tcg_gen_mov_i32(val, cpu_regs[a->rs]);
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
- rx_gen_st(a->sz, val, cpu_regs[a->rd]);
+ rx_gen_st(ctx, a->sz, val, cpu_regs[a->rd]);
if (a->ad == 0) {
tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -542,12 +547,12 @@ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
/* mov.<bwl> [-rd],rs */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
- rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
+ rx_gen_ld(ctx, a->sz, val, cpu_regs[a->rd]);
if (a->ad == 0) {
tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -559,10 +564,10 @@ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
/* movu.<bw> dsp[rs],rd */
static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
- rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -576,10 +581,10 @@ static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
/* movu.<bw> [ri,rb],rd */
static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
- rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -587,12 +592,12 @@ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
/* mov.<bw> [-rd],rs */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
- rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
+ rx_gen_ldu(ctx, a->sz, val, cpu_regs[a->rd]);
if (a->ad == 0) {
tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -617,9 +622,9 @@ static bool trans_POP(DisasContext *ctx, arg_POP *a)
/* popc cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
- TCGv val;
- val = tcg_temp_new();
- pop(val);
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
+ pop(ctx, val);
move_to_cr(ctx, val, a->cr);
return true;
}
@@ -634,7 +639,7 @@ static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
}
r = a->rd;
while (r <= a->rd2 && r < 16) {
- pop(cpu_regs[r++]);
+ pop(ctx, cpu_regs[r++]);
}
return true;
}
@@ -643,34 +648,34 @@ static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
/* push.<bwl> rs */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
tcg_gen_mov_i32(val, cpu_regs[a->rs]);
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
- rx_gen_st(a->sz, val, cpu_sp);
+ rx_gen_st(ctx, a->sz, val, cpu_sp);
return true;
}
/* push.<bwl> dsp[rs] */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
- TCGv mem, val, addr;
- mem = tcg_temp_new();
- val = tcg_temp_new();
+ TCGv_i32 mem, val, addr;
+ mem = tcg_temp_new_i32();
+ val = tcg_temp_new_i32();
addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
- rx_gen_ld(a->sz, val, addr);
+ rx_gen_ld(ctx, a->sz, val, addr);
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
- rx_gen_st(a->sz, val, cpu_sp);
+ rx_gen_st(ctx, a->sz, val, cpu_sp);
return true;
}
/* pushc rx */
static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
move_from_cr(ctx, val, a->cr, ctx->pc);
- push(val);
+ push(ctx, val);
return true;
}
@@ -685,7 +690,7 @@ static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
}
r = a->rs2;
while (r >= a->rs && r >= 0) {
- push(cpu_regs[r--]);
+ push(ctx, cpu_regs[r--]);
}
return true;
}
@@ -693,8 +698,8 @@ static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
/* xchg rs,rd */
static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
@@ -704,8 +709,8 @@ static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
/* xchg dsp[rs].<mi>,rd */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
- TCGv mem, addr;
- mem = tcg_temp_new();
+ TCGv_i32 mem, addr;
+ mem = tcg_temp_new_i32();
switch (a->mi) {
case 0: /* dsp[rs].b */
case 1: /* dsp[rs].w */
@@ -724,10 +729,10 @@ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
return true;
}
-static inline void stcond(TCGCond cond, int rd, int imm)
+static void stcond(TCGCond cond, int rd, int imm)
{
- TCGv z;
- TCGv _imm;
+ TCGv_i32 z;
+ TCGv_i32 _imm;
z = tcg_constant_i32(0);
_imm = tcg_constant_i32(imm);
tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
@@ -753,15 +758,15 @@ static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
{
DisasCompare dc;
- TCGv val, mem, addr;
- dc.temp = tcg_temp_new();
+ TCGv_i32 val, mem, addr;
+ dc.temp = tcg_temp_new_i32();
psw_cond(&dc, a->cd);
if (a->ld < 3) {
- val = tcg_temp_new();
- mem = tcg_temp_new();
+ val = tcg_temp_new_i32();
+ mem = tcg_temp_new_i32();
tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
- rx_gen_st(a->sz, val, addr);
+ rx_gen_st(ctx, a->sz, val, addr);
} else {
tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
}
@@ -772,7 +777,7 @@ static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
- pop(cpu_pc);
+ pop(ctx, cpu_pc);
ctx->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -792,42 +797,42 @@ static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
dst = a->rd;
while (dst <= a->rd2 && dst < 16) {
- pop(cpu_regs[dst++]);
+ pop(ctx, cpu_regs[dst++]);
}
- pop(cpu_pc);
+ pop(ctx, cpu_pc);
ctx->base.is_jmp = DISAS_JUMP;
return true;
}
-typedef void (*op2fn)(TCGv ret, TCGv arg1);
-typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
+typedef void (*op2fn)(TCGv_i32 ret, TCGv_i32 arg1);
+typedef void (*op3fn)(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
+static void rx_gen_op_rr(op2fn opr, int dst, int src)
{
opr(cpu_regs[dst], cpu_regs[src]);
}
-static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
+static void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}
-static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
+static void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
- TCGv imm = tcg_constant_i32(src2);
+ TCGv_i32 imm = tcg_constant_i32(src2);
opr(cpu_regs[dst], cpu_regs[src], imm);
}
-static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
- int dst, int src, int ld, int mi)
+static void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
+ int dst, int src, int ld, int mi)
{
- TCGv val, mem;
- mem = tcg_temp_new();
+ TCGv_i32 val, mem;
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, ld, mi, src);
opr(cpu_regs[dst], cpu_regs[dst], val);
}
-static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_and(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -857,7 +862,7 @@ static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
return true;
}
-static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_or(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -887,7 +892,7 @@ static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
return true;
}
-static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_xor(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -909,7 +914,7 @@ static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
return true;
}
-static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_tst(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -930,7 +935,7 @@ static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
return true;
}
-static void rx_not(TCGv ret, TCGv arg1)
+static void rx_not(TCGv_i32 ret, TCGv_i32 arg1)
{
tcg_gen_not_i32(ret, arg1);
tcg_gen_mov_i32(cpu_psw_z, ret);
@@ -945,7 +950,7 @@ static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
return true;
}
-static void rx_neg(TCGv ret, TCGv arg1)
+static void rx_neg(TCGv_i32 ret, TCGv_i32 arg1)
{
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
tcg_gen_neg_i32(ret, arg1);
@@ -964,9 +969,9 @@ static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
}
/* ret = arg1 + arg2 + psw_c */
-static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_adc(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- TCGv z = tcg_constant_i32(0);
+ TCGv_i32 z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
@@ -1002,9 +1007,9 @@ static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
}
/* ret = arg1 + arg2 */
-static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_add(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- TCGv z = tcg_constant_i32(0);
+ TCGv_i32 z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
@@ -1037,7 +1042,7 @@ static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
}
/* ret = arg1 - arg2 */
-static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_sub(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
@@ -1051,17 +1056,17 @@ static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
}
}
-static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
+static void rx_cmp(TCGv_i32 dummy, TCGv_i32 arg1, TCGv_i32 arg2)
{
rx_sub(NULL, arg1, arg2);
}
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
-static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_sbb(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- TCGv temp;
- temp = tcg_temp_new();
+ TCGv_i32 temp;
+ temp = tcg_temp_new_i32();
tcg_gen_not_i32(temp, arg2);
rx_adc(ret, arg1, temp);
}
@@ -1187,7 +1192,7 @@ static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
- TCGv imm = tcg_constant_i32(a->imm);
+ TCGv_i32 imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1200,11 +1205,11 @@ static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
/* emul dsp[rs], rd */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
- TCGv val, mem;
+ TCGv_i32 val, mem;
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
cpu_regs[a->rd], val);
@@ -1214,7 +1219,7 @@ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
- TCGv imm = tcg_constant_i32(a->imm);
+ TCGv_i32 imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1227,23 +1232,23 @@ static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
- TCGv val, mem;
+ TCGv_i32 val, mem;
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
cpu_regs[a->rd], val);
return true;
}
-static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_div(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
gen_helper_div(ret, tcg_env, arg1, arg2);
}
-static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_divu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
gen_helper_divu(ret, tcg_env, arg1, arg2);
}
@@ -1283,8 +1288,8 @@ static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
if (a->imm) {
tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
@@ -1306,14 +1311,14 @@ static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
TCGLabel *noshift, *done;
- TCGv count, tmp;
+ TCGv_i32 count, tmp;
noshift = gen_new_label();
done = gen_new_label();
/* if (cpu_regs[a->rs]) { */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
- count = tcg_temp_new();
- tmp = tcg_temp_new();
+ count = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
@@ -1334,10 +1339,10 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
return true;
}
-static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
- unsigned int alith)
+static void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
+ unsigned int alith)
{
- static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
+ static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = {
tcg_gen_shri_i32, tcg_gen_sari_i32,
};
tcg_debug_assert(alith < 2);
@@ -1354,20 +1359,21 @@ static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
-static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
+static void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
TCGLabel *noshift, *done;
- TCGv count;
- static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
+ TCGv_i32 count;
+ static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = {
tcg_gen_shri_i32, tcg_gen_sari_i32,
};
- static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
+ static void (* const gen_sXr[])(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2) = {
tcg_gen_shr_i32, tcg_gen_sar_i32,
};
tcg_debug_assert(alith < 2);
noshift = gen_new_label();
done = gen_new_label();
- count = tcg_temp_new();
+ count = tcg_temp_new_i32();
/* if (cpu_regs[rs]) { */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
tcg_gen_andi_i32(count, cpu_regs[rs], 31);
@@ -1419,8 +1425,8 @@ static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
/* rolc rd */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
@@ -1433,8 +1439,8 @@ static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
/* rorc rd */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
@@ -1447,7 +1453,7 @@ static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
-static inline void rx_rot(int ir, int dir, int rd, int src)
+static void rx_rot(int ir, int dir, int rd, int src)
{
switch (dir) {
case ROTL:
@@ -1509,8 +1515,8 @@ static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
/* revw rs, rd */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
tcg_gen_shli_i32(tmp, tmp, 8);
tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
@@ -1527,7 +1533,7 @@ static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
switch (cd) {
case 0 ... 13:
- dc.temp = tcg_temp_new();
+ dc.temp = tcg_temp_new_i32();
psw_cond(&dc, cd);
t = gen_new_label();
done = gen_new_label();
@@ -1582,10 +1588,10 @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
return true;
}
-static inline void rx_save_pc(DisasContext *ctx)
+static void rx_save_pc(DisasContext *ctx)
{
- TCGv pc = tcg_constant_i32(ctx->base.pc_next);
- push(pc);
+ TCGv_i32 pc = tcg_constant_i32(ctx->base.pc_next);
+ push(ctx, pc);
}
/* jmp rs */
@@ -1626,7 +1632,7 @@ static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
- pop(cpu_pc);
+ pop(ctx, cpu_pc);
ctx->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -1667,7 +1673,7 @@ static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
#define STRING(op) \
do { \
- TCGv size = tcg_constant_i32(a->sz); \
+ TCGv_i32 size = tcg_constant_i32(a->sz); \
gen_helper_##op(tcg_env, size); \
} while (0)
@@ -1798,7 +1804,7 @@ static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
- TCGv imm = tcg_constant_i32(a->imm + 1);
+ TCGv_i32 imm = tcg_constant_i32(a->imm + 1);
gen_helper_racw(tcg_env, imm);
return true;
}
@@ -1806,8 +1812,8 @@ static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
/* sat rd */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
- TCGv tmp, z;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp, z;
+ tmp = tcg_temp_new_i32();
z = tcg_constant_i32(0);
/* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
@@ -1830,7 +1836,7 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
cat3(arg_, name, _ir) * a) \
{ \
- TCGv imm = tcg_constant_i32(li(ctx, 0)); \
+ TCGv_i32 imm = tcg_constant_i32(li(ctx, 0)); \
gen_helper_##op(cpu_regs[a->rd], tcg_env, \
cpu_regs[a->rd], imm); \
return true; \
@@ -1838,8 +1844,8 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
cat3(arg_, name, _mr) * a) \
{ \
- TCGv val, mem; \
- mem = tcg_temp_new(); \
+ TCGv_i32 val, mem; \
+ mem = tcg_temp_new_i32(); \
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
gen_helper_##op(cpu_regs[a->rd], tcg_env, \
cpu_regs[a->rd], val); \
@@ -1849,8 +1855,8 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
#define FCONVOP(name, op) \
static bool trans_##name(DisasContext *ctx, arg_##name * a) \
{ \
- TCGv val, mem; \
- mem = tcg_temp_new(); \
+ TCGv_i32 val, mem; \
+ mem = tcg_temp_new_i32(); \
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
gen_helper_##op(cpu_regs[a->rd], tcg_env, val); \
return true; \
@@ -1864,7 +1870,7 @@ FOP(FDIV, fdiv)
/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
- TCGv imm = tcg_constant_i32(li(ctx, 0));
+ TCGv_i32 imm = tcg_constant_i32(li(ctx, 0));
gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
return true;
}
@@ -1873,8 +1879,8 @@ static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
- TCGv val, mem;
- mem = tcg_temp_new();
+ TCGv_i32 val, mem;
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
return true;
@@ -1887,70 +1893,70 @@ FCONVOP(ROUND, round)
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
- TCGv val, mem;
- mem = tcg_temp_new();
+ TCGv_i32 val, mem;
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
return true;
}
-static void rx_bsetm(TCGv mem, TCGv mask)
+static void rx_bsetm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_or_i32(val, val, mask);
- rx_gen_st(MO_8, val, mem);
+ rx_gen_st(ctx, MO_8, val, mem);
}
-static void rx_bclrm(TCGv mem, TCGv mask)
+static void rx_bclrm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_andc_i32(val, val, mask);
- rx_gen_st(MO_8, val, mem);
+ rx_gen_st(ctx, MO_8, val, mem);
}
-static void rx_btstm(TCGv mem, TCGv mask)
+static void rx_btstm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_and_i32(val, val, mask);
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static void rx_bnotm(TCGv mem, TCGv mask)
+static void rx_bnotm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_xor_i32(val, val, mask);
- rx_gen_st(MO_8, val, mem);
+ rx_gen_st(ctx, MO_8, val, mem);
}
-static void rx_bsetr(TCGv reg, TCGv mask)
+static void rx_bsetr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_or_i32(reg, reg, mask);
}
-static void rx_bclrr(TCGv reg, TCGv mask)
+static void rx_bclrr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_andc_i32(reg, reg, mask);
}
-static inline void rx_btstr(TCGv reg, TCGv mask)
+static void rx_btstr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
- TCGv t0;
- t0 = tcg_temp_new();
+ TCGv_i32 t0;
+ t0 = tcg_temp_new_i32();
tcg_gen_and_i32(t0, reg, mask);
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static inline void rx_bnotr(TCGv reg, TCGv mask)
+static void rx_bnotr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_xor_i32(reg, reg, mask);
}
@@ -1959,43 +1965,43 @@ static inline void rx_bnotr(TCGv reg, TCGv mask)
static bool cat3(trans_, name, _im)(DisasContext *ctx, \
cat3(arg_, name, _im) * a) \
{ \
- TCGv mask, mem, addr; \
- mem = tcg_temp_new(); \
+ TCGv_i32 mask, mem, addr; \
+ mem = tcg_temp_new_i32(); \
mask = tcg_constant_i32(1 << a->imm); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
- cat3(rx_, op, m)(addr, mask); \
+ cat3(rx_, op, m)(ctx, addr, mask); \
return true; \
} \
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
cat3(arg_, name, _ir) * a) \
{ \
- TCGv mask; \
+ TCGv_i32 mask; \
mask = tcg_constant_i32(1 << a->imm); \
- cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
+ cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \
return true; \
} \
static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
cat3(arg_, name, _rr) * a) \
{ \
- TCGv mask, b; \
- mask = tcg_temp_new(); \
- b = tcg_temp_new(); \
+ TCGv_i32 mask, b; \
+ mask = tcg_temp_new_i32(); \
+ b = tcg_temp_new_i32(); \
tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
- cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
+ cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \
return true; \
} \
static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
cat3(arg_, name, _rm) * a) \
{ \
- TCGv mask, mem, addr, b; \
- mask = tcg_temp_new(); \
- b = tcg_temp_new(); \
+ TCGv_i32 mask, mem, addr, b; \
+ mask = tcg_temp_new_i32(); \
+ b = tcg_temp_new_i32(); \
tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
- mem = tcg_temp_new(); \
+ mem = tcg_temp_new_i32(); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
- cat3(rx_, op, m)(addr, mask); \
+ cat3(rx_, op, m)(ctx, addr, mask); \
return true; \
}
@@ -2004,12 +2010,12 @@ BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
-static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
+static void bmcnd_op(TCGv_i32 val, TCGCond cond, int pos)
{
- TCGv bit;
+ TCGv_i32 bit;
DisasCompare dc;
- dc.temp = tcg_temp_new();
- bit = tcg_temp_new();
+ dc.temp = tcg_temp_new_i32();
+ bit = tcg_temp_new_i32();
psw_cond(&dc, cond);
tcg_gen_andi_i32(val, val, ~(1 << pos));
tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
@@ -2019,13 +2025,13 @@ static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
- TCGv val, mem, addr;
- val = tcg_temp_new();
- mem = tcg_temp_new();
+ TCGv_i32 val, mem, addr;
+ val = tcg_temp_new_i32();
+ mem = tcg_temp_new_i32();
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
- rx_gen_ld(MO_8, val, addr);
+ rx_gen_ld(ctx, MO_8, val, addr);
bmcnd_op(val, a->cd, a->imm);
- rx_gen_st(MO_8, val, addr);
+ rx_gen_st(ctx, MO_8, val, addr);
return true;
}
@@ -2045,7 +2051,7 @@ enum {
PSW_U = 9,
};
-static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
+static void clrsetpsw(DisasContext *ctx, int cb, int val)
{
if (cb < 8) {
switch (cb) {
@@ -2113,7 +2119,7 @@ static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
/* mvtc #imm, rd */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
- TCGv imm;
+ TCGv_i32 imm;
imm = tcg_constant_i32(a->imm);
move_to_cr(ctx, imm, a->cr);
@@ -2137,9 +2143,9 @@ static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
/* rtfi */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
- TCGv psw;
+ TCGv_i32 psw;
if (is_privileged(ctx, 1)) {
- psw = tcg_temp_new();
+ psw = tcg_temp_new_i32();
tcg_gen_mov_i32(cpu_pc, cpu_bpc);
tcg_gen_mov_i32(psw, cpu_bpsw);
gen_helper_set_psw_rte(tcg_env, psw);
@@ -2151,11 +2157,11 @@ static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
/* rte */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
- TCGv psw;
+ TCGv_i32 psw;
if (is_privileged(ctx, 1)) {
- psw = tcg_temp_new();
- pop(cpu_pc);
- pop(psw);
+ psw = tcg_temp_new_i32();
+ pop(ctx, cpu_pc);
+ pop(ctx, psw);
gen_helper_set_psw_rte(tcg_env, psw);
ctx->base.is_jmp = DISAS_EXIT;
}
@@ -2174,7 +2180,7 @@ static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
/* int #imm */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
- TCGv vec;
+ TCGv_i32 vec;
tcg_debug_assert(a->imm < 0x100);
vec = tcg_constant_i32(a->imm);
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 487c41b..3b1e75f 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -541,21 +541,28 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
pages = g_malloc(nr_pages * sizeof(*pages));
ret = translate_pages(cpu, laddr, nr_pages, pages, is_write, &tec);
- if (ret) {
- trigger_access_exception(&cpu->env, ret, tec);
- } else if (hostbuf != NULL) {
+ if (ret == 0 && hostbuf != NULL) {
AddressSpace *as = CPU(cpu)->as;
/* Copy data by stepping through the area page by page */
for (i = 0; i < nr_pages; i++) {
+ MemTxResult res;
+
currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
- address_space_rw(as, pages[i] | (laddr & ~TARGET_PAGE_MASK),
- attrs, hostbuf, currlen, is_write);
+ res = address_space_rw(as, pages[i] | (laddr & ~TARGET_PAGE_MASK),
+ attrs, hostbuf, currlen, is_write);
+ if (res != MEMTX_OK) {
+ ret = PGM_ADDRESSING;
+ break;
+ }
laddr += currlen;
hostbuf += currlen;
len -= currlen;
}
}
+ if (ret) {
+ trigger_access_exception(&cpu->env, ret, tec);
+ }
g_free(pages);
return ret;
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index c7e8574..ec9e5a0 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -258,9 +258,9 @@ static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
* 16 byte operations to handle it in a special way.
*/
g_assert(es <= MO_64);
-#if !HOST_BIG_ENDIAN
- offs ^= (8 - bytes);
-#endif
+ if (!HOST_BIG_ENDIAN) {
+ offs ^= (8 - bytes);
+ }
return offs + vec_full_reg_offset(reg);
}
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index e073e5a..f3b4b48 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -175,9 +175,9 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
/* convert it to an element offset relative to tcg_env (vec_reg_offset() */
tcg_gen_shli_i64(tmp, tmp, es);
-#if !HOST_BIG_ENDIAN
- tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
-#endif
+ if (!HOST_BIG_ENDIAN) {
+ tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
+ }
tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
/* generate the final ptr by adding tcg_env */
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index c41ab70..b075901 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -277,7 +277,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
uint32_t mem_value);
#endif
-int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr);
+int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr);
void cpu_load_tlb(CPUSH4State * env);
@@ -365,14 +365,14 @@ static inline int cpu_ptel_pr (uint32_t ptel)
#define PTEA_TC (1 << 3)
#define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3)
-static inline target_ulong cpu_read_sr(CPUSH4State *env)
+static inline uint32_t cpu_read_sr(CPUSH4State *env)
{
return env->sr | (env->sr_m << SR_M) |
(env->sr_q << SR_Q) |
(env->sr_t << SR_T);
}
-static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
+static inline void cpu_write_sr(CPUSH4State *env, uint32_t sr)
{
env->sr_m = (sr >> SR_M) & 1;
env->sr_q = (sr >> SR_Q) & 1;
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 1744ef0..3b18a32 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -47,7 +47,7 @@
#if defined(CONFIG_USER_ONLY)
-int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
+int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr)
{
/* For user mode, only U0 area is cacheable. */
return !(addr & 0x80000000);
@@ -231,11 +231,11 @@ static int itlb_replacement(CPUSH4State * env)
/* Find the corresponding entry in the right TLB
Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
-static int find_tlb_entry(CPUSH4State * env, target_ulong address,
+static int find_tlb_entry(CPUSH4State *env, vaddr address,
tlb_t * entries, uint8_t nbtlb, int use_asid)
{
int match = MMU_DTLB_MISS;
- uint32_t start, end;
+ vaddr start, end;
uint8_t asid;
int i;
@@ -291,7 +291,7 @@ static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
/* Find itlb entry
Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
*/
-static int find_itlb_entry(CPUSH4State * env, target_ulong address,
+static int find_itlb_entry(CPUSH4State *env, vaddr address,
int use_asid)
{
int e;
@@ -309,7 +309,7 @@ static int find_itlb_entry(CPUSH4State * env, target_ulong address,
/* Find utlb entry
Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
-static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid)
+static int find_utlb_entry(CPUSH4State *env, vaddr address, int use_asid)
{
/* per utlb access */
increment_urc(env);
@@ -325,8 +325,8 @@ static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid
MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
-static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
- int *prot, target_ulong address,
+static int get_mmu_address(CPUSH4State *env, hwaddr *physical,
+ int *prot, vaddr address,
MMUAccessType access_type)
{
int use_asid, n;
@@ -392,8 +392,8 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
return n;
}
-static int get_physical_address(CPUSH4State * env, target_ulong * physical,
- int *prot, target_ulong address,
+static int get_physical_address(CPUSH4State *env, hwaddr* physical,
+ int *prot, vaddr address,
MMUAccessType access_type)
{
/* P1, P2 and P4 areas do not use translation */
@@ -433,7 +433,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
- target_ulong physical;
+ hwaddr physical;
int prot;
if (get_physical_address(cpu_env(cs), &physical, &prot, addr, MMU_DATA_LOAD)
@@ -452,7 +452,7 @@ void cpu_load_tlb(CPUSH4State * env)
if (entry->v) {
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(cs, address);
}
@@ -528,7 +528,7 @@ void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
tlb_t * entry = &s->itlb[index];
if (entry->v) {
/* Overwriting valid entry in itlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(env_cpu(s), address);
}
entry->asid = asid;
@@ -570,7 +570,7 @@ void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
/* ITLB Data Array 1 */
if (entry->v) {
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(env_cpu(s), address);
}
entry->ppn = (mem_value & 0x1ffffc00) >> 10;
@@ -665,7 +665,7 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
CPUState *cs = env_cpu(s);
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(cs, address);
}
entry->asid = asid;
@@ -716,7 +716,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
/* UTLB Data Array 1 */
if (entry->v) {
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(env_cpu(s), address);
}
entry->ppn = (mem_value & 0x1ffffc00) >> 10;
@@ -735,7 +735,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
}
}
-int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
+int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr)
{
int n;
int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
@@ -800,7 +800,7 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
CPUSH4State *env = cpu_env(cs);
int ret;
- target_ulong physical;
+ hwaddr physical;
int prot;
ret = get_physical_address(env, &physical, &prot, address, access_type);
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 70fd13a..b3ae0a3 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -223,7 +223,7 @@ static inline bool use_exit_tb(DisasContext *ctx)
return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}
-static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+static bool use_goto_tb(DisasContext *ctx, vaddr dest)
{
if (use_exit_tb(ctx)) {
return false;
@@ -231,12 +231,12 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
return translator_use_goto_tb(&ctx->base, dest);
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, vaddr dest)
{
if (use_goto_tb(ctx, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (use_exit_tb(ctx)) {
@@ -267,7 +267,7 @@ static void gen_jump(DisasContext * ctx)
}
/* Immediate conditional jump (bt or bf) */
-static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
+static void gen_conditional_jump(DisasContext *ctx, vaddr dest,
bool jump_if_true)
{
TCGLabel *l1 = gen_new_label();
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 31cb3d9..7169a50 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -3,7 +3,6 @@
#include "qemu/bswap.h"
#include "cpu-qom.h"
-#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
index 9163b9d..c5d88de 100644
--- a/target/sparc/helper.c
+++ b/target/sparc/helper.c
@@ -21,6 +21,7 @@
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
+#include "exec/cpu-common.h"
#include "exec/helper-proto.h"
void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
index 23adda4..96ef81c 100644
--- a/target/sparc/int64_helper.c
+++ b/target/sparc/int64_helper.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
+#include "exec/cpu-common.h"
#include "exec/helper-proto.h"
#include "exec/log.h"
#include "trace.h"
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 810e249..d6b599b 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -363,15 +363,15 @@ static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
translator_use_goto_tb(&s->base, npc);
}
-static void gen_goto_tb(DisasContext *s, int tb_num,
+static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx,
target_ulong pc, target_ulong npc)
{
if (use_goto_tb(s, pc, npc)) {
/* jump to same page: we can use a direct jump */
- tcg_gen_goto_tb(tb_num);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
- tcg_gen_exit_tb(s->base.tb, tb_num);
+ tcg_gen_exit_tb(s->base.tb, tb_slot_idx);
} else {
/* jump to another page: we can use an indirect jump */
tcg_gen_movi_tl(cpu_pc, pc);
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 4f035b6..04319e1 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -37,7 +37,7 @@ static const gchar *tricore_gdb_arch_name(CPUState *cs)
static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
{
- cpu_env(cs)->PC = value & ~(target_ulong)1;
+ cpu_env(cs)->PC = value & ~1;
}
static vaddr tricore_cpu_get_pc(CPUState *cs)
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index e4c53d4..7574111 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -35,7 +35,7 @@ enum {
};
static int get_physical_address(CPUTriCoreState *env, hwaddr *physical,
- int *prot, target_ulong address,
+ int *prot, vaddr address,
MMUAccessType access_type, int mmu_idx)
{
int ret = TLBRET_MATCH;
@@ -61,7 +61,7 @@ hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
/* TODO: Add exception support */
-static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
+static void raise_mmu_exception(CPUTriCoreState *env, vaddr address,
int rw, int tlb_error)
{
}
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index 9910c13..610f148 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -149,15 +149,15 @@ static uint32_t ssov32(CPUTriCoreState *env, int64_t arg)
if (arg > max_pos) {
env->PSW_USB_V = (1 << 31);
env->PSW_USB_SV = (1 << 31);
- ret = (target_ulong)max_pos;
+ ret = (uint32_t)max_pos;
} else {
if (arg < max_neg) {
env->PSW_USB_V = (1 << 31);
env->PSW_USB_SV = (1 << 31);
- ret = (target_ulong)max_neg;
+ ret = (uint32_t)max_neg;
} else {
env->PSW_USB_V = 0;
- ret = (target_ulong)arg;
+ ret = (uint32_t)arg;
}
}
env->PSW_USB_AV = arg ^ arg * 2u;
@@ -172,10 +172,10 @@ static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg)
if (arg > max_pos) {
env->PSW_USB_V = (1 << 31);
env->PSW_USB_SV = (1 << 31);
- ret = (target_ulong)max_pos;
+ ret = (uint32_t)max_pos;
} else {
env->PSW_USB_V = 0;
- ret = (target_ulong)arg;
+ ret = (uint32_t)arg;
}
env->PSW_USB_AV = arg ^ arg * 2u;
env->PSW_USB_SAV |= env->PSW_USB_AV;
@@ -192,7 +192,7 @@ static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg)
ret = 0;
} else {
env->PSW_USB_V = 0;
- ret = (target_ulong)arg;
+ ret = (uint32_t)arg;
}
env->PSW_USB_AV = arg ^ arg * 2u;
env->PSW_USB_SAV |= env->PSW_USB_AV;
@@ -260,8 +260,7 @@ static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1)
return (hw0 & 0xffff) | (hw1 << 16);
}
-target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -294,8 +293,7 @@ uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
return result;
}
-target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -397,8 +395,7 @@ uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
}
-target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
@@ -406,8 +403,7 @@ target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_pos(env, result);
}
-target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_h_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -416,8 +412,7 @@ target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1,
return suov16(env, ret_hw0, ret_hw1);
}
-target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -450,8 +445,7 @@ uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
return result;
}
-target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -552,8 +546,7 @@ uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
}
-target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
@@ -561,8 +554,7 @@ target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_neg(env, result);
}
-target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_h_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -571,8 +563,7 @@ target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1,
return suov16(env, ret_hw0, ret_hw1);
}
-target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_mul_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -580,8 +571,7 @@ target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_mul_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
@@ -590,8 +580,7 @@ target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_pos(env, result);
}
-target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sha_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int32_t t2 = sextract64(r2, 0, 6);
@@ -606,14 +595,14 @@ target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1)
+uint32_t helper_abs_ssov(CPUTriCoreState *env, uint32_t r1)
{
- target_ulong result;
+ uint32_t result;
result = ((int32_t)r1 >= 0) ? r1 : (0 - r1);
return ssov32(env, result);
}
-uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1)
+uint32_t helper_abs_h_ssov(CPUTriCoreState *env, uint32_t r1)
{
int32_t ret_h0, ret_h1;
@@ -626,8 +615,7 @@ uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1)
return ssov16(env, ret_h0, ret_h1);
}
-target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_absdif_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -641,8 +629,7 @@ target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t t1, t2;
int32_t ret_h0, ret_h1;
@@ -666,8 +653,8 @@ uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov16(env, ret_h0, ret_h1);
}
-target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_madd32_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -678,8 +665,8 @@ target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_madd32_suov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
uint64_t t1 = extract64(r1, 0, 32);
uint64_t t2 = extract64(r2, 0, 32);
@@ -690,8 +677,8 @@ target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_pos(env, result);
}
-uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_madd64_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, ovf;
int64_t t1 = sextract64(r1, 0, 32);
@@ -848,8 +835,8 @@ uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_madd64_suov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, mul;
uint64_t t1 = extract64(r1, 0, 32);
@@ -873,8 +860,8 @@ uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1,
return ret;
}
-target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_msub32_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -885,8 +872,8 @@ target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_msub32_suov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
uint64_t t1 = extract64(r1, 0, 32);
uint64_t t2 = extract64(r2, 0, 32);
@@ -912,8 +899,8 @@ target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
return result;
}
-uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_msub64_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, ovf;
int64_t t1 = sextract64(r1, 0, 32);
@@ -944,8 +931,8 @@ uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1,
return ret;
}
-uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_msub64_suov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, mul;
uint64_t t1 = extract64(r1, 0, 32);
@@ -1097,7 +1084,7 @@ uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
+uint32_t helper_abs_b(CPUTriCoreState *env, uint32_t arg)
{
int32_t b, i;
int32_t ovf = 0;
@@ -1120,7 +1107,7 @@ uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
return ret;
}
-uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg)
+uint32_t helper_abs_h(CPUTriCoreState *env, uint32_t arg)
{
int32_t h, i;
int32_t ovf = 0;
@@ -1143,7 +1130,7 @@ uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg)
return ret;
}
-uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_absdif_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t b, i;
int32_t extr_r2;
@@ -1167,7 +1154,7 @@ uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_absdif_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t h, i;
int32_t extr_r2;
@@ -1296,7 +1283,7 @@ uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_add_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t b, i;
int32_t extr_r1, extr_r2;
@@ -1322,7 +1309,7 @@ uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_add_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t h, i;
int32_t extr_r1, extr_r2;
@@ -1451,7 +1438,7 @@ uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_sub_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t b, i;
int32_t extr_r1, extr_r2;
@@ -1477,7 +1464,7 @@ uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_sub_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t h, i;
int32_t extr_r1, extr_r2;
@@ -1502,7 +1489,7 @@ uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
+uint32_t helper_eq_b(uint32_t r1, uint32_t r2)
{
uint32_t ret, msk;
int32_t i;
@@ -1519,7 +1506,7 @@ uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eq_h(target_ulong r1, target_ulong r2)
+uint32_t helper_eq_h(uint32_t r1, uint32_t r2)
{
int32_t ret = 0;
@@ -1534,7 +1521,7 @@ uint32_t helper_eq_h(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eqany_b(target_ulong r1, target_ulong r2)
+uint32_t helper_eqany_b(uint32_t r1, uint32_t r2)
{
int32_t i;
uint32_t ret = 0;
@@ -1546,7 +1533,7 @@ uint32_t helper_eqany_b(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eqany_h(target_ulong r1, target_ulong r2)
+uint32_t helper_eqany_h(uint32_t r1, uint32_t r2)
{
uint32_t ret;
@@ -1556,7 +1543,7 @@ uint32_t helper_eqany_h(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_b(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_b(uint32_t r1, uint32_t r2)
{
int32_t i;
uint32_t ret = 0;
@@ -1570,7 +1557,7 @@ uint32_t helper_lt_b(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_bu(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_bu(uint32_t r1, uint32_t r2)
{
int32_t i;
uint32_t ret = 0;
@@ -1584,7 +1571,7 @@ uint32_t helper_lt_bu(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_h(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_h(uint32_t r1, uint32_t r2)
{
uint32_t ret = 0;
@@ -1599,7 +1586,7 @@ uint32_t helper_lt_h(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_hu(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_hu(uint32_t r1, uint32_t r2)
{
uint32_t ret = 0;
@@ -1615,7 +1602,7 @@ uint32_t helper_lt_hu(target_ulong r1, target_ulong r2)
}
#define EXTREMA_H_B(name, op) \
-uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \
+uint32_t helper_##name ##_b(uint32_t r1, uint32_t r2) \
{ \
int32_t i, extr_r1, extr_r2; \
uint32_t ret = 0; \
@@ -1629,7 +1616,7 @@ uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \
return ret; \
} \
\
-uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\
+uint32_t helper_##name ##_bu(uint32_t r1, uint32_t r2) \
{ \
int32_t i; \
uint32_t extr_r1, extr_r2; \
@@ -1644,7 +1631,7 @@ uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\
return ret; \
} \
\
-uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \
+uint32_t helper_##name ##_h(uint32_t r1, uint32_t r2) \
{ \
int32_t extr_r1, extr_r2; \
uint32_t ret = 0; \
@@ -1662,7 +1649,7 @@ uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \
return ret; \
} \
\
-uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\
+uint32_t helper_##name ##_hu(uint32_t r1, uint32_t r2) \
{ \
uint32_t extr_r1, extr_r2; \
uint32_t ret = 0; \
@@ -1729,7 +1716,7 @@ EXTREMA_H_B(min, <)
#undef EXTREMA_H_B
-uint32_t helper_clo_h(target_ulong r1)
+uint32_t helper_clo_h(uint32_t r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
uint32_t ret_hw1 = extract32(r1, 16, 16);
@@ -1747,7 +1734,7 @@ uint32_t helper_clo_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_clz_h(target_ulong r1)
+uint32_t helper_clz_h(uint32_t r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
uint32_t ret_hw1 = extract32(r1, 16, 16);
@@ -1765,7 +1752,7 @@ uint32_t helper_clz_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_cls_h(target_ulong r1)
+uint32_t helper_cls_h(uint32_t r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
uint32_t ret_hw1 = extract32(r1, 16, 16);
@@ -1783,7 +1770,7 @@ uint32_t helper_cls_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_sh(target_ulong r1, target_ulong r2)
+uint32_t helper_sh(uint32_t r1, uint32_t r2)
{
int32_t shift_count = sextract32(r2, 0, 6);
@@ -1796,7 +1783,7 @@ uint32_t helper_sh(target_ulong r1, target_ulong r2)
}
}
-uint32_t helper_sh_h(target_ulong r1, target_ulong r2)
+uint32_t helper_sh_h(uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
int32_t shift_count;
@@ -1816,7 +1803,7 @@ uint32_t helper_sh_h(target_ulong r1, target_ulong r2)
}
}
-uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_sha(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t shift_count;
int64_t result, t1;
@@ -1854,7 +1841,7 @@ uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_sha_h(target_ulong r1, target_ulong r2)
+uint32_t helper_sha_h(uint32_t r1, uint32_t r2)
{
int32_t shift_count;
int32_t ret_hw0, ret_hw1;
@@ -1874,7 +1861,7 @@ uint32_t helper_sha_h(target_ulong r1, target_ulong r2)
}
}
-uint32_t helper_bmerge(target_ulong r1, target_ulong r2)
+uint32_t helper_bmerge(uint32_t r1, uint32_t r2)
{
uint32_t i, ret;
@@ -1905,7 +1892,7 @@ uint64_t helper_bsplit(uint32_t r1)
return ret;
}
-uint32_t helper_parity(target_ulong r1)
+uint32_t helper_parity(uint32_t r1)
{
uint32_t ret;
uint32_t nOnes, i;
@@ -1942,7 +1929,7 @@ uint32_t helper_parity(target_ulong r1)
}
uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high,
- target_ulong r2)
+ uint32_t r2)
{
uint32_t ret;
int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac;
@@ -1983,7 +1970,7 @@ uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high,
return ret;
}
-uint64_t helper_unpack(target_ulong arg1)
+uint64_t helper_unpack(uint32_t arg1)
{
int32_t fp_exp = extract32(arg1, 23, 8);
int32_t fp_frac = extract32(arg1, 0, 23);
@@ -2408,7 +2395,7 @@ uint32_t helper_shuffle(uint32_t arg0, uint32_t arg1)
/* context save area (CSA) related helpers */
-static int cdc_increment(target_ulong *psw)
+static int cdc_increment(uint32_t *psw)
{
if ((*psw & MASK_PSW_CDC) == 0x7f) {
return 0;
@@ -2426,7 +2413,7 @@ static int cdc_increment(target_ulong *psw)
return 0;
}
-static int cdc_decrement(target_ulong *psw)
+static int cdc_decrement(uint32_t *psw)
{
if ((*psw & MASK_PSW_CDC) == 0x7f) {
return 0;
@@ -2442,7 +2429,7 @@ static int cdc_decrement(target_ulong *psw)
return 0;
}
-static bool cdc_zero(target_ulong *psw)
+static bool cdc_zero(uint32_t *psw)
{
int cdc = *psw & MASK_PSW_CDC;
/* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC ==
@@ -2457,7 +2444,7 @@ static bool cdc_zero(target_ulong *psw)
return count == 0;
}
-static void save_context_upper(CPUTriCoreState *env, target_ulong ea)
+static void save_context_upper(CPUTriCoreState *env, uint32_t ea)
{
cpu_stl_data(env, ea, env->PCXI);
cpu_stl_data(env, ea+4, psw_read(env));
@@ -2477,7 +2464,7 @@ static void save_context_upper(CPUTriCoreState *env, target_ulong ea)
cpu_stl_data(env, ea+60, env->gpr_d[15]);
}
-static void save_context_lower(CPUTriCoreState *env, target_ulong ea)
+static void save_context_lower(CPUTriCoreState *env, uint32_t ea)
{
cpu_stl_data(env, ea, env->PCXI);
cpu_stl_data(env, ea+4, env->gpr_a[11]);
@@ -2497,8 +2484,8 @@ static void save_context_lower(CPUTriCoreState *env, target_ulong ea)
cpu_stl_data(env, ea+60, env->gpr_d[7]);
}
-static void restore_context_upper(CPUTriCoreState *env, target_ulong ea,
- target_ulong *new_PCXI, target_ulong *new_PSW)
+static void restore_context_upper(CPUTriCoreState *env, uint32_t ea,
+ uint32_t *new_PCXI, uint32_t *new_PSW)
{
*new_PCXI = cpu_ldl_data(env, ea);
*new_PSW = cpu_ldl_data(env, ea+4);
@@ -2518,8 +2505,8 @@ static void restore_context_upper(CPUTriCoreState *env, target_ulong ea,
env->gpr_d[15] = cpu_ldl_data(env, ea+60);
}
-static void restore_context_lower(CPUTriCoreState *env, target_ulong ea,
- target_ulong *ra, target_ulong *pcxi)
+static void restore_context_lower(CPUTriCoreState *env, uint32_t ea,
+ uint32_t *ra, uint32_t *pcxi)
{
*pcxi = cpu_ldl_data(env, ea);
*ra = cpu_ldl_data(env, ea+4);
@@ -2541,10 +2528,10 @@ static void restore_context_lower(CPUTriCoreState *env, target_ulong ea,
void helper_call(CPUTriCoreState *env, uint32_t next_pc)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
- target_ulong psw;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
+ uint32_t psw;
psw = psw_read(env);
/* if (FCX == 0) trap(FCU); */
@@ -2604,9 +2591,9 @@ void helper_call(CPUTriCoreState *env, uint32_t next_pc)
void helper_ret(CPUTriCoreState *env)
{
- target_ulong ea;
- target_ulong new_PCXI;
- target_ulong new_PSW, psw;
+ uint32_t ea;
+ uint32_t new_PCXI;
+ uint32_t new_PSW, psw;
psw = psw_read(env);
/* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/
@@ -2657,9 +2644,9 @@ void helper_ret(CPUTriCoreState *env)
void helper_bisr(CPUTriCoreState *env, uint32_t const9)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
if (env->FCX == 0) {
/* FCU trap */
@@ -2701,9 +2688,9 @@ void helper_bisr(CPUTriCoreState *env, uint32_t const9)
void helper_rfe(CPUTriCoreState *env)
{
- target_ulong ea;
- target_ulong new_PCXI;
- target_ulong new_PSW;
+ uint32_t ea;
+ uint32_t new_PCXI;
+ uint32_t new_PSW;
/* if (PCXI[19: 0] == 0) then trap(CSU); */
if ((env->PCXI & 0xfffff) == 0) {
/* raise csu trap */
@@ -2762,35 +2749,35 @@ void helper_rfm(CPUTriCoreState *env)
}
}
-void helper_ldlcx(CPUTriCoreState *env, target_ulong ea)
+void helper_ldlcx(CPUTriCoreState *env, uint32_t ea)
{
uint32_t dummy;
/* insn doesn't load PCXI and RA */
restore_context_lower(env, ea, &dummy, &dummy);
}
-void helper_lducx(CPUTriCoreState *env, target_ulong ea)
+void helper_lducx(CPUTriCoreState *env, uint32_t ea)
{
uint32_t dummy;
/* insn doesn't load PCXI and PSW */
restore_context_upper(env, ea, &dummy, &dummy);
}
-void helper_stlcx(CPUTriCoreState *env, target_ulong ea)
+void helper_stlcx(CPUTriCoreState *env, uint32_t ea)
{
save_context_lower(env, ea);
}
-void helper_stucx(CPUTriCoreState *env, target_ulong ea)
+void helper_stucx(CPUTriCoreState *env, uint32_t ea)
{
save_context_upper(env, ea);
}
void helper_svlcx(CPUTriCoreState *env)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
if (env->FCX == 0) {
/* FCU trap */
@@ -2831,9 +2818,9 @@ void helper_svlcx(CPUTriCoreState *env)
void helper_svucx(CPUTriCoreState *env)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
if (env->FCX == 0) {
/* FCU trap */
@@ -2874,8 +2861,8 @@ void helper_svucx(CPUTriCoreState *env)
void helper_rslcx(CPUTriCoreState *env)
{
- target_ulong ea;
- target_ulong new_PCXI;
+ uint32_t ea;
+ uint32_t new_PCXI;
/* if (PCXI[19: 0] == 0) then trap(CSU); */
if ((env->PCXI & 0xfffff) == 0) {
/* CSU trap */
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 3d0e7a1..fbe05a9 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -44,19 +44,19 @@
/*
* TCG registers
*/
-static TCGv cpu_PC;
-static TCGv cpu_PCXI;
-static TCGv cpu_PSW;
-static TCGv cpu_ICR;
+static TCGv_i32 cpu_PC;
+static TCGv_i32 cpu_PCXI;
+static TCGv_i32 cpu_PSW;
+static TCGv_i32 cpu_ICR;
/* GPR registers */
-static TCGv cpu_gpr_a[16];
-static TCGv cpu_gpr_d[16];
+static TCGv_i32 cpu_gpr_a[16];
+static TCGv_i32 cpu_gpr_d[16];
/* PSW Flag cache */
-static TCGv cpu_PSW_C;
-static TCGv cpu_PSW_V;
-static TCGv cpu_PSW_SV;
-static TCGv cpu_PSW_AV;
-static TCGv cpu_PSW_SAV;
+static TCGv_i32 cpu_PSW_C;
+static TCGv_i32 cpu_PSW_V;
+static TCGv_i32 cpu_PSW_SV;
+static TCGv_i32 cpu_PSW_AV;
+static TCGv_i32 cpu_PSW_SAV;
static const char *regnames_a[] = {
"a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
@@ -72,7 +72,8 @@ static const char *regnames_d[] = {
typedef struct DisasContext {
DisasContextBase base;
- target_ulong pc_succ_insn;
+
+ vaddr pc_succ_insn;
uint32_t opcode;
/* Routine used to access memory */
int mem_idx;
@@ -135,46 +136,46 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} while (0)
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg00, arg0, 16); \
- tcg_gen_ext16s_tl(arg01, arg0); \
- tcg_gen_ext16s_tl(arg11, arg1); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
+ tcg_gen_sari_i32(arg00, arg0, 16); \
+ tcg_gen_ext16s_i32(arg01, arg0); \
+ tcg_gen_ext16s_i32(arg11, arg1); \
gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
} while (0)
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg10 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg00, arg0, 16); \
- tcg_gen_ext16s_tl(arg01, arg0); \
- tcg_gen_sari_tl(arg11, arg1, 16); \
- tcg_gen_ext16s_tl(arg10, arg1); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg10 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
+ tcg_gen_sari_i32(arg00, arg0, 16); \
+ tcg_gen_ext16s_i32(arg01, arg0); \
+ tcg_gen_sari_i32(arg11, arg1, 16); \
+ tcg_gen_ext16s_i32(arg10, arg1); \
gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
} while (0)
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg10 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg00, arg0, 16); \
- tcg_gen_ext16s_tl(arg01, arg0); \
- tcg_gen_sari_tl(arg10, arg1, 16); \
- tcg_gen_ext16s_tl(arg11, arg1); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg10 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
+ tcg_gen_sari_i32(arg00, arg0, 16); \
+ tcg_gen_ext16s_i32(arg01, arg0); \
+ tcg_gen_sari_i32(arg10, arg1, 16); \
+ tcg_gen_ext16s_i32(arg11, arg1); \
gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
} while (0)
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg01, arg0, 16); \
- tcg_gen_ext16s_tl(arg00, arg0); \
- tcg_gen_sari_tl(arg11, arg1, 16); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
+ tcg_gen_sari_i32(arg01, arg0, 16); \
+ tcg_gen_ext16s_i32(arg00, arg0); \
+ tcg_gen_sari_i32(arg11, arg1, 16); \
gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
} while (0)
@@ -200,7 +201,7 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
/* For two 32-bit registers used a 64-bit register, the first
registernumber needs to be even. Otherwise we trap. */
-static inline void generate_trap(DisasContext *ctx, int class, int tin);
+static void generate_trap(DisasContext *ctx, int class, int tin);
#define CHECK_REG_PAIR(reg) do { \
if (reg & 0x1) { \
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
@@ -209,23 +210,24 @@ static inline void generate_trap(DisasContext *ctx, int class, int tin);
/* Functions for load/save to/from memory */
-static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
- int16_t con, MemOp mop)
+static void gen_offset_ld(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
+ int16_t con, MemOp mop)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, con);
- tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, r2, con);
+ tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
}
-static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
- int16_t con, MemOp mop)
+static void gen_offset_st(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
+ int16_t con, MemOp mop)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, con);
- tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, r2, con);
+ tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
}
-static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+static void gen_st_2regs_64(DisasContext *ctx, TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 address)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -233,15 +235,17 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ);
}
-static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
- DisasContext *ctx)
+static void gen_offset_st_2regs(DisasContext *ctx,
+ TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 base, int16_t con)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, base, con);
- gen_st_2regs_64(rh, rl, temp, ctx);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, base, con);
+ gen_st_2regs_64(ctx, rh, rl, temp);
}
-static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+static void gen_ld_2regs_64(DisasContext *ctx, TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 address)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -250,87 +254,88 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
- DisasContext *ctx)
+static void gen_offset_ld_2regs(DisasContext *ctx,
+ TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 base, int16_t con)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, base, con);
- gen_ld_2regs_64(rh, rl, temp, ctx);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, base, con);
+ gen_ld_2regs_64(ctx, rh, rl, temp);
}
-static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
- MemOp mop)
+static void gen_st_preincr(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
+ int16_t off, MemOp mop)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, off);
- tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
- tcg_gen_mov_tl(r2, temp);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, r2, off);
+ tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_i32(r2, temp);
}
-static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
- MemOp mop)
+static void gen_ld_preincr(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
+ int16_t off, MemOp mop)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, off);
- tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
- tcg_gen_mov_tl(r2, temp);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, r2, off);
+ tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_i32(r2, temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
-static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
+static void gen_ldmst(DisasContext *ctx, int ereg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(ereg);
/* temp = (M(EA, word) */
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
/* temp = temp & ~E[a][63:32]) */
- tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
+ tcg_gen_andc_i32(temp, temp, cpu_gpr_d[ereg + 1]);
/* temp2 = (E[a][31:0] & E[a][63:32]); */
- tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
+ tcg_gen_and_i32(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg + 1]);
/* temp = temp | temp2; */
- tcg_gen_or_tl(temp, temp, temp2);
+ tcg_gen_or_i32(temp, temp, temp2);
/* M(EA, word) = temp; */
- tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(temp, ea, ctx->mem_idx, MO_LEUL);
}
/* tmp = M(EA, word);
M(EA, word) = D[a];
D[a] = tmp[31:0];*/
-static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
+static void gen_swap(DisasContext *ctx, int reg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
-static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
+static void gen_cmpswap(DisasContext *ctx, int reg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(reg);
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_movcond_i32(TCG_COND_EQ, temp2, cpu_gpr_d[reg + 1], temp,
cpu_gpr_d[reg], temp);
- tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_st_i32(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
-static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
+static void gen_swapmsk(DisasContext *ctx, int reg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
CHECK_REG_PAIR(reg);
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
- tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
- tcg_gen_or_tl(temp2, temp2, temp3);
- tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_and_i32(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg + 1]);
+ tcg_gen_andc_i32(temp3, temp, cpu_gpr_d[reg + 1]);
+ tcg_gen_or_i32(temp2, temp2, temp3);
+ tcg_gen_qemu_st_i32(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
/* We generate loads and store to core special function register (csfr) through
@@ -340,12 +345,12 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
#define R(ADDRESS, REG, FEATURE) \
case ADDRESS: \
if (has_feature(ctx, FEATURE)) { \
- tcg_gen_ld_tl(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \
+ tcg_gen_ld_i32(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \
} \
break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
-static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
+static void gen_mfcr(DisasContext *ctx, TCGv_i32 ret, int32_t offset)
{
/* since we're caching PSW make this a special case */
if (offset == 0xfe04) {
@@ -365,7 +370,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
case ADDRESS: \
if (has_feature(ctx, FEATURE)) { \
- tcg_gen_st_tl(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \
+ tcg_gen_st_i32(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \
} \
break;
/* Endinit protected registers
@@ -373,8 +378,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
watchdog device, we handle endinit protected registers like
all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
-static inline void gen_mtcr(DisasContext *ctx, TCGv r1,
- int32_t offset)
+static void gen_mtcr(DisasContext *ctx, TCGv_i32 r1, int32_t offset)
{
if (ctx->priv == TRICORE_PRIV_SM) {
/* since we're caching PSW make this a special case */
@@ -393,31 +397,30 @@ static inline void gen_mtcr(DisasContext *ctx, TCGv r1,
/* Functions for arithmetic instructions */
-static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
+static void gen_add_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv t0 = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
/* Addition and set V/SV bits */
- tcg_gen_add_tl(result, r1, r2);
+ tcg_gen_add_i32(result, r1, r2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(t0, r1, r2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(t0, r1, r2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
-static inline void
-gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+static void gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 result = tcg_temp_new_i64();
@@ -429,65 +432,66 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_andc_i64(t1, t1, t0);
tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
tcg_gen_extrh_i64_i32(temp, result);
- tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
- tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
tcg_gen_mov_i64(ret, result);
}
-static inline void
-gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
- void(*op2)(TCGv, TCGv, TCGv))
+static void gen_addsub64_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32),
+ void(*op2)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
- TCGv temp4 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
+ TCGv_i32 temp4 = tcg_temp_new_i32();
(*op1)(temp, r1_low, r2);
/* calc V0 bit */
- tcg_gen_xor_tl(temp2, temp, r1_low);
- tcg_gen_xor_tl(temp3, r1_low, r2);
+ tcg_gen_xor_i32(temp2, temp, r1_low);
+ tcg_gen_xor_i32(temp3, r1_low, r2);
if (op1 == tcg_gen_add_tl) {
- tcg_gen_andc_tl(temp2, temp2, temp3);
+ tcg_gen_andc_i32(temp2, temp2, temp3);
} else {
- tcg_gen_and_tl(temp2, temp2, temp3);
+ tcg_gen_and_i32(temp2, temp2, temp3);
}
(*op2)(temp3, r1_high, r3);
/* calc V1 bit */
- tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
- tcg_gen_xor_tl(temp4, r1_high, r3);
+ tcg_gen_xor_i32(cpu_PSW_V, temp3, r1_high);
+ tcg_gen_xor_i32(temp4, r1_high, r3);
if (op2 == tcg_gen_add_tl) {
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, temp4);
} else {
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp4);
}
/* combine V0/V1 bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp2);
/* calc sv bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_mov_tl(ret_low, temp);
- tcg_gen_mov_tl(ret_high, temp3);
+ tcg_gen_mov_i32(ret_low, temp);
+ tcg_gen_mov_i32(ret_high, temp3);
/* calc AV bit */
- tcg_gen_add_tl(temp, ret_low, ret_low);
- tcg_gen_xor_tl(temp, temp, ret_low);
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ tcg_gen_add_i32(temp, ret_low, ret_low);
+ tcg_gen_xor_i32(temp, temp, ret_low);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, cpu_PSW_AV, ret_high);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/* ret = r2 + (r1 * r3); */
-static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+static void gen_madd32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -508,53 +512,51 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddi32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_madd32_d(ret, r1, r2, temp);
}
-static inline void
-gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_madd64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3)
{
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- TCGv t3 = tcg_temp_new();
- TCGv t4 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ TCGv_i32 t4 = tcg_temp_new_i32();
- tcg_gen_muls2_tl(t1, t2, r1, r3);
+ tcg_gen_muls2_i32(t1, t2, r1, r3);
/* only the add can overflow */
- tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ tcg_gen_add2_i32(t3, t4, r2_low, r2_high, t1, t2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
- tcg_gen_xor_tl(t1, r2_high, t2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ tcg_gen_xor_i32(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_i32(t1, r2_high, t2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t1);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
- tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_i32(cpu_PSW_AV, t4, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back the result */
- tcg_gen_mov_tl(ret_low, t3);
- tcg_gen_mov_tl(ret_high, t4);
+ tcg_gen_mov_i32(ret_low, t3);
+ tcg_gen_mov_i32(ret_high, t4);
}
-static inline void
-gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_maddu64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -572,39 +574,38 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
calc V bit */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
-gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddi64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
-gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddui64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
-gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_madd_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -625,13 +626,14 @@ gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_add_tl, tcg_gen_add_tl);
}
-static inline void
-gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsu_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -652,11 +654,12 @@ gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_sub_tl, tcg_gen_add_tl);
}
-static inline void
-gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsum_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -685,16 +688,17 @@ gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
-static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
+static void gen_adds(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2);
-static inline void
-gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_madds_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -713,25 +717,26 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_adds(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_adds(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
+static void gen_subs(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2);
-static inline void
-gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsus_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -750,20 +755,21 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_subs(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_adds(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void
-gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsums_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
@@ -792,11 +798,12 @@ gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
-static inline void
-gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddm_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -820,11 +827,12 @@ gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
-static inline void
-gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddms_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
@@ -846,11 +854,11 @@ gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
- uint32_t mode)
+static void gen_maddr64_h(TCGv_i32 ret, TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -869,23 +877,25 @@ gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
gen_helper_addr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
-gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddr32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
-gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsur32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -901,17 +911,17 @@ gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_addsur_h(ret, tcg_env, temp64, temp, temp2);
}
-static inline void
-gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
- uint32_t n, uint32_t mode)
+static void gen_maddr64s_h(TCGv_i32 ret, TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -930,23 +940,25 @@ gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
gen_helper_addr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
-gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddr32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
-gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsur32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -962,32 +974,32 @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
-static inline void
-gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_maddr_q(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
gen_helper_maddr_q(ret, tcg_env, r1, r2, r3, t_n);
}
-static inline void
-gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_maddrs_q(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
gen_helper_maddr_q_ssov(ret, tcg_env, r1, r2, r3, t_n);
}
-static inline void
-gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_madd32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1008,81 +1020,83 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* We produce an overflow on the host if the mul before was
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
if (n == 1) {
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_shli_i32(temp, temp, 31);
/* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp);
}
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
- tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp3, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, temp3);
+ tcg_gen_mov_i32(ret, temp3);
}
-static inline void
-gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16add32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_add_d(ret, arg1, temp);
}
-static inline void
-gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16adds32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_adds(ret, arg1, temp);
}
-static inline void
-gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16add64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1092,23 +1106,23 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t3);
}
-static inline void
-gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16adds64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1118,15 +1132,15 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t1);
}
-static inline void
-gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_madd64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t4 = tcg_temp_new_i64();
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
tcg_gen_ext_i32_i64(t2, arg2);
@@ -1146,29 +1160,29 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
if (n == 1) {
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_shli_i32(temp, temp, 31);
/* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp);
}
/* write back result */
tcg_gen_extr_i64_i32(rl, rh, t4);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
- tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
-gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_madds32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1184,12 +1198,12 @@ gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
gen_helper_madd32_q_add_ssov(ret, tcg_env, t1, t2);
}
-static inline void
-gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_madds64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
gen_helper_madd64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
@@ -1197,7 +1211,7 @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
}
/* ret = r2 - (r1 * r3); */
-static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+static void gen_msub32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1218,62 +1232,61 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubi32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msub32_d(ret, r1, r2, temp);
}
-static inline void
-gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msub64_d(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- TCGv t3 = tcg_temp_new();
- TCGv t4 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ TCGv_i32 t4 = tcg_temp_new_i32();
- tcg_gen_muls2_tl(t1, t2, r1, r3);
+ tcg_gen_muls2_i32(t1, t2, r1, r3);
/* only the sub can overflow */
- tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ tcg_gen_sub2_i32(t3, t4, r2_low, r2_high, t1, t2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
- tcg_gen_xor_tl(t1, r2_high, t2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ tcg_gen_xor_i32(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_i32(t1, r2_high, t2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, t1);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
- tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_i32(cpu_PSW_AV, t4, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back the result */
- tcg_gen_mov_tl(ret_low, t3);
- tcg_gen_mov_tl(ret_high, t4);
+ tcg_gen_mov_i32(ret_low, t3);
+ tcg_gen_mov_i32(ret_high, t4);
}
-static inline void
-gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubi64_d(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
-gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msubu64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1289,153 +1302,152 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
/* calc V bit, only the sub can overflow, if t1 > t2 */
tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
-gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubui64_d(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
+static void gen_addi_d(TCGv_i32 ret, TCGv_i32 r1, int32_t r2)
{
- TCGv temp = tcg_constant_i32(r2);
+ TCGv_i32 temp = tcg_constant_i32(r2);
gen_add_d(ret, r1, temp);
}
/* calculate the carry bit too */
-static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_add_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv t0 = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
- tcg_gen_movi_tl(t0, 0);
+ tcg_gen_movi_i32(t0, 0);
/* Addition and set C/V/SV bits */
tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(t0, r1, r2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(t0, r1, r2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
-static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
+static void gen_addi_CC(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_add_CC(ret, r1, temp);
}
-static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_addc_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv t0 = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
/* Addition, carry and set C/V/SV bits */
tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(t0, r1, r2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(t0, r1, r2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
-static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
+static void gen_addci_CC(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_addc_CC(ret, r1, temp);
}
-static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
- TCGv r4)
+static void gen_cond_add(TCGCond cond,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, TCGv_i32 r4)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv result = tcg_temp_new();
- TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_constant_i32(0);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
+ TCGv_i32 mask = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
- tcg_gen_setcond_tl(cond, mask, r4, t0);
- tcg_gen_shli_tl(mask, mask, 31);
+ tcg_gen_setcond_i32(cond, mask, r4, t0);
+ tcg_gen_shli_i32(mask, mask, 31);
- tcg_gen_add_tl(result, r1, r2);
+ tcg_gen_add_i32(result, r1, r2);
/* Calc PSW_V */
- tcg_gen_xor_tl(temp, result, r1);
- tcg_gen_xor_tl(temp2, r1, r2);
- tcg_gen_andc_tl(temp, temp, temp2);
- tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ tcg_gen_xor_i32(temp, result, r1);
+ tcg_gen_xor_i32(temp2, r1, r2);
+ tcg_gen_andc_i32(temp, temp, temp2);
+ tcg_gen_movcond_i32(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
/* Set PSW_SV */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SV, temp, cpu_PSW_SV);
/* calc AV bit */
- tcg_gen_add_tl(temp, result, result);
- tcg_gen_xor_tl(temp, temp, result);
- tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(temp, result, result);
+ tcg_gen_xor_i32(temp, temp, result);
+ tcg_gen_movcond_i32(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SAV, temp, cpu_PSW_SAV);
/* write back result */
- tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+ tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
-static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
- TCGv r3, TCGv r4)
+static void gen_condi_add(TCGCond cond,
+ TCGv_i32 r1, int32_t r2, TCGv_i32 r3, TCGv_i32 r4)
{
- TCGv temp = tcg_constant_i32(r2);
+ TCGv_i32 temp = tcg_constant_i32(r2);
gen_cond_add(cond, r1, temp, r3, r4);
}
-static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
+static void gen_sub_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
- tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_i32(result, r1, r2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(temp, r1, r2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(temp, r1, r2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
-static inline void
-gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+static void gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 result = tcg_temp_new_i64();
@@ -1447,87 +1459,88 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_and_i64(t1, t1, t0);
tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
tcg_gen_extrh_i64_i32(temp, result);
- tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
- tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
tcg_gen_mov_i64(ret, result);
}
-static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_sub_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv result = tcg_temp_new();
- TCGv temp = tcg_temp_new();
+ TCGv_i32 result = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_i32(result, r1, r2);
/* calc C bit */
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_PSW_C, r1, r2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(temp, r1, r2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(temp, r1, r2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
-static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subc_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new();
- tcg_gen_not_tl(temp, r2);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_not_i32(temp, r2);
gen_addc_CC(ret, r1, temp);
}
-static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
- TCGv r4)
+static void gen_cond_sub(TCGCond cond,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, TCGv_i32 r4)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv result = tcg_temp_new();
- TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_constant_i32(0);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
+ TCGv_i32 mask = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
- tcg_gen_setcond_tl(cond, mask, r4, t0);
- tcg_gen_shli_tl(mask, mask, 31);
+ tcg_gen_setcond_i32(cond, mask, r4, t0);
+ tcg_gen_shli_i32(mask, mask, 31);
- tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_i32(result, r1, r2);
/* Calc PSW_V */
- tcg_gen_xor_tl(temp, result, r1);
- tcg_gen_xor_tl(temp2, r1, r2);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ tcg_gen_xor_i32(temp, result, r1);
+ tcg_gen_xor_i32(temp2, r1, r2);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_movcond_i32(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
/* Set PSW_SV */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SV, temp, cpu_PSW_SV);
/* calc AV bit */
- tcg_gen_add_tl(temp, result, result);
- tcg_gen_xor_tl(temp, temp, result);
- tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(temp, result, result);
+ tcg_gen_xor_i32(temp, temp, result);
+ tcg_gen_movcond_i32(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SAV, temp, cpu_PSW_SAV);
/* write back result */
- tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+ tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
-static inline void
-gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msub_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1548,14 +1561,15 @@ gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_sub_tl, tcg_gen_sub_tl);
}
-static inline void
-gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubs_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -1574,20 +1588,21 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_subs(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_subs(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void
-gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubm_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -1611,11 +1626,12 @@ gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
-static inline void
-gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubms_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
@@ -1637,11 +1653,12 @@ gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
- uint32_t mode)
+static void gen_msubr64_h(TCGv_i32 ret,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1660,22 +1677,24 @@ gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
gen_helper_subr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
-gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubr32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
-gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
- uint32_t n, uint32_t mode)
+static void gen_msubr64s_h(TCGv_i32 ret,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1694,36 +1713,37 @@ gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
gen_helper_subr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
-gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubr32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
-gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_msubr_q(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, uint32_t n)
{
- TCGv temp = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_constant_i32(n);
gen_helper_msubr_q(ret, tcg_env, r1, r2, r3, temp);
}
-static inline void
-gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_msubrs_q(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, uint32_t n)
{
- TCGv temp = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_constant_i32(n);
gen_helper_msubr_q_ssov(ret, tcg_env, r1, r2, r3, temp);
}
-static inline void
-gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_msub32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1748,70 +1768,72 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
- tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp3, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, temp3);
+ tcg_gen_mov_i32(ret, temp3);
}
-static inline void
-gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16sub32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_sub_d(ret, arg1, temp);
}
-static inline void
-gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16subs32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_subs(ret, arg1, temp);
}
-static inline void
-gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16sub64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1821,23 +1843,23 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t3);
}
-static inline void
-gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16subs64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1847,15 +1869,15 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t1);
}
-static inline void
-gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_msub64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t4 = tcg_temp_new_i64();
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
tcg_gen_ext_i32_i64(t2, arg2);
@@ -1875,29 +1897,29 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
if (n == 1) {
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_shli_i32(temp, temp, 31);
/* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp);
}
/* write back result */
tcg_gen_extr_i64_i32(rl, rh, t4);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
- tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
-gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_msubs32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1918,25 +1940,26 @@ gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
gen_helper_msub32_q_sub_ssov(ret, tcg_env, t1, t3);
}
-static inline void
-gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_msubs64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
gen_helper_msub64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
-static inline void
-gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubad_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1957,11 +1980,12 @@ gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_add_tl, tcg_gen_sub_tl);
}
-static inline void
-gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadm_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -1990,12 +2014,13 @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
-static inline void
-gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadr32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -2011,19 +2036,20 @@ gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2);
}
-static inline void
-gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubads_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -2042,20 +2068,21 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_adds(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_subs(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void
-gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadms_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
@@ -2083,12 +2110,13 @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadr32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -2104,166 +2132,168 @@ gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
-static inline void gen_abs(TCGv ret, TCGv r1)
+static void gen_abs(TCGv_i32 ret, TCGv_i32 r1)
{
- tcg_gen_abs_tl(ret, r1);
+ tcg_gen_abs_i32(ret, r1);
/* overflow can only happen, if r1 = 0x80000000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
+static void gen_absdif(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
- tcg_gen_sub_tl(result, r1, r2);
- tcg_gen_sub_tl(temp, r2, r1);
- tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
+ tcg_gen_sub_i32(result, r1, r2);
+ tcg_gen_sub_i32(temp, r2, r1);
+ tcg_gen_movcond_i32(TCG_COND_GT, result, r1, r2, result, temp);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(temp, result, r2);
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
- tcg_gen_xor_tl(temp, r1, r2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(temp, result, r2);
+ tcg_gen_movcond_i32(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(temp, r1, r2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
-static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
+static void gen_absdifi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_absdif(ret, r1, temp);
}
-static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
+static void gen_absdifsi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_absdif_ssov(ret, tcg_env, r1, temp);
}
-static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
+static void gen_mul_i32s(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv high = tcg_temp_new();
- TCGv low = tcg_temp_new();
+ TCGv_i32 high = tcg_temp_new_i32();
+ TCGv_i32 low = tcg_temp_new_i32();
- tcg_gen_muls2_tl(low, high, r1, r2);
- tcg_gen_mov_tl(ret, low);
+ tcg_gen_muls2_i32(low, high, r1, r2);
+ tcg_gen_mov_i32(ret, low);
/* calc V bit */
- tcg_gen_sari_tl(low, low, 31);
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_sari_i32(low, low, 31);
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_PSW_V, high, low);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
+static void gen_muli_i32s(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_mul_i32s(ret, r1, temp);
}
-static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+static void gen_mul_i64s(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2)
{
- tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
+ tcg_gen_muls2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
- int32_t con)
+static void gen_muli_i64s(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_mul_i64s(ret_low, ret_high, r1, temp);
}
-static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+static void gen_mul_i64u(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2)
{
- tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
+ tcg_gen_mulu2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
- int32_t con)
+static void gen_muli_i64u(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_mul_i64u(ret_low, ret_high, r1, temp);
}
-static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
+static void gen_mulsi_i32(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_mul_ssov(ret, tcg_env, r1, temp);
}
-static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
+static void gen_mulsui_i32(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_mul_suov(ret, tcg_env, r1, temp);
}
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
-static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddsi_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_madd32_ssov(ret, tcg_env, r1, r2, temp);
}
-static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddsui_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_madd32_suov(ret, tcg_env, r1, r2, temp);
}
-static void
-gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
+static void gen_mul_q(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2,
+ uint32_t n, uint32_t up_shift)
{
TCGv_i64 temp_64 = tcg_temp_new_i64();
TCGv_i64 temp2_64 = tcg_temp_new_i64();
if (n == 0) {
if (up_shift == 32) {
- tcg_gen_muls2_tl(rh, rl, arg1, arg2);
+ tcg_gen_muls2_i32(rh, rl, arg1, arg2);
} else if (up_shift == 16) {
tcg_gen_ext_i32_i64(temp_64, arg1);
tcg_gen_ext_i32_i64(temp2_64, arg2);
@@ -2272,10 +2302,10 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
tcg_gen_shri_i64(temp_64, temp_64, up_shift);
tcg_gen_extr_i64_i32(rl, rh, temp_64);
} else {
- tcg_gen_muls2_tl(rl, rh, arg1, arg2);
+ tcg_gen_muls2_i32(rl, rh, arg1, arg2);
}
/* reset v bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
} else { /* n is expected to be 1 */
tcg_gen_ext_i32_i64(temp_64, arg1);
tcg_gen_ext_i32_i64(temp2_64, arg2);
@@ -2290,79 +2320,78 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
tcg_gen_extr_i64_i32(rl, rh, temp_64);
/* overflow only occurs if r1 = r2 = 0x8000 */
if (up_shift == 0) {/* result is 64 bit */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, rh,
0x80000000);
} else { /* result is 32 bit */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, rl,
0x80000000);
}
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc sv overflow bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
}
/* calc av overflow bit */
if (up_shift == 0) {
- tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
- tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV);
} else {
- tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
- tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rl, rl);
+ tcg_gen_xor_i32(cpu_PSW_AV, rl, cpu_PSW_AV);
}
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void
-gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+static void gen_mul_q_16(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, uint32_t n)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
if (n == 0) {
- tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_mul_i32(ret, arg1, arg2);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(ret, arg1, arg2);
- tcg_gen_shli_tl(ret, ret, 1);
+ tcg_gen_mul_i32(ret, arg1, arg2);
+ tcg_gen_shli_i32(ret, ret, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
- tcg_gen_sub_tl(ret, ret, temp);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, ret, 0x80000000);
+ tcg_gen_sub_i32(ret, ret, temp);
}
/* reset v bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc av overflow bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+static void gen_mulr_q(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, uint32_t n)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
if (n == 0) {
- tcg_gen_mul_tl(ret, arg1, arg2);
- tcg_gen_addi_tl(ret, ret, 0x8000);
+ tcg_gen_mul_i32(ret, arg1, arg2);
+ tcg_gen_addi_i32(ret, ret, 0x8000);
} else {
- tcg_gen_mul_tl(ret, arg1, arg2);
- tcg_gen_shli_tl(ret, ret, 1);
- tcg_gen_addi_tl(ret, ret, 0x8000);
+ tcg_gen_mul_i32(ret, arg1, arg2);
+ tcg_gen_shli_i32(ret, ret, 1);
+ tcg_gen_addi_i32(ret, ret, 0x8000);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
- tcg_gen_muli_tl(temp, temp, 0x8001);
- tcg_gen_sub_tl(ret, ret, temp);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, ret, 0x80008000);
+ tcg_gen_muli_i32(temp, temp, 0x8001);
+ tcg_gen_sub_i32(ret, ret, temp);
}
/* reset v bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc av overflow bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* cut halfword off */
- tcg_gen_andi_tl(ret, ret, 0xffff0000);
+ tcg_gen_andi_i32(ret, ret, 0xffff0000);
}
-static inline void
-gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_madds_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2370,17 +2399,17 @@ gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddsi_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
-gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_maddsu_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2388,29 +2417,29 @@ gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddsui_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low,
+ TCGv_i32 r2_high, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubsi_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_msub32_ssov(ret, tcg_env, r1, r2, temp);
}
-static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubsui_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_msub32_suov(ret, tcg_env, r1, r2, temp);
}
-static inline void
-gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msubs_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2418,17 +2447,17 @@ gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubsi_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
-gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msubsu_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2436,310 +2465,312 @@ gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
-gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubsui_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
+static void gen_saturate(TCGv_i32 ret, TCGv_i32 arg, int32_t up, int32_t low)
{
- tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low));
- tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up));
+ tcg_gen_smax_i32(ret, arg, tcg_constant_i32(low));
+ tcg_gen_smin_i32(ret, ret, tcg_constant_i32(up));
}
-static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
+static void gen_saturate_u(TCGv_i32 ret, TCGv_i32 arg, int32_t up)
{
- tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up));
+ tcg_gen_umin_i32(ret, arg, tcg_constant_i32(up));
}
-static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
+static void gen_shi(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count)
{
if (shift_count == -32) {
- tcg_gen_movi_tl(ret, 0);
+ tcg_gen_movi_i32(ret, 0);
} else if (shift_count >= 0) {
- tcg_gen_shli_tl(ret, r1, shift_count);
+ tcg_gen_shli_i32(ret, r1, shift_count);
} else {
- tcg_gen_shri_tl(ret, r1, -shift_count);
+ tcg_gen_shri_i32(ret, r1, -shift_count);
}
}
-static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
+static void gen_sh_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t shiftcount)
{
- TCGv temp_low, temp_high;
+ TCGv_i32 temp_low, temp_high;
if (shiftcount == -16) {
- tcg_gen_movi_tl(ret, 0);
+ tcg_gen_movi_i32(ret, 0);
} else {
- temp_high = tcg_temp_new();
- temp_low = tcg_temp_new();
+ temp_high = tcg_temp_new_i32();
+ temp_low = tcg_temp_new_i32();
- tcg_gen_andi_tl(temp_low, r1, 0xffff);
- tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
+ tcg_gen_andi_i32(temp_low, r1, 0xffff);
+ tcg_gen_andi_i32(temp_high, r1, 0xffff0000);
gen_shi(temp_low, temp_low, shiftcount);
gen_shi(ret, temp_high, shiftcount);
- tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
+ tcg_gen_deposit_i32(ret, ret, temp_low, 0, 16);
}
}
-static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
+static void gen_shaci(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count)
{
uint32_t msk, msk_start;
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (shift_count == 0) {
/* Clear PSW.C and PSW.V */
- tcg_gen_movi_tl(cpu_PSW_C, 0);
- tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
- tcg_gen_mov_tl(ret, r1);
+ tcg_gen_movi_i32(cpu_PSW_C, 0);
+ tcg_gen_mov_i32(cpu_PSW_V, cpu_PSW_C);
+ tcg_gen_mov_i32(ret, r1);
} else if (shift_count == -32) {
/* set PSW.C */
- tcg_gen_mov_tl(cpu_PSW_C, r1);
+ tcg_gen_mov_i32(cpu_PSW_C, r1);
/* fill ret completely with sign bit */
- tcg_gen_sari_tl(ret, r1, 31);
+ tcg_gen_sari_i32(ret, r1, 31);
/* clear PSW.V */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
} else if (shift_count > 0) {
- TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
- TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
+ TCGv_i32 t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
+ TCGv_i32 t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
/* calc carry */
msk_start = 32 - shift_count;
msk = ((1 << shift_count) - 1) << msk_start;
- tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ tcg_gen_andi_i32(cpu_PSW_C, r1, msk);
/* calc v/sv bits */
- tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
- tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
- tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcond_i32(TCG_COND_GT, temp, r1, t_max);
+ tcg_gen_setcond_i32(TCG_COND_LT, temp2, r1, t_min);
+ tcg_gen_or_i32(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
/* do shift */
- tcg_gen_shli_tl(ret, r1, shift_count);
+ tcg_gen_shli_i32(ret, r1, shift_count);
} else {
/* clear PSW.V */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc carry */
msk = (1 << -shift_count) - 1;
- tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ tcg_gen_andi_i32(cpu_PSW_C, r1, msk);
/* do shift */
- tcg_gen_sari_tl(ret, r1, -shift_count);
+ tcg_gen_sari_i32(ret, r1, -shift_count);
}
/* calc av overflow bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
+static void gen_shas(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_sha_ssov(ret, tcg_env, r1, r2);
}
-static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
+static void gen_shasi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_shas(ret, r1, temp);
}
-static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
+static void gen_sha_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count)
{
- TCGv low, high;
+ TCGv_i32 low, high;
if (shift_count == 0) {
- tcg_gen_mov_tl(ret, r1);
+ tcg_gen_mov_i32(ret, r1);
} else if (shift_count > 0) {
- low = tcg_temp_new();
- high = tcg_temp_new();
+ low = tcg_temp_new_i32();
+ high = tcg_temp_new_i32();
- tcg_gen_andi_tl(high, r1, 0xffff0000);
- tcg_gen_shli_tl(low, r1, shift_count);
- tcg_gen_shli_tl(ret, high, shift_count);
- tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+ tcg_gen_andi_i32(high, r1, 0xffff0000);
+ tcg_gen_shli_i32(low, r1, shift_count);
+ tcg_gen_shli_i32(ret, high, shift_count);
+ tcg_gen_deposit_i32(ret, ret, low, 0, 16);
} else {
- low = tcg_temp_new();
- high = tcg_temp_new();
+ low = tcg_temp_new_i32();
+ high = tcg_temp_new_i32();
- tcg_gen_ext16s_tl(low, r1);
- tcg_gen_sari_tl(low, low, -shift_count);
- tcg_gen_sari_tl(ret, r1, -shift_count);
- tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+ tcg_gen_ext16s_i32(low, r1);
+ tcg_gen_sari_i32(low, low, -shift_count);
+ tcg_gen_sari_i32(ret, r1, -shift_count);
+ tcg_gen_deposit_i32(ret, ret, low, 0, 16);
}
}
/* ret = {ret[30:0], (r1 cond r2)}; */
-static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
+static void gen_sh_cond(int cond, TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
- tcg_gen_shli_tl(temp, ret, 1);
- tcg_gen_setcond_tl(cond, temp2, r1, r2);
- tcg_gen_or_tl(ret, temp, temp2);
+ tcg_gen_shli_i32(temp, ret, 1);
+ tcg_gen_setcond_i32(cond, temp2, r1, r2);
+ tcg_gen_or_i32(ret, temp, temp2);
}
-static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
+static void gen_sh_condi(int cond, TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_sh_cond(cond, ret, r1, temp);
}
-static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
+static void gen_adds(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_add_ssov(ret, tcg_env, r1, r2);
}
-static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
+static void gen_addsi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_add_ssov(ret, tcg_env, r1, temp);
}
-static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
+static void gen_addsui(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_add_suov(ret, tcg_env, r1, temp);
}
-static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subs(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_sub_ssov(ret, tcg_env, r1, r2);
}
-static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subsu(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_sub_suov(ret, tcg_env, r1, r2);
}
-static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
- int pos1, int pos2,
- void(*op1)(TCGv, TCGv, TCGv),
- void(*op2)(TCGv, TCGv, TCGv))
+static void gen_bit_2op(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32),
+ void(*op2)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp1, temp2;
+ TCGv_i32 temp1, temp2;
- temp1 = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp1 = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
- tcg_gen_shri_tl(temp2, r2, pos2);
- tcg_gen_shri_tl(temp1, r1, pos1);
+ tcg_gen_shri_i32(temp2, r2, pos2);
+ tcg_gen_shri_i32(temp1, r1, pos1);
(*op1)(temp1, temp1, temp2);
(*op2)(temp1 , ret, temp1);
- tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
+ tcg_gen_deposit_i32(ret, ret, temp1, 0, 1);
}
/* ret = r1[pos1] op1 r2[pos2]; */
-static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
- int pos1, int pos2,
- void(*op1)(TCGv, TCGv, TCGv))
+static void gen_bit_1op(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp1, temp2;
+ TCGv_i32 temp1, temp2;
- temp1 = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp1 = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
- tcg_gen_shri_tl(temp2, r2, pos2);
- tcg_gen_shri_tl(temp1, r1, pos1);
+ tcg_gen_shri_i32(temp2, r2, pos2);
+ tcg_gen_shri_i32(temp1, r1, pos1);
(*op1)(ret, temp1, temp2);
- tcg_gen_andi_tl(ret, ret, 0x1);
+ tcg_gen_andi_i32(ret, ret, 0x1);
}
-static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
- void(*op)(TCGv, TCGv, TCGv))
+static void gen_accumulating_cond(int cond, TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2,
+ void(*op)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
/* temp = (arg1 cond arg2 )*/
- tcg_gen_setcond_tl(cond, temp, r1, r2);
+ tcg_gen_setcond_i32(cond, temp, r1, r2);
/* temp2 = ret[0]*/
- tcg_gen_andi_tl(temp2, ret, 0x1);
+ tcg_gen_andi_i32(temp2, ret, 0x1);
/* temp = temp insn temp2 */
(*op)(temp, temp, temp2);
/* ret = {ret[31:1], temp} */
- tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
+ tcg_gen_deposit_i32(ret, ret, temp, 0, 1);
}
-static inline void
-gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
- void(*op)(TCGv, TCGv, TCGv))
+static void gen_accumulating_condi(int cond, TCGv_i32 ret, TCGv_i32 r1,
+ int32_t con,
+ void(*op)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_accumulating_cond(cond, ret, r1, temp, op);
}
-static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
+static void gen_eqany_bi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv b0 = tcg_temp_new();
- TCGv b1 = tcg_temp_new();
- TCGv b2 = tcg_temp_new();
- TCGv b3 = tcg_temp_new();
+ TCGv_i32 b0 = tcg_temp_new_i32();
+ TCGv_i32 b1 = tcg_temp_new_i32();
+ TCGv_i32 b2 = tcg_temp_new_i32();
+ TCGv_i32 b3 = tcg_temp_new_i32();
/* byte 0 */
- tcg_gen_andi_tl(b0, r1, 0xff);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
+ tcg_gen_andi_i32(b0, r1, 0xff);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b0, b0, con & 0xff);
/* byte 1 */
- tcg_gen_andi_tl(b1, r1, 0xff00);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
+ tcg_gen_andi_i32(b1, r1, 0xff00);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b1, b1, con & 0xff00);
/* byte 2 */
- tcg_gen_andi_tl(b2, r1, 0xff0000);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
+ tcg_gen_andi_i32(b2, r1, 0xff0000);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b2, b2, con & 0xff0000);
/* byte 3 */
- tcg_gen_andi_tl(b3, r1, 0xff000000);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
+ tcg_gen_andi_i32(b3, r1, 0xff000000);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b3, b3, con & 0xff000000);
/* combine them */
- tcg_gen_or_tl(ret, b0, b1);
- tcg_gen_or_tl(ret, ret, b2);
- tcg_gen_or_tl(ret, ret, b3);
+ tcg_gen_or_i32(ret, b0, b1);
+ tcg_gen_or_i32(ret, ret, b2);
+ tcg_gen_or_i32(ret, ret, b3);
}
-static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
+static void gen_eqany_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv h0 = tcg_temp_new();
- TCGv h1 = tcg_temp_new();
+ TCGv_i32 h0 = tcg_temp_new_i32();
+ TCGv_i32 h1 = tcg_temp_new_i32();
/* halfword 0 */
- tcg_gen_andi_tl(h0, r1, 0xffff);
- tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
+ tcg_gen_andi_i32(h0, r1, 0xffff);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, h0, h0, con & 0xffff);
/* halfword 1 */
- tcg_gen_andi_tl(h1, r1, 0xffff0000);
- tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
+ tcg_gen_andi_i32(h1, r1, 0xffff0000);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, h1, h1, con & 0xffff0000);
/* combine them */
- tcg_gen_or_tl(ret, h0, h1);
+ tcg_gen_or_i32(ret, h0, h1);
}
/* mask = ((1 << width) -1) << pos;
ret = (r1 & ~mask) | (r2 << pos) & mask); */
-static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
+static void gen_insert(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 width, TCGv_i32 pos)
{
- TCGv mask = tcg_temp_new();
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 mask = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
- tcg_gen_shl_tl(mask, tcg_constant_tl(1), width);
- tcg_gen_subi_tl(mask, mask, 1);
- tcg_gen_shl_tl(mask, mask, pos);
+ tcg_gen_shl_i32(mask, tcg_constant_i32(1), width);
+ tcg_gen_subi_i32(mask, mask, 1);
+ tcg_gen_shl_i32(mask, mask, pos);
- tcg_gen_shl_tl(temp, r2, pos);
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_andc_tl(temp2, r1, mask);
- tcg_gen_or_tl(ret, temp, temp2);
+ tcg_gen_shl_i32(temp, r2, pos);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_andc_i32(temp2, r1, mask);
+ tcg_gen_or_i32(ret, temp, temp2);
}
-static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
+static void gen_bsplit(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 r1)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -2747,7 +2778,7 @@ static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
+static void gen_unpack(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 r1)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -2755,8 +2786,9 @@ static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static inline void
-gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+static void gen_dvinit_b(DisasContext *ctx,
+ TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 r1, TCGv_i32 r2)
{
TCGv_i64 ret = tcg_temp_new_i64();
@@ -2768,8 +2800,9 @@ gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
tcg_gen_extr_i64_i32(rl, rh, ret);
}
-static inline void
-gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+static void gen_dvinit_h(DisasContext *ctx,
+ TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 r1, TCGv_i32 r2)
{
TCGv_i64 ret = tcg_temp_new_i64();
@@ -2781,47 +2814,47 @@ gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
tcg_gen_extr_i64_i32(rl, rh, ret);
}
-static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
+static void gen_calc_usb_mul_h(TCGv_i32 arg_low, TCGv_i32 arg_high)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
/* calc AV bit */
- tcg_gen_add_tl(temp, arg_low, arg_low);
- tcg_gen_xor_tl(temp, temp, arg_low);
- tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
- tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ tcg_gen_add_i32(temp, arg_low, arg_low);
+ tcg_gen_xor_i32(temp, temp, arg_low);
+ tcg_gen_add_i32(cpu_PSW_AV, arg_high, arg_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, cpu_PSW_AV, arg_high);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
}
-static void gen_calc_usb_mulr_h(TCGv arg)
+static void gen_calc_usb_mulr_h(TCGv_i32 arg)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
/* calc AV bit */
- tcg_gen_add_tl(temp, arg, arg);
- tcg_gen_xor_tl(temp, temp, arg);
- tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ tcg_gen_add_i32(temp, arg, arg);
+ tcg_gen_xor_i32(temp, temp, arg);
+ tcg_gen_shli_i32(cpu_PSW_AV, temp, 16);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* clear V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
}
/* helpers for generating program flow micro-ops */
-static inline void gen_save_pc(target_ulong pc)
+static void gen_save_pc(vaddr pc)
{
- tcg_gen_movi_tl(cpu_PC, pc);
+ tcg_gen_movi_i32(cpu_PC, pc);
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_index, vaddr dest)
{
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_index);
gen_save_pc(dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_index);
} else {
gen_save_pc(dest);
tcg_gen_lookup_and_goto_ptr();
@@ -2839,11 +2872,11 @@ static void generate_trap(DisasContext *ctx, int class, int tin)
ctx->base.is_jmp = DISAS_NORETURN;
}
-static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
- TCGv r2, int16_t address)
+static void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv_i32 r1,
+ TCGv_i32 r2, int16_t address)
{
TCGLabel *jumpLabel = gen_new_label();
- tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);
+ tcg_gen_brcond_i32(cond, r1, r2, jumpLabel);
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
@@ -2851,10 +2884,10 @@ static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2);
}
-static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
- int r2, int16_t address)
+static void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv_i32 r1,
+ int r2, int16_t address)
{
- TCGv temp = tcg_constant_i32(r2);
+ TCGv_i32 temp = tcg_constant_i32(r2);
gen_branch_cond(ctx, cond, r1, temp, address);
}
@@ -2862,8 +2895,8 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
TCGLabel *l1 = gen_new_label();
- tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
+ tcg_gen_subi_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
gen_goto_tb(ctx, 1, ctx->base.pc_next + offset);
gen_set_label(l1);
gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
@@ -2871,29 +2904,29 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
static void gen_fcall_save_ctx(DisasContext *ctx)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
- tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
- tcg_gen_mov_tl(cpu_gpr_a[10], temp);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[10], -4);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_mov_i32(cpu_gpr_a[10], temp);
}
static void gen_fret(DisasContext *ctx)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
- tcg_gen_mov_tl(cpu_PC, temp);
+ tcg_gen_andi_i32(temp, cpu_gpr_a[11], ~0x1);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_i32(cpu_gpr_a[10], cpu_gpr_a[10], 4);
+ tcg_gen_mov_i32(cpu_PC, temp);
ctx->base.is_jmp = DISAS_EXIT;
}
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
int r2 , int32_t constant , int32_t offset)
{
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
int n;
switch (opc) {
@@ -2930,13 +2963,13 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
break;
/* SBRN-format jumps */
case OPC1_16_SBRN_JZ_T:
- temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ temp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant);
gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
break;
case OPC1_16_SBRN_JNZ_T:
- temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ temp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant);
gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
break;
/* SBR-format jumps */
@@ -2985,7 +3018,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
break;
/* SR-format jumps */
case OPC1_16_SR_JI:
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
ctx->base.is_jmp = DISAS_EXIT;
break;
case OPC2_32_SYS_RET:
@@ -3007,13 +3040,13 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
break;
case OPC1_32_B_JLA:
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
/* fall through */
case OPC1_32_B_JA:
gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
break;
case OPC1_32_B_JL:
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
break;
/* BOL format */
@@ -3043,16 +3076,16 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
}
break;
case OPCM_32_BRC_JNE:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* subi is unconditional */
- tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_subi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
} else {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* addi is unconditional */
- tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_addi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
}
break;
@@ -3060,8 +3093,8 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
case OPCM_32_BRN_JTT:
n = MASK_OP_BRN_N(ctx->opcode);
- temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
+ temp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r1], (1 << n));
if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
@@ -3115,21 +3148,21 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
}
break;
case OPCM_32_BRR_JNE:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* also save r2, in case of r1 == r2, so r2 is not decremented */
- tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(temp2, cpu_gpr_d[r2]);
/* subi is unconditional */
- tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_subi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
} else {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* also save r2, in case of r1 == r2, so r2 is not decremented */
- tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(temp2, cpu_gpr_d[r2]);
/* addi is unconditional */
- tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_addi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
}
break;
@@ -3154,7 +3187,7 @@ static void decode_src_opc(DisasContext *ctx, int op1)
{
int r1;
int32_t const4;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
r1 = MASK_OP_SRC_S1D(ctx->opcode);
const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);
@@ -3170,7 +3203,7 @@ static void decode_src_opc(DisasContext *ctx, int op1)
gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
break;
case OPC1_16_SRC_ADD_A:
- tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
+ tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
break;
case OPC1_16_SRC_CADD:
gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
@@ -3181,37 +3214,37 @@ static void decode_src_opc(DisasContext *ctx, int op1)
cpu_gpr_d[15]);
break;
case OPC1_16_SRC_CMOV:
- temp = tcg_constant_tl(0);
- temp2 = tcg_constant_tl(const4);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const4);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
case OPC1_16_SRC_CMOVN:
- temp = tcg_constant_tl(0);
- temp2 = tcg_constant_tl(const4);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const4);
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
case OPC1_16_SRC_EQ:
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
const4);
break;
case OPC1_16_SRC_LT:
- tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcondi_i32(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
const4);
break;
case OPC1_16_SRC_MOV:
- tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ tcg_gen_movi_i32(cpu_gpr_d[r1], const4);
break;
case OPC1_16_SRC_MOV_A:
const4 = MASK_OP_SRC_CONST4(ctx->opcode);
- tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
+ tcg_gen_movi_i32(cpu_gpr_a[r1], const4);
break;
case OPC1_16_SRC_MOV_E:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r1);
- tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
- tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31);
+ tcg_gen_movi_i32(cpu_gpr_d[r1], const4);
+ tcg_gen_sari_i32(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], 31);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -3230,7 +3263,7 @@ static void decode_src_opc(DisasContext *ctx, int op1)
static void decode_srr_opc(DisasContext *ctx, int op1)
{
int r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_SRR_S1D(ctx->opcode);
r2 = MASK_OP_SRR_S2(ctx->opcode);
@@ -3246,49 +3279,49 @@ static void decode_srr_opc(DisasContext *ctx, int op1)
gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_ADD_A:
- tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC1_16_SRR_ADDS:
gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_AND:
- tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_and_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_CMOV:
- temp = tcg_constant_tl(0);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
case OPC1_16_SRR_CMOVN:
- temp = tcg_constant_tl(0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
case OPC1_16_SRR_EQ:
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_LT:
- tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_MOV:
- tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_MOV_A:
- tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_MOV_AA:
- tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC1_16_SRR_MOV_D:
- tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r1], cpu_gpr_a[r2]);
break;
case OPC1_16_SRR_MUL:
gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_OR:
- tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_or_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_SUB:
gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -3303,7 +3336,7 @@ static void decode_srr_opc(DisasContext *ctx, int op1)
gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_XOR:
- tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_xor_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3319,32 +3352,32 @@ static void decode_ssr_opc(DisasContext *ctx, int op1)
switch (op1) {
case OPC1_16_SSR_ST_A:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
break;
case OPC1_16_SSR_ST_A_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
case OPC1_16_SSR_ST_B:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
break;
case OPC1_16_SSR_ST_B_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
break;
case OPC1_16_SSR_ST_H:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
break;
case OPC1_16_SSR_ST_H_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
break;
case OPC1_16_SSR_ST_W:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
break;
case OPC1_16_SSR_ST_W_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3359,7 +3392,7 @@ static void decode_sc_opc(DisasContext *ctx, int op1)
switch (op1) {
case OPC1_16_SC_AND:
- tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ tcg_gen_andi_i32(cpu_gpr_d[15], cpu_gpr_d[15], const16);
break;
case OPC1_16_SC_BISR:
if (ctx->priv == TRICORE_PRIV_SM) {
@@ -3375,10 +3408,10 @@ static void decode_sc_opc(DisasContext *ctx, int op1)
gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
break;
case OPC1_16_SC_MOV:
- tcg_gen_movi_tl(cpu_gpr_d[15], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[15], const16);
break;
case OPC1_16_SC_OR:
- tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ tcg_gen_ori_i32(cpu_gpr_d[15], cpu_gpr_d[15], const16);
break;
case OPC1_16_SC_ST_A:
gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
@@ -3387,7 +3420,7 @@ static void decode_sc_opc(DisasContext *ctx, int op1)
gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
break;
case OPC1_16_SC_SUB_A:
- tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
+ tcg_gen_subi_i32(cpu_gpr_a[10], cpu_gpr_a[10], const16);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3404,32 +3437,32 @@ static void decode_slr_opc(DisasContext *ctx, int op1)
switch (op1) {
/* SLR-format */
case OPC1_16_SLR_LD_A:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
break;
case OPC1_16_SLR_LD_A_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
case OPC1_16_SLR_LD_BU:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
break;
case OPC1_16_SLR_LD_BU_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
break;
case OPC1_16_SLR_LD_H:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
break;
case OPC1_16_SLR_LD_H_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
break;
case OPC1_16_SLR_LD_W:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
break;
case OPC1_16_SLR_LD_W_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3512,17 +3545,18 @@ static void decode_sr_accu(DisasContext *ctx)
switch (op2) {
case OPC2_16_SR_RSUB:
/* calc V bit -- overflow only if r1 = -0x80000000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V,
+ cpu_gpr_d[r1], -0x80000000);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* sub */
- tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_neg_i32(cpu_gpr_d[r1], cpu_gpr_d[r1]);
/* calc av */
- tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
- tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_xor_i32(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
/* calc sav */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
break;
case OPC2_16_SR_SAT_B:
gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
@@ -3547,7 +3581,7 @@ static void decode_16Bit_opc(DisasContext *ctx)
int r1, r2;
int32_t const16;
int32_t address;
- TCGv temp;
+ TCGv_i32 temp;
op1 = MASK_OP_MAJOR(ctx->opcode);
@@ -3614,9 +3648,9 @@ static void decode_16Bit_opc(DisasContext *ctx)
r2 = MASK_OP_SRRS_S2(ctx->opcode);
r1 = MASK_OP_SRRS_S1D(ctx->opcode);
const16 = MASK_OP_SRRS_N(ctx->opcode);
- temp = tcg_temp_new();
- tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
- tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(temp, cpu_gpr_d[15], const16);
+ tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
break;
/* SLRO-format */
case OPC1_16_SLRO_LD_A:
@@ -3765,7 +3799,7 @@ static void decode_16Bit_opc(DisasContext *ctx)
break;
case OPC1_16_SR_NOT:
r1 = MASK_OP_SR_S1D(ctx->opcode);
- tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_not_i32(cpu_gpr_d[r1], cpu_gpr_d[r1]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3782,7 +3816,7 @@ static void decode_abs_ldw(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3792,18 +3826,18 @@ static void decode_abs_ldw(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_LD_A:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
break;
case OPC2_32_ABS_LD_D:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
break;
case OPC2_32_ABS_LD_DA:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
break;
case OPC2_32_ABS_LD_W:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3815,7 +3849,7 @@ static void decode_abs_ldb(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3825,16 +3859,16 @@ static void decode_abs_ldb(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_LD_B:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
break;
case OPC2_32_ABS_LD_BU:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
break;
case OPC2_32_ABS_LD_H:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
break;
case OPC2_32_ABS_LD_HU:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3846,7 +3880,7 @@ static void decode_abs_ldst_swap(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3897,7 +3931,7 @@ static void decode_abs_store(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3907,18 +3941,18 @@ static void decode_abs_store(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_ST_A:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
break;
case OPC2_32_ABS_ST_D:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
break;
case OPC2_32_ABS_ST_DA:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
break;
case OPC2_32_ABS_ST_W:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3930,7 +3964,7 @@ static void decode_abs_storeb_h(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3940,10 +3974,10 @@ static void decode_abs_storeb_h(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_ST_B:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
break;
case OPC2_32_ABS_ST_H:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4032,7 +4066,7 @@ static void decode_bit_insert(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int pos1, pos2;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_BIT_OP2(ctx->opcode);
r1 = MASK_OP_BIT_S1(ctx->opcode);
r2 = MASK_OP_BIT_S2(ctx->opcode);
@@ -4040,13 +4074,13 @@ static void decode_bit_insert(DisasContext *ctx)
pos1 = MASK_OP_BIT_POS1(ctx->opcode);
pos2 = MASK_OP_BIT_POS2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r2], pos2);
if (op2 == OPC2_32_BIT_INSN_T) {
- tcg_gen_not_tl(temp, temp);
+ tcg_gen_not_i32(temp, temp);
}
- tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
+ tcg_gen_deposit_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
}
static void decode_bit_logical_t2(DisasContext *ctx)
@@ -4131,7 +4165,7 @@ static void decode_bit_sh_logic1(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int pos1, pos2;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_BIT_OP2(ctx->opcode);
r1 = MASK_OP_BIT_S1(ctx->opcode);
@@ -4140,7 +4174,7 @@ static void decode_bit_sh_logic1(DisasContext *ctx)
pos1 = MASK_OP_BIT_POS1(ctx->opcode);
pos2 = MASK_OP_BIT_POS2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_BIT_SH_AND_T:
@@ -4162,8 +4196,8 @@ static void decode_bit_sh_logic1(DisasContext *ctx)
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
- tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
}
static void decode_bit_sh_logic2(DisasContext *ctx)
@@ -4171,7 +4205,7 @@ static void decode_bit_sh_logic2(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int pos1, pos2;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_BIT_OP2(ctx->opcode);
r1 = MASK_OP_BIT_S1(ctx->opcode);
@@ -4180,7 +4214,7 @@ static void decode_bit_sh_logic2(DisasContext *ctx)
pos1 = MASK_OP_BIT_POS1(ctx->opcode);
pos2 = MASK_OP_BIT_POS2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_BIT_SH_NAND_T:
@@ -4202,8 +4236,8 @@ static void decode_bit_sh_logic2(DisasContext *ctx)
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
- tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
}
/* BO-format */
@@ -4214,7 +4248,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4232,14 +4266,14 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
case OPC2_32_BO_CACHEA_I_POSTINC:
/* instruction to access the cache, but we still need to handle
the addressing mode */
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_CACHEA_WI_PREINC:
case OPC2_32_BO_CACHEA_W_PREINC:
case OPC2_32_BO_CACHEA_I_PREINC:
/* instruction to access the cache, but we still need to handle
the addressing mode */
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_CACHEI_WI_SHORTOFF:
case OPC2_32_BO_CACHEI_W_SHORTOFF:
@@ -4250,7 +4284,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
case OPC2_32_BO_CACHEI_W_POSTINC:
case OPC2_32_BO_CACHEI_WI_POSTINC:
if (has_feature(ctx, TRICORE_FEATURE_131)) {
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -4258,7 +4292,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
case OPC2_32_BO_CACHEI_W_PREINC:
case OPC2_32_BO_CACHEI_WI_PREINC:
if (has_feature(ctx, TRICORE_FEATURE_131)) {
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -4267,9 +4301,9 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
break;
case OPC2_32_BO_ST_A_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_A_PREINC:
gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
@@ -4278,82 +4312,84 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_ST_B_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_B_PREINC:
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_ST_D_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_st_2regs(ctx,
+ cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_ST_D_POSTINC:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2]);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_D_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_ST_DA_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_st_2regs(ctx,
+ cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_ST_DA_POSTINC:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_DA_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_ST_H_SHORTOFF:
gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_H_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_H_PREINC:
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_Q_SHORTOFF:
- temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ temp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_Q_POSTINC:
- temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
+ temp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp, cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_Q_PREINC:
- temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ temp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_W_SHORTOFF:
gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
break;
case OPC2_32_BO_ST_W_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_W_PREINC:
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
@@ -4368,102 +4404,102 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp, temp2, t_off10;
+ TCGv_i32 temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
- tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
switch (op2) {
case OPC2_32_BO_CACHEA_WI_BR:
case OPC2_32_BO_CACHEA_W_BR:
case OPC2_32_BO_CACHEA_I_BR:
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_CACHEA_WI_CIRC:
case OPC2_32_BO_CACHEA_W_CIRC:
case OPC2_32_BO_CACHEA_I_CIRC:
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_A_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_A_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_B_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_B_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_D_BR:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_D_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_DA_BR:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_DA_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_H_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_H_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_Q_BR:
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_Q_CIRC:
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_W_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_W_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4475,7 +4511,7 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4487,9 +4523,9 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
break;
case OPC2_32_BO_LD_A_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_A_PREINC:
gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
@@ -4498,9 +4534,9 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
break;
case OPC2_32_BO_LD_B_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_SB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_B_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
@@ -4509,54 +4545,56 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_LD_BU_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_BU_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_LD_D_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_ld_2regs(ctx,
+ cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_LD_D_POSTINC:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2]);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_D_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_LD_DA_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_ld_2regs(ctx,
+ cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_LD_DA_POSTINC:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_DA_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_LD_H_SHORTOFF:
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
break;
case OPC2_32_BO_LD_H_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LESW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_H_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
@@ -4565,34 +4603,34 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_LD_HU_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_HU_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_LD_Q_SHORTOFF:
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
break;
case OPC2_32_BO_LD_Q_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_Q_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
break;
case OPC2_32_BO_LD_W_SHORTOFF:
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
break;
case OPC2_32_BO_LD_W_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_W_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
@@ -4607,109 +4645,109 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int r1, r2;
- TCGv temp, temp2, t_off10;
+ TCGv_i32 temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
- tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
switch (op2) {
case OPC2_32_BO_LD_A_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_A_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_B_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_B_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_BU_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_BU_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_D_BR:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_D_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_DA_BR:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_DA_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_H_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_H_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_HU_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_HU_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_Q_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_Q_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_W_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_W_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4722,7 +4760,7 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
uint32_t off10;
int r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4730,74 +4768,74 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_BO_LDLCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_ldlcx(tcg_env, temp);
break;
case OPC2_32_BO_LDMST_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_ldmst(ctx, r1, temp);
break;
case OPC2_32_BO_LDMST_POSTINC:
gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LDMST_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
break;
case OPC2_32_BO_LDUCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_lducx(tcg_env, temp);
break;
case OPC2_32_BO_LEA_SHORTOFF:
- tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_STLCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_stlcx(tcg_env, temp);
break;
case OPC2_32_BO_STUCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_stucx(tcg_env, temp);
break;
case OPC2_32_BO_SWAP_W_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_swap(ctx, r1, temp);
break;
case OPC2_32_BO_SWAP_W_POSTINC:
gen_swap(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_SWAP_W_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_swap(ctx, r1, cpu_gpr_a[r2]);
break;
case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_cmpswap(ctx, r1, temp);
break;
case OPC2_32_BO_CMPSWAP_W_POSTINC:
gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_CMPSWAP_W_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
break;
case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_swapmsk(ctx, r1, temp);
break;
case OPC2_32_BO_SWAPMSK_W_POSTINC:
gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_SWAPMSK_W_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
break;
default:
@@ -4810,52 +4848,52 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int r1, r2;
- TCGv temp, temp2, t_off10;
+ TCGv_i32 temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
- tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
switch (op2) {
case OPC2_32_BO_LDMST_BR:
gen_ldmst(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LDMST_CIRC:
gen_ldmst(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_SWAP_W_BR:
gen_swap(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_SWAP_W_CIRC:
gen_swap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_CMPSWAP_W_BR:
gen_cmpswap(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_CMPSWAP_W_CIRC:
gen_cmpswap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_SWAPMSK_W_BR:
gen_swapmsk(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_SWAPMSK_W_CIRC:
gen_swapmsk(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4866,7 +4904,7 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1)
{
int r1, r2;
int32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BOL_S1D(ctx->opcode);
r2 = MASK_OP_BOL_S2(ctx->opcode);
@@ -4874,17 +4912,17 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1)
switch (op1) {
case OPC1_32_BOL_LD_A_LONGOFF:
- temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
+ temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
break;
case OPC1_32_BOL_LD_W_LONGOFF:
- temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
+ temp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
break;
case OPC1_32_BOL_LEA_LONGOFF:
- tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
+ tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
break;
case OPC1_32_BOL_ST_A_LONGOFF:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
@@ -4949,7 +4987,7 @@ static void decode_rc_logical_shift(DisasContext *ctx)
uint32_t op2;
int r1, r2;
int32_t const9;
- TCGv temp;
+ TCGv_i32 temp;
r2 = MASK_OP_RC_D(ctx->opcode);
r1 = MASK_OP_RC_S1(ctx->opcode);
@@ -4958,26 +4996,26 @@ static void decode_rc_logical_shift(DisasContext *ctx)
switch (op2) {
case OPC2_32_RC_AND:
- tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_ANDN:
- tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
break;
case OPC2_32_RC_NAND:
- temp = tcg_temp_new();
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_nand_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_NOR:
- temp = tcg_temp_new();
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_nor_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_OR:
- tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_ori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_ORN:
- tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ tcg_gen_ori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
break;
case OPC2_32_RC_SH:
const9 = sextract32(const9, 0, 6);
@@ -4999,11 +5037,11 @@ static void decode_rc_logical_shift(DisasContext *ctx)
gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_XNOR:
- tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
- tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
+ tcg_gen_xori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_not_i32(cpu_gpr_d[r2], cpu_gpr_d[r2]);
break;
case OPC2_32_RC_XOR:
- tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_xori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_SHUFFLE:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
@@ -5024,7 +5062,7 @@ static void decode_rc_accumulator(DisasContext *ctx)
int r1, r2;
int16_t const9;
- TCGv temp;
+ TCGv_i32 temp;
r2 = MASK_OP_RC_D(ctx->opcode);
r1 = MASK_OP_RC_S1(ctx->opcode);
@@ -5032,7 +5070,7 @@ static void decode_rc_accumulator(DisasContext *ctx)
op2 = MASK_OP_RC_OP2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RC_ABSDIF:
@@ -5083,7 +5121,7 @@ static void decode_rc_accumulator(DisasContext *ctx)
const9, &tcg_gen_and_tl);
break;
case OPC2_32_RC_EQ:
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_EQANY_B:
gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
@@ -5092,41 +5130,41 @@ static void decode_rc_accumulator(DisasContext *ctx)
gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_GE:
- tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_GE_U:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
- tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_LT:
- tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_LT_U:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
- tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_MAX:
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_movcond_i32(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_MAX_U:
- tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
- tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_i32(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_MIN:
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_movcond_i32(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_MIN_U:
- tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
- tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_i32(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_NE:
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_OR_EQ:
gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
@@ -5155,15 +5193,15 @@ static void decode_rc_accumulator(DisasContext *ctx)
const9, &tcg_gen_or_tl);
break;
case OPC2_32_RC_RSUB:
- tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movi_i32(temp, const9);
gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
break;
case OPC2_32_RC_RSUBS:
- tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movi_i32(temp, const9);
gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
break;
case OPC2_32_RC_RSUBS_U:
- tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movi_i32(temp, const9);
gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
break;
case OPC2_32_RC_SH_EQ:
@@ -5259,7 +5297,7 @@ static void decode_rc_mul(DisasContext *ctx)
break;
case OPC2_32_RC_MUL_64:
CHECK_REG_PAIR(r2);
- gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2 + 1], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_MULS_32:
gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
@@ -5267,7 +5305,7 @@ static void decode_rc_mul(DisasContext *ctx)
case OPC2_32_RC_MUL_U_64:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
CHECK_REG_PAIR(r2);
- gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2 + 1], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_MULS_U_32:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
@@ -5285,7 +5323,7 @@ static void decode_rcpw_insert(DisasContext *ctx)
int r1, r2;
int32_t pos, width, const4;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RCPW_OP2(ctx->opcode);
r1 = MASK_OP_RCPW_S1(ctx->opcode);
@@ -5299,18 +5337,18 @@ static void decode_rcpw_insert(DisasContext *ctx)
CHECK_REG_PAIR(r2);
/* if pos + width > 32 undefined result */
if (pos + width <= 32) {
- tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
- tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
+ tcg_gen_movi_i32(cpu_gpr_d[r2 + 1], ((1u << width) - 1) << pos);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], (const4 << pos));
}
break;
case OPC2_32_RCPW_INSERT:
- /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
+ /* tcg_gen_deposit_i32() does not handle the case of width = 0 */
if (width == 0) {
- tcg_gen_mov_tl(cpu_gpr_d[r2], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r2], cpu_gpr_d[r1]);
/* if pos + width > 32 undefined result */
} else if (pos + width <= 32) {
temp = tcg_constant_i32(const4);
- tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
+ tcg_gen_deposit_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
}
break;
default:
@@ -5326,7 +5364,7 @@ static void decode_rcrw_insert(DisasContext *ctx)
int r1, r3, r4;
int32_t width, const4;
- TCGv temp, temp2, temp3;
+ TCGv_i32 temp, temp2, temp3;
op2 = MASK_OP_RCRW_OP2(ctx->opcode);
r1 = MASK_OP_RCRW_S1(ctx->opcode);
@@ -5335,24 +5373,24 @@ static void decode_rcrw_insert(DisasContext *ctx)
width = MASK_OP_RCRW_WIDTH(ctx->opcode);
const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RCRW_IMASK:
CHECK_REG_PAIR(r4);
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_movi_tl(temp2, (1 << width) - 1);
- tcg_gen_shl_tl(cpu_gpr_d[r4 + 1], temp2, temp);
- tcg_gen_movi_tl(temp2, const4);
- tcg_gen_shl_tl(cpu_gpr_d[r4], temp2, temp);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp2, (1 << width) - 1);
+ tcg_gen_shl_i32(cpu_gpr_d[r4 + 1], temp2, temp);
+ tcg_gen_movi_i32(temp2, const4);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], temp2, temp);
break;
case OPC2_32_RCRW_INSERT:
- temp3 = tcg_temp_new();
+ temp3 = tcg_temp_new_i32();
- tcg_gen_movi_tl(temp, width);
- tcg_gen_movi_tl(temp2, const4);
- tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp, width);
+ tcg_gen_movi_i32(temp2, const4);
+ tcg_gen_andi_i32(temp3, cpu_gpr_d[r3], 0x1f);
gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], temp2, temp, temp3);
break;
default:
@@ -5368,7 +5406,7 @@ static void decode_rcr_cond_select(DisasContext *ctx)
int r1, r3, r4;
int32_t const9;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RCR_OP2(ctx->opcode);
r1 = MASK_OP_RCR_S1(ctx->opcode);
@@ -5388,13 +5426,13 @@ static void decode_rcr_cond_select(DisasContext *ctx)
case OPC2_32_RCR_SEL:
temp = tcg_constant_i32(0);
temp2 = tcg_constant_i32(const9);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
case OPC2_32_RCR_SELN:
temp = tcg_constant_i32(0);
temp2 = tcg_constant_i32(const9);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
default:
@@ -5422,8 +5460,8 @@ static void decode_rcr_madd(DisasContext *ctx)
case OPC2_32_RCR_MADD_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MADDS_32:
gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
@@ -5431,15 +5469,15 @@ static void decode_rcr_madd(DisasContext *ctx)
case OPC2_32_RCR_MADDS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MADD_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MADDS_U_32:
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
@@ -5449,8 +5487,8 @@ static void decode_rcr_madd(DisasContext *ctx)
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5477,8 +5515,8 @@ static void decode_rcr_msub(DisasContext *ctx)
case OPC2_32_RCR_MSUB_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MSUBS_32:
gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
@@ -5486,15 +5524,15 @@ static void decode_rcr_msub(DisasContext *ctx)
case OPC2_32_RCR_MSUBS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MSUB_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MSUBS_U_32:
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
@@ -5504,8 +5542,8 @@ static void decode_rcr_msub(DisasContext *ctx)
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5532,33 +5570,33 @@ static void decode_rlc_opc(DisasContext *ctx,
gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
break;
case OPC1_32_RLC_ADDIH_A:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
break;
case OPC1_32_RLC_MFCR:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
gen_mfcr(ctx, cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV:
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r2);
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
- tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2 + 1], const16 >> 15);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
break;
case OPC1_32_RLC_MOV_U:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV_H:
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16 << 16);
break;
case OPC1_32_RLC_MOVH_A:
- tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
+ tcg_gen_movi_i32(cpu_gpr_a[r2], const16 << 16);
break;
case OPC1_32_RLC_MTCR:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
@@ -5575,7 +5613,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
uint32_t op2;
int r3, r2, r1;
- TCGv temp;
+ TCGv_i32 temp;
r3 = MASK_OP_RR_D(ctx->opcode);
r2 = MASK_OP_RR_S2(ctx->opcode);
@@ -5672,7 +5710,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
cpu_gpr_d[r2], &tcg_gen_and_tl);
break;
case OPC2_32_RR_EQ:
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_EQ_B:
@@ -5682,7 +5720,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_EQ_W:
- tcg_gen_negsetcond_tl(TCG_COND_EQ, cpu_gpr_d[r3],
+ tcg_gen_negsetcond_i32(TCG_COND_EQ, cpu_gpr_d[r3],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_EQANY_B:
@@ -5692,19 +5730,19 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_GE:
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_GE_U:
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT:
- tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_U:
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_B:
@@ -5720,19 +5758,19 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_W:
- tcg_gen_negsetcond_tl(TCG_COND_LT, cpu_gpr_d[r3],
+ tcg_gen_negsetcond_i32(TCG_COND_LT, cpu_gpr_d[r3],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_WU:
- tcg_gen_negsetcond_tl(TCG_COND_LTU, cpu_gpr_d[r3],
+ tcg_gen_negsetcond_i32(TCG_COND_LTU, cpu_gpr_d[r3],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MAX:
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MAX_U:
- tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MAX_B:
@@ -5748,11 +5786,11 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MIN:
- tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MIN_U:
- tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MIN_B:
@@ -5768,16 +5806,16 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MOV:
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MOV_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
- tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -5785,14 +5823,14 @@ static void decode_rr_accumulator(DisasContext *ctx)
case OPC2_32_RR_MOVS_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r3);
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
- tcg_gen_sari_tl(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_sari_i32(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
break;
case OPC2_32_RR_NE:
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_OR_EQ:
@@ -5925,41 +5963,41 @@ static void decode_rr_logical_shift(DisasContext *ctx)
switch (op2) {
case OPC2_32_RR_AND:
- tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_and_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ANDN:
- tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_andc_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_CLO:
- tcg_gen_not_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
- tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
+ tcg_gen_not_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clzi_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
break;
case OPC2_32_RR_CLO_H:
gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLS:
- tcg_gen_clrsb_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clrsb_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLS_H:
gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLZ:
- tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
+ tcg_gen_clzi_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
break;
case OPC2_32_RR_CLZ_H:
gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_NAND:
- tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_nand_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_NOR:
- tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_nor_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_OR:
- tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_or_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ORN:
- tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_orc_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SH:
gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -5977,10 +6015,10 @@ static void decode_rr_logical_shift(DisasContext *ctx)
gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_XNOR:
- tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_eqv_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_XOR:
- tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_xor_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5991,7 +6029,7 @@ static void decode_rr_address(DisasContext *ctx)
{
uint32_t op2, n;
int r1, r2, r3;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RR_OP2(ctx->opcode);
r3 = MASK_OP_RR_D(ctx->opcode);
@@ -6001,52 +6039,52 @@ static void decode_rr_address(DisasContext *ctx)
switch (op2) {
case OPC2_32_RR_ADD_A:
- tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_ADDSC_A:
- temp = tcg_temp_new();
- tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
- tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(temp, cpu_gpr_d[r1], n);
+ tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
break;
case OPC2_32_RR_ADDSC_AT:
- temp = tcg_temp_new();
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
- tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
- tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
+ temp = tcg_temp_new_i32();
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 3);
+ tcg_gen_add_i32(temp, cpu_gpr_a[r2], temp);
+ tcg_gen_andi_i32(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
break;
case OPC2_32_RR_EQ_A:
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_EQZ:
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
break;
case OPC2_32_RR_GE_A:
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_LT_A:
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_MOV_A:
- tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MOV_AA:
- tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r3], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_MOV_D:
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_NE_A:
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_NEZ_A:
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
break;
case OPC2_32_RR_SUB_A:
- tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_sub_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6063,19 +6101,19 @@ static void decode_rr_idirect(DisasContext *ctx)
switch (op2) {
case OPC2_32_RR_JI:
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
case OPC2_32_RR_JLI:
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
break;
case OPC2_32_RR_CALLI:
gen_helper_1arg(call, ctx->pc_succ_insn);
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
case OPC2_32_RR_FCALLI:
gen_fcall_save_ctx(ctx);
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6089,7 +6127,7 @@ static void decode_rr_divide(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
- TCGv temp, temp2, temp3;
+ TCGv_i32 temp, temp2, temp3;
op2 = MASK_OP_RR_OP2(ctx->opcode);
r3 = MASK_OP_RR_D(ctx->opcode);
@@ -6102,107 +6140,107 @@ static void decode_rr_divide(DisasContext *ctx)
break;
case OPC2_32_RR_BSPLIT:
CHECK_REG_PAIR(r3);
- gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_DVINIT_B:
CHECK_REG_PAIR(r3);
- gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DVINIT_BU:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
- temp3 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
+ temp3 = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
- tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
+ tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 8);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
if (!has_feature(ctx, TRICORE_FEATURE_131)) {
- /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
- tcg_gen_abs_tl(temp, temp3);
- tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ /* overflow = (abs(D[r3 + 1]) >= abs(D[r2])) */
+ tcg_gen_abs_i32(temp, temp3);
+ tcg_gen_abs_i32(temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_PSW_V, temp, temp2);
} else {
/* overflow = (D[b] == 0) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
}
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
- tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp3);
break;
case OPC2_32_RR_DVINIT_H:
CHECK_REG_PAIR(r3);
- gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DVINIT_HU:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
- temp3 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
+ temp3 = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
- tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
+ tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 16);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
if (!has_feature(ctx, TRICORE_FEATURE_131)) {
- /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
- tcg_gen_abs_tl(temp, temp3);
- tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ /* overflow = (abs(D[r3 + 1]) >= abs(D[r2])) */
+ tcg_gen_abs_i32(temp, temp3);
+ tcg_gen_abs_i32(temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_PSW_V, temp, temp2);
} else {
/* overflow = (D[b] == 0) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
}
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
- tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp3);
break;
case OPC2_32_RR_DVINIT:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
/* overflow = ((D[b] == 0) ||
((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
- tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
+ tcg_gen_or_i32(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
/* write result */
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
/* sign extend to high reg */
- tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
+ tcg_gen_sari_i32(cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], 31);
break;
case OPC2_32_RR_DVINIT_U:
CHECK_REG_PAIR(r3);
/* overflow = (D[b] == 0) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
/* write result */
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
/* zero extend to high reg*/
- tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r3 + 1], 0);
break;
case OPC2_32_RR_PARITY:
gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_UNPACK:
CHECK_REG_PAIR(r3);
- gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CRC32_B:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
@@ -6228,7 +6266,7 @@ static void decode_rr_divide(DisasContext *ctx)
case OPC2_32_RR_POPCNT_W:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
- tcg_gen_ctpop_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_ctpop_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -6236,7 +6274,7 @@ static void decode_rr_divide(DisasContext *ctx)
case OPC2_32_RR_DIV:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r3);
- GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6245,7 +6283,7 @@ static void decode_rr_divide(DisasContext *ctx)
case OPC2_32_RR_DIV_U:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r3);
- GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6313,7 +6351,7 @@ static void decode_rr1_mul(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
- TCGv n;
+ TCGv_i32 n;
TCGv_i64 temp64;
r1 = MASK_OP_RR1_S1(ctx->opcode);
@@ -6327,69 +6365,69 @@ static void decode_rr1_mul(DisasContext *ctx)
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MUL_H_32_LU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MUL_H_32_UL:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MUL_H_32_UU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MULM_H_64_LL:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULM_H_64_LU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULM_H_64_UL:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULM_H_64_UU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULR_H_16_LL:
GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
@@ -6418,7 +6456,7 @@ static void decode_rr1_mulq(DisasContext *ctx)
int r1, r2, r3;
uint32_t n;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
r1 = MASK_OP_RR1_S1(ctx->opcode);
r2 = MASK_OP_RR1_S2(ctx->opcode);
@@ -6426,8 +6464,8 @@ static void decode_rr1_mulq(DisasContext *ctx)
n = MASK_OP_RR1_N(ctx->opcode);
op2 = MASK_OP_RR1_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RR1_MUL_Q_32:
@@ -6435,45 +6473,45 @@ static void decode_rr1_mulq(DisasContext *ctx)
break;
case OPC2_32_RR1_MUL_Q_64:
CHECK_REG_PAIR(r3);
- gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, 0);
break;
case OPC2_32_RR1_MUL_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
break;
case OPC2_32_RR1_MUL_Q_64_L:
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n, 0);
break;
case OPC2_32_RR1_MUL_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
break;
case OPC2_32_RR1_MUL_Q_64_U:
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n, 0);
break;
case OPC2_32_RR1_MUL_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RR1_MUL_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RR1_MULR_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RR1_MULR_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
break;
default:
@@ -6497,7 +6535,7 @@ static void decode_rr2_mul(DisasContext *ctx)
break;
case OPC2_32_RR2_MUL_64:
CHECK_REG_PAIR(r3);
- gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MULS_32:
@@ -6506,7 +6544,7 @@ static void decode_rr2_mul(DisasContext *ctx)
break;
case OPC2_32_RR2_MUL_U_64:
CHECK_REG_PAIR(r3);
- gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MULS_U_32:
@@ -6524,7 +6562,7 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int32_t pos, width;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RRPW_OP2(ctx->opcode);
r1 = MASK_OP_RRPW_S1(ctx->opcode);
@@ -6536,35 +6574,35 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
switch (op2) {
case OPC2_32_RRPW_EXTR:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r3], 0);
} else if (pos + width <= 32) {
- tcg_gen_sextract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
+ tcg_gen_sextract_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
}
break;
case OPC2_32_RRPW_EXTR_U:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r3], 0);
} else {
- tcg_gen_extract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
+ tcg_gen_extract_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
}
break;
case OPC2_32_RRPW_IMASK:
CHECK_REG_PAIR(r3);
if (pos + width <= 32) {
- temp = tcg_temp_new();
- tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos);
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
- tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+ temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(temp, ((1u << width) - 1) << pos);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp);
}
break;
case OPC2_32_RRPW_INSERT:
- /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
+ /* tcg_gen_deposit_i32() does not handle the case of width = 0 */
if (width == 0) {
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
} else if (pos + width <= 32) {
- tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ tcg_gen_deposit_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos, width);
}
break;
@@ -6578,7 +6616,7 @@ static void decode_rrr_cond_select(DisasContext *ctx)
{
uint32_t op2;
int r1, r2, r3, r4;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RRR_OP2(ctx->opcode);
r1 = MASK_OP_RRR_S1(ctx->opcode);
@@ -6605,12 +6643,12 @@ static void decode_rrr_cond_select(DisasContext *ctx)
break;
case OPC2_32_RRR_SEL:
temp = tcg_constant_i32(0);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_SELN:
temp = tcg_constant_i32(0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
default:
@@ -6634,49 +6672,49 @@ static void decode_rrr_divide(DisasContext *ctx)
case OPC2_32_RRR_DVADJ:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_DVSTEP:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_DVSTEP_U:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMAX:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMAX_U:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMIN:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMIN_U:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_PACK:
CHECK_REG_PAIR(r3);
gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]);
break;
case OPC2_32_RRR_CRCN:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
@@ -6724,8 +6762,8 @@ static void decode_rrr2_madd(DisasContext *ctx)
case OPC2_32_RRR2_MADD_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_32:
gen_helper_madd32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6734,14 +6772,14 @@ static void decode_rrr2_madd(DisasContext *ctx)
case OPC2_32_RRR2_MADDS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADD_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_U_32:
gen_helper_madd32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6750,8 +6788,8 @@ static void decode_rrr2_madd(DisasContext *ctx)
case OPC2_32_RRR2_MADDS_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6777,8 +6815,8 @@ static void decode_rrr2_msub(DisasContext *ctx)
case OPC2_32_RRR2_MSUB_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_32:
gen_helper_msub32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6787,14 +6825,14 @@ static void decode_rrr2_msub(DisasContext *ctx)
case OPC2_32_RRR2_MSUBS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUB_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_U_32:
gen_helper_msub32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6803,8 +6841,8 @@ static void decode_rrr2_msub(DisasContext *ctx)
case OPC2_32_RRR2_MSUBS_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6828,98 +6866,98 @@ static void decode_rrr1_madd(DisasContext *ctx)
case OPC2_32_RRR1_MADD_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADD_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADD_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADD_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDM_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDM_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDM_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDM_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDMS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDMS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDMS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDMS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDR_H_LL:
gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -6962,7 +7000,7 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
{
uint32_t op2;
uint32_t r1, r2, r3, r4, n;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RRR1_OP2(ctx->opcode);
r1 = MASK_OP_RRR1_S1(ctx->opcode);
@@ -6971,8 +7009,8 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRR1_MADD_Q_32:
@@ -6982,61 +7020,61 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
case OPC2_32_RRR1_MADD_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MADD_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADD_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADD_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADD_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADD_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADD_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADD_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADD_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_32:
gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -7045,90 +7083,90 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
case OPC2_32_RRR1_MADDS_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MADDS_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADDS_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADDS_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADDS_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADDS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDR_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MADDRS_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MADDR_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDR_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDRS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDRS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
default:
@@ -7152,109 +7190,109 @@ static void decode_rrr1_maddsu_h(DisasContext *ctx)
case OPC2_32_RRR1_MADDSU_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSU_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSU_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSU_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUS_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSUS_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSUS_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSUS_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUM_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSUM_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSUM_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSUM_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUR_H_16_LL:
@@ -7310,98 +7348,98 @@ static void decode_rrr1_msub(DisasContext *ctx)
case OPC2_32_RRR1_MSUB_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUB_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUB_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUB_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBM_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBM_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBM_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBM_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBMS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBMS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBMS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBMS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBR_H_LL:
gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -7444,7 +7482,7 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
{
uint32_t op2;
uint32_t r1, r2, r3, r4, n;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RRR1_OP2(ctx->opcode);
r1 = MASK_OP_RRR1_S1(ctx->opcode);
@@ -7453,8 +7491,8 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRR1_MSUB_Q_32:
@@ -7464,61 +7502,61 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
case OPC2_32_RRR1_MSUB_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MSUB_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUB_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUB_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUB_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUB_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUB_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUB_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUB_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_32:
gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -7527,90 +7565,90 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
case OPC2_32_RRR1_MSUBS_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUBS_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUBS_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBR_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MSUBRS_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MSUBR_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBR_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBRS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBRS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
default:
@@ -7634,109 +7672,109 @@ static void decode_rrr1_msubad_h(DisasContext *ctx)
case OPC2_32_RRR1_MSUBAD_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBAD_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBAD_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBAD_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADS_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBADS_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBADS_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBADS_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADM_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBADM_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBADM_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBADM_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADR_H_16_LL:
@@ -7781,7 +7819,7 @@ static void decode_rrrr_extract_insert(DisasContext *ctx)
{
uint32_t op2;
int r1, r2, r3, r4;
- TCGv tmp_width, tmp_pos;
+ TCGv_i32 tmp_width, tmp_pos;
r1 = MASK_OP_RRRR_S1(ctx->opcode);
r2 = MASK_OP_RRRR_S2(ctx->opcode);
@@ -7789,48 +7827,48 @@ static void decode_rrrr_extract_insert(DisasContext *ctx)
r4 = MASK_OP_RRRR_D(ctx->opcode);
op2 = MASK_OP_RRRR_OP2(ctx->opcode);
- tmp_pos = tcg_temp_new();
- tmp_width = tcg_temp_new();
+ tmp_pos = tcg_temp_new_i32();
+ tmp_width = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRRR_DEXTR:
- tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f);
if (r1 == r2) {
- tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_rotl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
} else {
- TCGv msw = tcg_temp_new();
- TCGv zero = tcg_constant_tl(0);
- tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
- tcg_gen_subfi_tl(msw, 32, tmp_pos);
- tcg_gen_shr_tl(msw, cpu_gpr_d[r2], msw);
+ TCGv_i32 msw = tcg_temp_new_i32();
+ TCGv_i32 zero = tcg_constant_i32(0);
+ tcg_gen_shl_i32(tmp_width, cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_i32(msw, 32, tmp_pos);
+ tcg_gen_shr_i32(msw, cpu_gpr_d[r2], msw);
/*
* if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
* behaviour. So check that case here and set the low bits to zero
* which effectivly returns cpu_gpr_d[r1]
*/
- tcg_gen_movcond_tl(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw);
- tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, msw);
+ tcg_gen_movcond_i32(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw);
+ tcg_gen_or_i32(cpu_gpr_d[r4], tmp_width, msw);
}
break;
case OPC2_32_RRRR_EXTR:
case OPC2_32_RRRR_EXTR_U:
CHECK_REG_PAIR(r3);
- tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
- tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
- tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
- tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
- tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
- tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
+ tcg_gen_andi_i32(tmp_width, cpu_gpr_d[r3 + 1], 0x1f);
+ tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_add_i32(tmp_pos, tmp_pos, tmp_width);
+ tcg_gen_subfi_i32(tmp_pos, 32, tmp_pos);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_i32(tmp_width, 32, tmp_width);
if (op2 == OPC2_32_RRRR_EXTR) {
- tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ tcg_gen_sar_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
} else {
- tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ tcg_gen_shr_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
}
break;
case OPC2_32_RRRR_INSERT:
CHECK_REG_PAIR(r3);
- tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
- tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_andi_i32(tmp_width, cpu_gpr_d[r3 + 1], 0x1f);
+ tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f);
gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
tmp_pos);
break;
@@ -7846,7 +7884,7 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
int r1, r2, r3, r4;
int32_t width;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RRRW_OP2(ctx->opcode);
r1 = MASK_OP_RRRW_S1(ctx->opcode);
@@ -7855,39 +7893,39 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
r4 = MASK_OP_RRRW_D(ctx->opcode);
width = MASK_OP_RRRW_WIDTH(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRRW_EXTR:
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_addi_tl(temp, temp, width);
- tcg_gen_subfi_tl(temp, 32, temp);
- tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
- tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_addi_i32(temp, temp, width);
+ tcg_gen_subfi_i32(temp, 32, temp);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_sari_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
break;
case OPC2_32_RRRW_EXTR_U:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r4], 0);
} else {
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
- tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_shr_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_andi_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32 - width));
}
break;
case OPC2_32_RRRW_IMASK:
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(r4);
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_movi_tl(temp2, (1 << width) - 1);
- tcg_gen_shl_tl(temp2, temp2, temp);
- tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
- tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp2, (1 << width) - 1);
+ tcg_gen_shl_i32(temp2, temp2, temp);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
+ tcg_gen_mov_i32(cpu_gpr_d[r4 + 1], temp2);
break;
case OPC2_32_RRRW_INSERT:
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
- tcg_gen_movi_tl(temp, width);
- tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp, width);
+ tcg_gen_andi_i32(temp2, cpu_gpr_d[r3], 0x1f);
gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);
break;
default:
@@ -7901,7 +7939,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
uint32_t op2;
uint32_t r1;
TCGLabel *l1;
- TCGv tmp;
+ TCGv_i32 tmp;
op2 = MASK_OP_SYS_OP2(ctx->opcode);
r1 = MASK_OP_SYS_S1D(ctx->opcode);
@@ -7912,7 +7950,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_DISABLE:
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
+ tcg_gen_andi_i32(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
} else {
generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
}
@@ -7920,9 +7958,9 @@ static void decode_sys_interrupts(DisasContext *ctx)
case OPC2_32_SYS_DISABLE_D:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_extract_tl(cpu_gpr_d[r1], cpu_ICR,
+ tcg_gen_extract_i32(cpu_gpr_d[r1], cpu_ICR,
ctx->icr_ie_offset, 1);
- tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
+ tcg_gen_andi_i32(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
} else {
generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
}
@@ -7933,7 +7971,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_ENABLE:
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_ori_tl(cpu_ICR, cpu_ICR, ctx->icr_ie_mask);
+ tcg_gen_ori_i32(cpu_ICR, cpu_ICR, ctx->icr_ie_mask);
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
} else {
generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
@@ -7955,12 +7993,12 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_RFM:
if (ctx->priv == TRICORE_PRIV_SM) {
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i32();
l1 = gen_new_label();
- tcg_gen_ld32u_tl(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
- tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
- tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
+ tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
+ tcg_gen_andi_i32(tmp, tmp, MASK_DBGSR_DE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 1, l1);
gen_helper_rfm(tcg_env);
gen_set_label(l1);
ctx->base.is_jmp = DISAS_EXIT;
@@ -7977,7 +8015,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
case OPC2_32_SYS_RESTORE:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1],
+ tcg_gen_deposit_i32(cpu_ICR, cpu_ICR, cpu_gpr_d[r1],
ctx->icr_ie_offset, 1);
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
} else {
@@ -7989,13 +8027,13 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_TRAPSV:
l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
+ tcg_gen_brcondi_i32(TCG_COND_GE, cpu_PSW_SV, 0, l1);
generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
gen_set_label(l1);
break;
case OPC2_32_SYS_TRAPV:
l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
+ tcg_gen_brcondi_i32(TCG_COND_GE, cpu_PSW_V, 0, l1);
generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
gen_set_label(l1);
break;
@@ -8011,7 +8049,7 @@ static void decode_32Bit_opc(DisasContext *ctx)
int32_t address, const16;
int8_t b, const4;
int32_t bpos;
- TCGv temp, temp2, temp3;
+ TCGv_i32 temp, temp2, temp3;
op1 = MASK_OP_MAJOR(ctx->opcode);
@@ -8044,18 +8082,18 @@ static void decode_32Bit_opc(DisasContext *ctx)
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
- tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shri_i32(temp2, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_LEUW);
break;
case OPC1_32_ABS_LD_Q:
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
break;
case OPCM_32_ABS_LEA_LHA:
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -8064,13 +8102,13 @@ static void decode_32Bit_opc(DisasContext *ctx)
if (has_feature(ctx, TRICORE_FEATURE_162)) {
op2 = MASK_OP_ABS_OP2(ctx->opcode);
if (op2 == OPC2_32_ABS_LHA) {
- tcg_gen_movi_tl(cpu_gpr_a[r1], address << 14);
+ tcg_gen_movi_i32(cpu_gpr_a[r1], address << 14);
break;
}
/* otherwise translate regular LEA */
}
- tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
+ tcg_gen_movi_i32(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
break;
/* ABSB-format */
case OPC1_32_ABSB_ST_T:
@@ -8079,12 +8117,12 @@ static void decode_32Bit_opc(DisasContext *ctx)
bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
- tcg_gen_ori_tl(temp2, temp2, (b << bpos));
- tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_ld_i32(temp2, temp, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_i32(temp2, temp2, ~(0x1u << bpos));
+ tcg_gen_ori_i32(temp2, temp2, (b << bpos));
+ tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_UB);
break;
/* B-format */
case OPC1_32_B_CALL:
@@ -8206,13 +8244,13 @@ static void decode_32Bit_opc(DisasContext *ctx)
r3 = MASK_OP_RCRR_D(ctx->opcode);
const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
temp = tcg_constant_i32(const16);
- temp2 = tcg_temp_new(); /* width*/
- temp3 = tcg_temp_new(); /* pos */
+ temp2 = tcg_temp_new_i32(); /* width */
+ temp3 = tcg_temp_new_i32(); /* pos */
CHECK_REG_PAIR(r2);
- tcg_gen_andi_tl(temp2, cpu_gpr_d[r2 + 1], 0x1f);
- tcg_gen_andi_tl(temp3, cpu_gpr_d[r2], 0x1f);
+ tcg_gen_andi_i32(temp2, cpu_gpr_d[r2 + 1], 0x1f);
+ tcg_gen_andi_i32(temp3, cpu_gpr_d[r2], 0x1f);
gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, temp2, temp3);
break;
@@ -8280,7 +8318,7 @@ static void decode_32Bit_opc(DisasContext *ctx)
r3 = MASK_OP_RRPW_D(ctx->opcode);
const16 = MASK_OP_RRPW_POS(ctx->opcode);
- tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
+ tcg_gen_extract2_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
32 - const16);
break;
/* RRR Format */
@@ -8329,10 +8367,10 @@ static void decode_32Bit_opc(DisasContext *ctx)
decode_sys_interrupts(ctx);
break;
case OPC1_32_SYS_RSTV:
- tcg_gen_movi_tl(cpu_PSW_V, 0);
- tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
- tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
+ tcg_gen_mov_i32(cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_SAV, cpu_PSW_V);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -8375,7 +8413,7 @@ static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next);
}
-static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
+static bool insn_crosses_page(DisasContext *ctx, CPUTriCoreState *env)
{
/*
* Return true if the insn at ctx->base.pc_next might cross a page boundary.
@@ -8413,12 +8451,12 @@ static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
ctx->base.pc_next = ctx->pc_succ_insn;
if (ctx->base.is_jmp == DISAS_NEXT) {
- target_ulong page_start;
+ vaddr page_start;
page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
|| (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
- && insn_crosses_page(env, ctx))) {
+ && insn_crosses_page(ctx, env))) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
@@ -8479,14 +8517,14 @@ void cpu_state_reset(CPUTriCoreState *env)
static void tricore_tcg_init_csfr(void)
{
- cpu_PCXI = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PCXI), "PCXI");
- cpu_PSW = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW), "PSW");
- cpu_PC = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PC), "PC");
- cpu_ICR = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, ICR), "ICR");
+ cpu_PCXI = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PCXI), "PCXI");
+ cpu_PSW = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW), "PSW");
+ cpu_PC = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PC), "PC");
+ cpu_ICR = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
@@ -8495,30 +8533,32 @@ void tricore_tcg_init(void)
/* reg init */
for (i = 0 ; i < 16 ; i++) {
- cpu_gpr_a[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, gpr_a[i]),
- regnames_a[i]);
+ cpu_gpr_a[i] = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState,
+ gpr_a[i]),
+ regnames_a[i]);
}
for (i = 0 ; i < 16 ; i++) {
- cpu_gpr_d[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, gpr_d[i]),
- regnames_d[i]);
+ cpu_gpr_d[i] = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState,
+ gpr_d[i]),
+ regnames_d[i]);
}
tricore_tcg_init_csfr();
/* init PSW flag cache */
- cpu_PSW_C = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_C),
- "PSW_C");
- cpu_PSW_V = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_V),
- "PSW_V");
- cpu_PSW_SV = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_SV),
- "PSW_SV");
- cpu_PSW_AV = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_AV),
- "PSW_AV");
- cpu_PSW_SAV = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_SAV),
- "PSW_SAV");
+ cpu_PSW_C = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_C),
+ "PSW_C");
+ cpu_PSW_V = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_V),
+ "PSW_V");
+ cpu_PSW_SV = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_SV),
+ "PSW_SV");
+ cpu_PSW_AV = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_AV),
+ "PSW_AV");
+ cpu_PSW_SAV = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_SAV),
+ "PSW_SAV");
}
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index ea9b6df..1eeed44 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -59,13 +59,13 @@ static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs)
{
CPUXtensaState *env = cpu_env(cs);
uint32_t flags = 0;
- target_ulong cs_base = 0;
+ uint64_t cs_base = 0;
flags |= xtensa_get_ring(env);
if (env->sregs[PS] & PS_EXCM) {
flags |= XTENSA_TBFLAG_EXCM;
} else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
- target_ulong lend_dist =
+ uint64_t lend_dist =
env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
/*
@@ -83,7 +83,7 @@ static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs)
* for the TB that contains this instruction.
*/
if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
- target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
+ uint64_t lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
cs_base = lend_dist;
if (lbeg_off < 256) {
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 34ae2f4..bb8d2ed 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1166,7 +1166,7 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUXtensaState *env = cpu_env(cpu);
- target_ulong page_start;
+ vaddr page_start;
/* These two conditions only apply to the first insn in the TB,
but this is the first TranslateOps hook that allows exiting. */
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
index 636f421..431c263 100644
--- a/target/xtensa/xtensa-semi.c
+++ b/target/xtensa/xtensa-semi.c
@@ -32,6 +32,7 @@
#include "exec/target_page.h"
#include "semihosting/semihost.h"
#include "semihosting/uaccess.h"
+#include "system/memory.h"
#include "qapi/error.h"
#include "qemu/log.h"
@@ -192,7 +193,9 @@ void xtensa_sim_open_console(Chardev *chr)
void HELPER(simcall)(CPUXtensaState *env)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
CPUState *cs = env_cpu(env);
+ AddressSpace *as = cs->as;
uint32_t *regs = env->regs;
switch (regs[2]) {
@@ -215,7 +218,7 @@ void HELPER(simcall)(CPUXtensaState *env)
TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
uint32_t io_sz = page_left < len ? page_left : len;
hwaddr sz = io_sz;
- void *buf = cpu_physical_memory_map(paddr, &sz, !is_write);
+ void *buf = address_space_map(as, paddr, &sz, !is_write, attrs);
uint32_t io_done;
bool error = false;
@@ -261,7 +264,7 @@ void HELPER(simcall)(CPUXtensaState *env)
error = true;
io_done = 0;
}
- cpu_physical_memory_unmap(buf, sz, !is_write, io_done);
+ address_space_unmap(as, buf, sz, !is_write, io_done);
} else {
error = true;
regs[3] = TARGET_EINVAL;
@@ -408,11 +411,11 @@ void HELPER(simcall)(CPUXtensaState *env)
while (sz) {
hwaddr len = sz;
- void *buf = cpu_physical_memory_map(base, &len, 1);
+ void *buf = address_space_map(as, base, &len, true, attrs);
if (buf && len) {
memset(buf, regs[4], len);
- cpu_physical_memory_unmap(buf, len, 1, len);
+ address_space_unmap(as, buf, len, true, len);
} else {
len = 1;
}
diff --git a/tests/Makefile.include b/tests/Makefile.include
index e47ef4d..d4dfbf3 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -111,6 +111,7 @@ $(FUNCTIONAL_TARGETS): check-venv
.PHONY: check-functional
check-functional: check-venv
@$(NINJA) precache-functional
+ @$(PYTHON) $(SRC_PATH)/scripts/clean_functional_cache.py
@QEMU_TEST_NO_DOWNLOAD=1 $(MAKE) SPEED=thorough check-func check-func-quick
.PHONY: check-func check-func-quick
diff --git a/tests/functional/aarch64/test_sbsaref_alpine.py b/tests/functional/aarch64/test_sbsaref_alpine.py
index abb8f51..be84b7a 100755
--- a/tests/functional/aarch64/test_sbsaref_alpine.py
+++ b/tests/functional/aarch64/test_sbsaref_alpine.py
@@ -41,15 +41,9 @@ class Aarch64SbsarefAlpine(QemuSystemTest):
self.vm.launch()
wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17")
- def test_sbsaref_alpine_linux_cortex_a57(self):
- self.boot_alpine_linux("cortex-a57")
-
def test_sbsaref_alpine_linux_default_cpu(self):
self.boot_alpine_linux()
- def test_sbsaref_alpine_linux_max_pauth_off(self):
- self.boot_alpine_linux("max,pauth=off")
-
def test_sbsaref_alpine_linux_max_pauth_impdef(self):
self.boot_alpine_linux("max,pauth-impdef=on")
diff --git a/tests/functional/alpha/test_clipper.py b/tests/functional/alpha/test_clipper.py
index c5d7181..d2a4c2a 100755
--- a/tests/functional/alpha/test_clipper.py
+++ b/tests/functional/alpha/test_clipper.py
@@ -17,7 +17,6 @@ class AlphaClipperTest(LinuxKernelTest):
def test_alpha_clipper(self):
self.set_machine('clipper')
- kernel_path = self.ASSET_KERNEL.fetch()
uncompressed_kernel = self.uncompress(self.ASSET_KERNEL, format="gz")
diff --git a/tests/functional/qemu_test/asset.py b/tests/functional/qemu_test/asset.py
index f666125..ab3a7bb 100644
--- a/tests/functional/qemu_test/asset.py
+++ b/tests/functional/qemu_test/asset.py
@@ -10,6 +10,7 @@ import logging
import os
import stat
import sys
+import time
import unittest
import urllib.request
from time import sleep
@@ -113,6 +114,16 @@ class Asset:
self.log.debug("Time out while waiting for %s!", tmp_cache_file)
raise
+ def _save_time_stamp(self):
+ '''
+ Update the time stamp of the asset in the cache. Unfortunately, we
+ cannot use the modification or access time of the asset file itself,
+ since e.g. the functional jobs in the gitlab CI reload the files
+ from the gitlab cache and thus always have recent file time stamps,
+ so we have to save our asset time stamp to a separate file instead.
+ '''
+ self.cache_file.with_suffix(".stamp").write_text(f"{int(time.time())}")
+
def fetch(self):
if not self.cache_dir.exists():
self.cache_dir.mkdir(parents=True, exist_ok=True)
@@ -120,6 +131,7 @@ class Asset:
if self.valid():
self.log.debug("Using cached asset %s for %s",
self.cache_file, self.url)
+ self._save_time_stamp()
return str(self.cache_file)
if not self.fetchable():
@@ -208,6 +220,7 @@ class Asset:
tmp_cache_file.unlink()
raise AssetError(self, "Hash does not match %s" % self.hash)
tmp_cache_file.replace(self.cache_file)
+ self._save_time_stamp()
# Remove write perms to stop tests accidentally modifying them
os.chmod(self.cache_file, stat.S_IRUSR | stat.S_IRGRP)
diff --git a/tests/functional/reverse_debugging.py b/tests/functional/reverse_debugging.py
index 68cfcb3..86fca8d 100644
--- a/tests/functional/reverse_debugging.py
+++ b/tests/functional/reverse_debugging.py
@@ -36,14 +36,13 @@ class ReverseDebugging(LinuxKernelTest):
STEPS = 10
def run_vm(self, record, shift, args, replay_path, image_path, port):
- logger = logging.getLogger('replay')
vm = self.get_vm(name='record' if record else 'replay')
vm.set_console()
if record:
- logger.info('recording the execution...')
+ self.log.info('recording the execution...')
mode = 'record'
else:
- logger.info('replaying the execution...')
+ self.log.info('replaying the execution...')
mode = 'replay'
vm.add_args('-gdb', 'tcp::%d' % port, '-S')
vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' %
@@ -68,10 +67,8 @@ class ReverseDebugging(LinuxKernelTest):
def reverse_debugging(self, gdb_arch, shift=7, args=None):
from qemu_test import GDB
- logger = logging.getLogger('replay')
-
# create qcow2 for snapshots
- logger.info('creating qcow2 image for VM snapshots')
+ self.log.info('creating qcow2 image for VM snapshots')
image_path = os.path.join(self.workdir, 'disk.qcow2')
qemu_img = get_qemu_img(self)
if qemu_img is None:
@@ -79,7 +76,7 @@ class ReverseDebugging(LinuxKernelTest):
'create the temporary qcow2 image')
out = check_output([qemu_img, 'create', '-f', 'qcow2', image_path, '128M'],
encoding='utf8')
- logger.info("qemu-img: %s" % out)
+ self.log.info("qemu-img: %s" % out)
replay_path = os.path.join(self.workdir, 'replay.bin')
@@ -90,7 +87,7 @@ class ReverseDebugging(LinuxKernelTest):
last_icount = self.vm_get_icount(vm)
vm.shutdown()
- logger.info("recorded log with %s+ steps" % last_icount)
+ self.log.info("recorded log with %s+ steps" % last_icount)
# replay and run debug commands
with Ports() as ports:
@@ -98,9 +95,16 @@ class ReverseDebugging(LinuxKernelTest):
vm = self.run_vm(False, shift, args, replay_path, image_path, port)
try:
- logger.info('Connecting to gdbstub...')
- self.reverse_debugging_run(vm, port, gdb_arch, last_icount)
- logger.info('Test passed.')
+ self.log.info('Connecting to gdbstub...')
+ gdb_cmd = os.getenv('QEMU_TEST_GDB')
+ gdb = GDB(gdb_cmd)
+ try:
+ self.reverse_debugging_run(gdb, vm, port, gdb_arch, last_icount)
+ finally:
+ self.log.info('exiting gdb and qemu')
+ gdb.exit()
+ vm.shutdown()
+ self.log.info('Test passed.')
except GDB.TimeoutError:
# Convert a GDB timeout exception into a unittest failure exception.
raise self.failureException("Timeout while connecting to or "
@@ -110,12 +114,7 @@ class ReverseDebugging(LinuxKernelTest):
# skipTest(), etc.
raise
- def reverse_debugging_run(self, vm, port, gdb_arch, last_icount):
- logger = logging.getLogger('replay')
-
- gdb_cmd = os.getenv('QEMU_TEST_GDB')
- gdb = GDB(gdb_cmd)
-
+ def reverse_debugging_run(self, gdb, vm, port, gdb_arch, last_icount):
r = gdb.cli("set architecture").get_log()
if gdb_arch not in r:
self.skipTest(f"GDB does not support arch '{gdb_arch}'")
@@ -135,43 +134,43 @@ class ReverseDebugging(LinuxKernelTest):
gdb.cli("set debug remote 0")
- logger.info('stepping forward')
+ self.log.info('stepping forward')
steps = []
# record first instruction addresses
for _ in range(self.STEPS):
pc = self.get_pc(gdb)
- logger.info('saving position %x' % pc)
+ self.log.info('saving position %x' % pc)
steps.append(pc)
gdb.cli("stepi")
# visit the recorded instruction in reverse order
- logger.info('stepping backward')
+ self.log.info('stepping backward')
for addr in steps[::-1]:
- logger.info('found position %x' % addr)
+ self.log.info('found position %x' % addr)
gdb.cli("reverse-stepi")
pc = self.get_pc(gdb)
if pc != addr:
- logger.info('Invalid PC (read %x instead of %x)' % (pc, addr))
+ self.log.info('Invalid PC (read %x instead of %x)' % (pc, addr))
self.fail('Reverse stepping failed!')
# visit the recorded instruction in forward order
- logger.info('stepping forward')
+ self.log.info('stepping forward')
for addr in steps:
- logger.info('found position %x' % addr)
+ self.log.info('found position %x' % addr)
pc = self.get_pc(gdb)
if pc != addr:
- logger.info('Invalid PC (read %x instead of %x)' % (pc, addr))
+ self.log.info('Invalid PC (read %x instead of %x)' % (pc, addr))
self.fail('Forward stepping failed!')
gdb.cli("stepi")
# set breakpoints for the instructions just stepped over
- logger.info('setting breakpoints')
+ self.log.info('setting breakpoints')
for addr in steps:
gdb.cli(f"break *{hex(addr)}")
# this may hit a breakpoint if first instructions are executed
# again
- logger.info('continuing execution')
+ self.log.info('continuing execution')
vm.qmp('replay-break', icount=last_icount - 1)
# continue - will return after pausing
# This can stop at the end of the replay-break and gdb gets a SIGINT,
@@ -180,12 +179,12 @@ class ReverseDebugging(LinuxKernelTest):
gdb.cli("continue")
if self.vm_get_icount(vm) == last_icount - 1:
- logger.info('reached the end (icount %s)' % (last_icount - 1))
+ self.log.info('reached the end (icount %s)' % (last_icount - 1))
else:
- logger.info('hit a breakpoint again at %x (icount %s)' %
+ self.log.info('hit a breakpoint again at %x (icount %s)' %
(self.get_pc(gdb), self.vm_get_icount(vm)))
- logger.info('running reverse continue to reach %x' % steps[-1])
+ self.log.info('running reverse continue to reach %x' % steps[-1])
# reverse continue - will return after stopping at the breakpoint
gdb.cli("reverse-continue")
@@ -195,8 +194,4 @@ class ReverseDebugging(LinuxKernelTest):
if pc != steps[-1]:
self.fail("'reverse-continue' did not hit the first PC in reverse order!")
- logger.info('successfully reached %x' % steps[-1])
-
- logger.info('exiting gdb and qemu')
- gdb.exit()
- vm.shutdown()
+ self.log.info('successfully reached %x' % steps[-1])
diff --git a/tests/tcg/hexagon/signal_context.c b/tests/tcg/hexagon/signal_context.c
index 7202fa6..9de7f6b 100644
--- a/tests/tcg/hexagon/signal_context.c
+++ b/tests/tcg/hexagon/signal_context.c
@@ -26,7 +26,11 @@ void sig_user(int sig, siginfo_t *info, void *puc)
"p1 = r7\n\t"
"p2 = r7\n\t"
"p3 = r7\n\t"
- : : : "r7", "p0", "p1", "p2", "p3");
+ "r6 = #0x12345678\n\t"
+ "cs0 = r6\n\t"
+ "r6 = #0x87654321\n\t"
+ "cs1 = r6\n\t"
+ : : : "r6", "r7", "p0", "p1", "p2", "p3", "cs0", "cs1");
}
int main()
@@ -53,7 +57,11 @@ int main()
timer_settime(tid, 0, &it, NULL);
asm("loop0(1f, %1)\n\t"
- "1: r8 = #0xff\n\t"
+ "1: r9 = #0xdeadbeef\n\t"
+ " cs0 = r9\n\t"
+ " r9 = #0xbadc0fee\n\t"
+ " cs1 = r9\n\t"
+ " r8 = #0xff\n\t"
" p0 = r8\n\t"
" p1 = r8\n\t"
" p2 = r8\n\t"
@@ -74,10 +82,19 @@ int main()
" r8 = p3\n\t"
" p0 = cmp.eq(r8, #0xff)\n\t"
" if (!p0) jump 2b\n\t"
+ " r8 = cs0\n\t"
+ " r9 = #0xdeadbeef\n\t"
+ " p0 = cmp.eq(r8, r9)\n\t"
+ " if (!p0) jump 2b\n\t"
+ " r8 = cs1\n\t"
+ " r9 = #0xbadc0fee\n\t"
+ " p0 = cmp.eq(r8, r9)\n\t"
+ " if (!p0) jump 2b\n\t"
"4: {}: endloop0\n\t"
:
: "r"(&err), "r"(i)
- : "memory", "r8", "p0", "p1", "p2", "p3");
+ : "memory", "r8", "r9", "p0", "p1", "p2", "p3", "cs0", "cs1", "lc0",
+ "sa0");
puts(err ? "FAIL" : "PASS");
return err;