author     Stefan Hajnoczi <stefanha@redhat.com>  2023-09-19 13:20:54 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>  2023-09-19 13:20:54 -0400
commit     d7754940d78a7d5bfb13531afa9a67f8c57e987e (patch)
tree       7a20f7049f7dad5287d623b321539426f39a234b
parent     13d6b1608160de40ec65ae4c32419e56714bbadf (diff)
parent     a97a83753c90d79ed15a716610af23fabd84aaed (diff)
Merge tag 'pull-tcg-20230915-2' of https://gitlab.com/rth7680/qemu into staging
*: Delete checks for old host definitions
tcg/loongarch64: Generate LSX instructions
fpu: Add conversions between bfloat16 and [u]int8
fpu: Handle m68k extended precision denormals properly
accel/tcg: Improve cputlb i/o organization
accel/tcg: Simplify tlb_plugin_lookup
accel/tcg: Remove false-negative halted assertion
tcg: Add gvec compare with immediate and scalar operand
tcg/aarch64: Emit BTI insns at jump landing pads

[Resolved conflict between CPUINFO_PMULL and CPUINFO_BTI. --Stefan]

* tag 'pull-tcg-20230915-2' of https://gitlab.com/rth7680/qemu: (39 commits)
  tcg: Map code_gen_buffer with PROT_BTI
  tcg/aarch64: Emit BTI insns at jump landing pads
  util/cpuinfo-aarch64: Add CPUINFO_BTI
  tcg: Add tcg_out_tb_start backend hook
  fpu: Handle m68k extended precision denormals properly
  fpu: Add conversions between bfloat16 and [u]int8
  accel/tcg: Introduce do_st16_mmio_leN
  accel/tcg: Introduce do_ld16_mmio_beN
  accel/tcg: Merge io_writex into do_st_mmio_leN
  accel/tcg: Merge io_readx into do_ld_mmio_beN
  accel/tcg: Replace direct use of io_readx/io_writex in do_{ld,st}_1
  accel/tcg: Merge cpu_transaction_failed into io_failed
  plugin: Simplify struct qemu_plugin_hwaddr
  accel/tcg: Use CPUTLBEntryFull.phys_addr in io_failed
  accel/tcg: Split out io_prepare and io_failed
  accel/tcg: Simplify tlb_plugin_lookup
  target/arm: Use tcg_gen_gvec_cmpi for compare vs 0
  tcg: Add gvec compare with immediate and scalar operand
  tcg/loongarch64: Implement 128-bit load & store
  tcg/loongarch64: Lower rotli_vec to vrotri
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  accel/tcg/cputlb.c                    |  435
-rw-r--r--  accel/tcg/tcg-accel-ops-mttcg.c       |    9
-rw-r--r--  accel/tcg/tcg-runtime-gvec.c          |   26
-rw-r--r--  accel/tcg/tcg-runtime.h               |   25
-rw-r--r--  fpu/softfloat-parts.c.inc             |    7
-rw-r--r--  fpu/softfloat.c                       |   67
-rw-r--r--  host/include/aarch64/host/cpuinfo.h   |    1
-rw-r--r--  include/exec/cpu-defs.h               |   12
-rw-r--r--  include/exec/user/thunk.h             |    3
-rw-r--r--  include/fpu/softfloat.h               |   12
-rw-r--r--  include/hw/core/cpu.h                 |   13
-rw-r--r--  include/qemu/plugin-memory.h          |   11
-rw-r--r--  include/qemu/typedefs.h               |    1
-rw-r--r--  include/tcg/tcg-op-gvec-common.h      |    6
-rw-r--r--  plugins/api.c                         |   27
-rw-r--r--  softmmu/async-teardown.c              |    3
-rw-r--r--  target/arm/tcg/translate.c            |   58
-rw-r--r--  tcg/aarch64/tcg-target.c.inc          |   59
-rw-r--r--  tcg/arm/tcg-target.c.inc              |    7
-rw-r--r--  tcg/i386/tcg-target.c.inc             |    7
-rw-r--r--  tcg/loongarch64/tcg-insn-defs.c.inc   | 6019
-rw-r--r--  tcg/loongarch64/tcg-target-con-set.h  |    9
-rw-r--r--  tcg/loongarch64/tcg-target-con-str.h  |    3
-rw-r--r--  tcg/loongarch64/tcg-target.c.inc      |  624
-rw-r--r--  tcg/loongarch64/tcg-target.h          |   40
-rw-r--r--  tcg/loongarch64/tcg-target.opc.h      |   12
-rw-r--r--  tcg/mips/tcg-target.c.inc             |    7
-rw-r--r--  tcg/ppc/tcg-target.c.inc              |    7
-rw-r--r--  tcg/region.c                          |   41
-rw-r--r--  tcg/riscv/tcg-target.c.inc            |    7
-rw-r--r--  tcg/s390x/tcg-target.c.inc            |    7
-rw-r--r--  tcg/sparc64/tcg-target.c.inc          |    7
-rw-r--r--  tcg/tcg-op-gvec.c                     |  149
-rw-r--r--  tcg/tcg.c                             |    7
-rw-r--r--  tcg/tci/tcg-target.c.inc              |    7
-rw-r--r--  tests/tcg/m68k/Makefile.target        |    2
-rw-r--r--  tests/tcg/m68k/denormal.c             |   53
-rw-r--r--  util/cpuinfo-aarch64.c                |    7
-rw-r--r--  util/oslib-posix.c                    |   15
39 files changed, 7419 insertions, 393 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index c643d66..3270f65 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1193,6 +1193,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
write_flags = read_flags;
if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+ assert(!(iotlb & ~TARGET_PAGE_MASK));
/*
* Computing is_clean is expensive; avoid all that unless
* the page is actually writable.
@@ -1255,16 +1256,18 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
/* refill the tlb */
/*
- * At this point iotlb contains a physical section number in the lower
- * TARGET_PAGE_BITS, and either
- * + the ram_addr_t of the page base of the target RAM (RAM)
- * + the offset within section->mr of the page base (I/O, ROMD)
+ * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
+ * aligned ram_addr_t of the page base of the target RAM.
+ * Otherwise, iotlb contains
+ * - a physical section number in the lower TARGET_PAGE_BITS
+ * - the offset within section->mr of the page base (I/O, ROMD) with the
+ * TARGET_PAGE_BITS masked off.
* We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get
* the MemoryRegion offset for the access. Note that the vaddr we
* subtract here is that of the page base, and not the same as the
- * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
+ * vaddr we add back in io_prepare()/get_page_addr_code().
*/
desc->fulltlb[index] = *full;
full = &desc->fulltlb[index];
@@ -1347,116 +1350,41 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
mmu_idx, retaddr);
}
-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response,
- uintptr_t retaddr)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cpu->ignore_memory_transaction_failures &&
- cc->tcg_ops->do_transaction_failed) {
- cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
- access_type, mmu_idx, attrs,
- response, retaddr);
- }
-}
-
-/*
- * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
- * because of the side effect of io_writex changing memory layout.
- */
-static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
- hwaddr mr_offset)
-{
-#ifdef CONFIG_PLUGIN
- SavedIOTLB *saved = &cs->saved_iotlb;
- saved->section = section;
- saved->mr_offset = mr_offset;
-#endif
-}
-
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, vaddr addr, uintptr_t retaddr,
- MMUAccessType access_type, MemOp op)
+static MemoryRegionSection *
+io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+ MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
CPUState *cpu = env_cpu(env);
- hwaddr mr_offset;
MemoryRegionSection *section;
- MemoryRegion *mr;
- uint64_t val;
- MemTxResult r;
+ hwaddr mr_offset;
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
- mr = section->mr;
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
+ section = iotlb_to_section(cpu, xlat, attrs);
+ mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
if (!cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
- /*
- * The memory_region_dispatch may trigger a flush/resize
- * so for plugins we save the iotlb_data just in case.
- */
- save_iotlb_data(cpu, section, mr_offset);
-
- {
- QEMU_IOTHREAD_LOCK_GUARD();
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
- }
-
- if (r != MEMTX_OK) {
- hwaddr physaddr = mr_offset +
- section->offset_within_address_space -
- section->offset_within_region;
-
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
- mmu_idx, full->attrs, r, retaddr);
- }
- return val;
+ *out_offset = mr_offset;
+ return section;
}
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, uint64_t val, vaddr addr,
- uintptr_t retaddr, MemOp op)
+static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+ unsigned size, MMUAccessType access_type, int mmu_idx,
+ MemTxResult response, uintptr_t retaddr)
{
CPUState *cpu = env_cpu(env);
- hwaddr mr_offset;
- MemoryRegionSection *section;
- MemoryRegion *mr;
- MemTxResult r;
-
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
- mr = section->mr;
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
- if (!cpu->can_do_io) {
- cpu_io_recompile(cpu, retaddr);
- }
- cpu->mem_io_pc = retaddr;
- /*
- * The memory_region_dispatch may trigger a flush/resize
- * so for plugins we save the iotlb_data just in case.
- */
- save_iotlb_data(cpu, section, mr_offset);
-
- {
- QEMU_IOTHREAD_LOCK_GUARD();
- r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
- }
+ if (!cpu->ignore_memory_transaction_failures) {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
- if (r != MEMTX_OK) {
- hwaddr physaddr = mr_offset +
- section->offset_within_address_space -
- section->offset_within_region;
+ if (cc->tcg_ops->do_transaction_failed) {
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
- MMU_DATA_STORE, mmu_idx, full->attrs, r,
- retaddr);
+ cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+ access_type, mmu_idx,
+ full->attrs, response, retaddr);
+ }
}
}
@@ -1726,45 +1654,41 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
* in the softmmu lookup code (or helper). We don't handle re-fills or
* checking the victim table. This is purely informational.
*
- * This almost never fails as the memory access being instrumented
- * should have just filled the TLB. The one corner case is io_writex
- * which can cause TLB flushes and potential resizing of the TLBs
- * losing the information we need. In those cases we need to recover
- * data from a copy of the CPUTLBEntryFull. As long as this always occurs
- * from the same thread (which a mem callback will be) this is safe.
+ * The one corner case is i/o write, which can cause changes to the
+ * address space. Those changes, and the corresponding tlb flush,
+ * should be delayed until the next TB, so even then this ought not fail.
+ * But check, Just in Case.
*/
-
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data)
{
CPUArchState *env = cpu->env_ptr;
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
uintptr_t index = tlb_index(env, mmu_idx, addr);
- uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
-
- if (likely(tlb_hit(tlb_addr, addr))) {
- /* We must have an iotlb entry for MMIO */
- if (tlb_addr & TLB_MMIO) {
- CPUTLBEntryFull *full;
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- data->is_io = true;
- data->v.io.section =
- iotlb_to_section(cpu, full->xlat_section, full->attrs);
- data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
- } else {
- data->is_io = false;
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- }
- return true;
- } else {
- SavedIOTLB *saved = &cpu->saved_iotlb;
+ MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
+ CPUTLBEntryFull *full;
+
+ if (unlikely(!tlb_hit(tlb_addr, addr))) {
+ return false;
+ }
+
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
+
+ /* We must have an iotlb entry for MMIO */
+ if (tlb_addr & TLB_MMIO) {
+ MemoryRegionSection *section =
+ iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
+ full->attrs);
data->is_io = true;
- data->v.io.section = saved->section;
- data->v.io.offset = saved->mr_offset;
- return true;
+ data->mr = section->mr;
+ } else {
+ data->is_io = false;
+ data->mr = NULL;
}
+ return true;
}
-
#endif
/*
@@ -2084,47 +2008,90 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
*/
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t ret_be, vaddr addr, int size,
- int mmu_idx, MMUAccessType type, uintptr_t ra)
+static uint64_t int_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, MMUAccessType type, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- uint64_t t;
-
- tcg_debug_assert(size > 0 && size <= 8);
do {
+ MemOp this_mop;
+ unsigned this_size;
+ uint64_t val;
+ MemTxResult r;
+
/* Read aligned pieces up to 8 bytes. */
- switch ((size | (int)addr) & 7) {
- case 1:
- case 3:
- case 5:
- case 7:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
- ret_be = (ret_be << 8) | t;
- size -= 1;
- addr += 1;
- break;
- case 2:
- case 6:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
- ret_be = (ret_be << 16) | t;
- size -= 2;
- addr += 2;
- break;
- case 4:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
- ret_be = (ret_be << 32) | t;
- size -= 4;
- addr += 4;
- break;
- case 0:
- return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
- default:
- qemu_build_not_reached();
+ this_mop = ctz32(size | (int)addr | 8);
+ this_size = 1 << this_mop;
+ this_mop |= MO_BE;
+
+ r = memory_region_dispatch_read(mr, mr_offset, &val,
+ this_mop, full->attrs);
+ if (unlikely(r != MEMTX_OK)) {
+ io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
}
+ if (this_size == 8) {
+ return val;
+ }
+
+ ret_be = (ret_be << (this_size * 8)) | val;
+ addr += this_size;
+ mr_offset += this_size;
+ size -= this_size;
} while (size);
+
return ret_be;
}
+static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, MMUAccessType type, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ ret = int_ld_mmio_beN(env, full, ret_be, addr, size, mmu_idx,
+ type, ra, mr, mr_offset);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static Int128 do_ld16_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t a, b;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ a = int_ld_mmio_beN(env, full, ret_be, addr, size - 8, mmu_idx,
+ MMU_DATA_LOAD, ra, mr, mr_offset);
+ b = int_ld_mmio_beN(env, full, ret_be, addr + size - 8, 8, mmu_idx,
+ MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
+ qemu_mutex_unlock_iothread();
+
+ return int128_make128(b, a);
+}
+
/**
* do_ld_bytes_beN
* @p: translation parameters
@@ -2267,7 +2234,6 @@ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
}
@@ -2318,12 +2284,7 @@ static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
- mmu_idx, MMU_DATA_LOAD, ra);
- b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
- mmu_idx, MMU_DATA_LOAD, ra);
- return int128_make128(b, a);
+ return do_ld16_mmio_beN(env, p->full, a, p->addr, size, mmu_idx, ra);
}
/*
@@ -2368,7 +2329,7 @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
+ return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
} else {
return *(uint8_t *)p->haddr;
}
@@ -2380,7 +2341,6 @@ static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
uint16_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap16(ret);
@@ -2401,7 +2361,6 @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
uint32_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
@@ -2422,7 +2381,6 @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
uint64_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
@@ -2581,12 +2539,8 @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
- l.mmu_idx, MMU_DATA_LOAD, ra);
- b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
- l.mmu_idx, MMU_DATA_LOAD, ra);
- ret = int128_make128(b, a);
+ ret = do_ld16_mmio_beN(env, l.page[0].full, 0, addr, 16,
+ l.mmu_idx, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap128(ret);
}
@@ -2727,48 +2681,90 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
* The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored.
*/
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t val_le, vaddr addr, int size,
- int mmu_idx, uintptr_t ra)
+static uint64_t int_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- tcg_debug_assert(size > 0 && size <= 8);
-
do {
+ MemOp this_mop;
+ unsigned this_size;
+ MemTxResult r;
+
/* Store aligned pieces up to 8 bytes. */
- switch ((size | (int)addr) & 7) {
- case 1:
- case 3:
- case 5:
- case 7:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
- val_le >>= 8;
- size -= 1;
- addr += 1;
- break;
- case 2:
- case 6:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
- val_le >>= 16;
- size -= 2;
- addr += 2;
- break;
- case 4:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
- val_le >>= 32;
- size -= 4;
- addr += 4;
- break;
- case 0:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
+ this_mop = ctz32(size | (int)addr | 8);
+ this_size = 1 << this_mop;
+ this_mop |= MO_LE;
+
+ r = memory_region_dispatch_write(mr, mr_offset, val_le,
+ this_mop, full->attrs);
+ if (unlikely(r != MEMTX_OK)) {
+ io_failed(env, full, addr, this_size, MMU_DATA_STORE,
+ mmu_idx, r, ra);
+ }
+ if (this_size == 8) {
return 0;
- default:
- qemu_build_not_reached();
}
+
+ val_le >>= this_size * 8;
+ addr += this_size;
+ mr_offset += this_size;
+ size -= this_size;
} while (size);
return val_le;
}
+static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ ret = int_st_mmio_leN(env, full, val_le, addr, size, mmu_idx,
+ ra, mr, mr_offset);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ Int128 val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
+ mmu_idx, ra, mr, mr_offset);
+ ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
+ size - 8, mmu_idx, ra, mr, mr_offset + 8);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
/*
* Wrapper for the above.
*/
@@ -2780,7 +2776,6 @@ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
return do_st_mmio_leN(env, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
@@ -2835,11 +2830,8 @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, int128_getlo(val_le),
- p->addr, 8, mmu_idx, ra);
- return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
- p->addr + 8, size - 8, mmu_idx, ra);
+ return do_st16_mmio_leN(env, p->full, val_le, p->addr,
+ size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8);
}
@@ -2883,7 +2875,7 @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
+ do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
@@ -2898,7 +2890,6 @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap16(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -2918,7 +2909,6 @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -2938,7 +2928,6 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -3066,11 +3055,7 @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap128(val);
}
- a = int128_getlo(val);
- b = int128_gethi(val);
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
- do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
+ do_st16_mmio_leN(env, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
} else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
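
The replacement of the old 8-way switch with ctz32(size | addr | 8) above picks, at each step, the largest naturally aligned power-of-two piece (capped at 8 bytes) that still fits the remaining length; loads concatenate the pieces big-endian, stores shift the value out little-endian. A minimal standalone sketch of that decomposition, not part of the patch and using __builtin_ctz in place of QEMU's ctz32():

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Split an MMIO access into naturally aligned pieces of at most 8 bytes,
 * the same way int_ld_mmio_beN()/int_st_mmio_leN() do above. */
static void split_mmio(uint64_t addr, int size)
{
    while (size > 0) {
        int piece = 1 << __builtin_ctz((unsigned)size | (unsigned)addr | 8);
        printf("  %d-byte access at 0x%" PRIx64 "\n", piece, addr);
        addr += piece;
        size -= piece;
    }
}

int main(void)
{
    split_mmio(0x1001, 7);   /* 1 + 2 + 4 */
    split_mmio(0x1000, 16);  /* 8 + 8 */
    return 0;
}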
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index b276262..4b0dfb4 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -100,14 +100,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
break;
case EXCP_HALTED:
/*
- * during start-up the vCPU is reset and the thread is
- * kicked several times. If we don't ensure we go back
- * to sleep in the halted state we won't cleanly
- * start-up when the vCPU is enabled.
- *
- * cpu->halted should ensure we sleep in wait_io_event
+ * Usually cpu->halted is set, but may have already been
+ * reset by another thread by the time we arrive here.
*/
- g_assert(cpu->halted);
break;
case EXCP_ATOMIC:
qemu_mutex_unlock_iothread();
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index 6c99f95..afca89b 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -1042,6 +1042,32 @@ DO_CMP2(64)
#undef DO_CMP1
#undef DO_CMP2
+#define DO_CMP1(NAME, TYPE, OP) \
+void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc); \
+ TYPE inv = simd_data(desc), b = b64; \
+ for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) { \
+ *(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv); \
+ } \
+ clear_high(d, oprsz, desc); \
+}
+
+#define DO_CMP2(SZ) \
+ DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==) \
+ DO_CMP1(gvec_lts##SZ, int##SZ##_t, <) \
+ DO_CMP1(gvec_les##SZ, int##SZ##_t, <=) \
+ DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <) \
+ DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
+
+DO_CMP2(8)
+DO_CMP2(16)
+DO_CMP2(32)
+DO_CMP2(64)
+
+#undef DO_CMP1
+#undef DO_CMP2
+
void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
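
The DO_CMP1 body added above relies on -((a OP b) ^ inv) to turn a C comparison result into a full element mask, with inv (taken from simd_data(desc)) letting one helper also serve the inverted condition. A standalone illustration of the idiom for a single byte-sized element, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Returns 0xff when (a < b) ^ inv holds, 0x00 otherwise. */
static uint8_t cmp_mask_lt(uint8_t a, uint8_t b, int inv)
{
    return -(uint8_t)((a < b) ^ inv);
}

int main(void)
{
    printf("%02x\n", cmp_mask_lt(1, 2, 0));  /* ff: 1 < 2 */
    printf("%02x\n", cmp_mask_lt(3, 2, 0));  /* 00: 3 < 2 is false */
    printf("%02x\n", cmp_mask_lt(3, 2, 1));  /* ff: inverted condition, i.e. >= */
    return 0;
}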
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index 186899a..c23b5e6 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -297,4 +297,29 @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index 527e15e..a44649f 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -118,7 +118,8 @@ static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
} else {
int shift = frac_normalize(p);
p->cls = float_class_normal;
- p->exp = fmt->frac_shift - fmt->exp_bias - shift + 1;
+ p->exp = fmt->frac_shift - fmt->exp_bias
+ - shift + !fmt->m68k_denormal;
}
} else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
p->cls = float_class_normal;
@@ -256,7 +257,7 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
is_tiny = !frac_addi(&discard, p, inc);
}
- frac_shrjam(p, 1 - exp);
+ frac_shrjam(p, !fmt->m68k_denormal - exp);
if (p->frac_lo & round_mask) {
/* Need to recompute round-to-even/round-to-odd. */
@@ -287,7 +288,7 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
p->frac_lo &= ~round_mask;
}
- exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) != 0;
+ exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal;
frac_shr(p, frac_shift);
if (is_tiny && (flags & float_flag_inexact)) {
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 0cc130a..027a8e5 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -517,6 +517,7 @@ typedef struct {
* round_mask: bits below lsb which must be rounded
* The following optional modifiers are available:
* arm_althp: handle ARM Alternative Half Precision
+ * m68k_denormal: explicit integer bit for extended precision may be 1
*/
typedef struct {
int exp_size;
@@ -526,6 +527,7 @@ typedef struct {
int frac_size;
int frac_shift;
bool arm_althp;
+ bool m68k_denormal;
uint64_t round_mask;
} FloatFmt;
@@ -576,7 +578,12 @@ static const FloatFmt float128_params = {
static const FloatFmt floatx80_params[3] = {
[floatx80_precision_s] = { FLOATX80_PARAMS(23) },
[floatx80_precision_d] = { FLOATX80_PARAMS(52) },
- [floatx80_precision_x] = { FLOATX80_PARAMS(64) },
+ [floatx80_precision_x] = {
+ FLOATX80_PARAMS(64),
+#ifdef TARGET_M68K
+ .m68k_denormal = true,
+#endif
+ },
};
/* Unpack a float to parts, but do not canonicalize. */
@@ -3126,6 +3133,15 @@ int64_t float64_to_int64_scalbn(float64 a, FloatRoundMode rmode, int scale,
return parts_float_to_sint(&p, rmode, scale, INT64_MIN, INT64_MAX, s);
}
+int8_t bfloat16_to_int8_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
+ float_status *s)
+{
+ FloatParts64 p;
+
+ bfloat16_unpack_canonical(&p, a, s);
+ return parts_float_to_sint(&p, rmode, scale, INT8_MIN, INT8_MAX, s);
+}
+
int16_t bfloat16_to_int16_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
float_status *s)
{
@@ -3392,6 +3408,11 @@ int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *s)
return floatx80_to_int64_scalbn(a, float_round_to_zero, 0, s);
}
+int8_t bfloat16_to_int8(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_int8_scalbn(a, s->float_rounding_mode, 0, s);
+}
+
int16_t bfloat16_to_int16(bfloat16 a, float_status *s)
{
return bfloat16_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
@@ -3407,6 +3428,11 @@ int64_t bfloat16_to_int64(bfloat16 a, float_status *s)
return bfloat16_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
}
+int8_t bfloat16_to_int8_round_to_zero(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_int8_scalbn(a, float_round_to_zero, 0, s);
+}
+
int16_t bfloat16_to_int16_round_to_zero(bfloat16 a, float_status *s)
{
return bfloat16_to_int16_scalbn(a, float_round_to_zero, 0, s);
@@ -3534,6 +3560,15 @@ uint64_t float64_to_uint64_scalbn(float64 a, FloatRoundMode rmode, int scale,
return parts_float_to_uint(&p, rmode, scale, UINT64_MAX, s);
}
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode rmode,
+ int scale, float_status *s)
+{
+ FloatParts64 p;
+
+ bfloat16_unpack_canonical(&p, a, s);
+ return parts_float_to_uint(&p, rmode, scale, UINT8_MAX, s);
+}
+
uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode rmode,
int scale, float_status *s)
{
@@ -3759,6 +3794,11 @@ Int128 float128_to_uint128_round_to_zero(float128 a, float_status *s)
return float128_to_uint128_scalbn(a, float_round_to_zero, 0, s);
}
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_uint8_scalbn(a, s->float_rounding_mode, 0, s);
+}
+
uint16_t bfloat16_to_uint16(bfloat16 a, float_status *s)
{
return bfloat16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
@@ -3774,6 +3814,11 @@ uint64_t bfloat16_to_uint64(bfloat16 a, float_status *s)
return bfloat16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
}
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_uint8_scalbn(a, float_round_to_zero, 0, s);
+}
+
uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *s)
{
return bfloat16_to_uint16_scalbn(a, float_round_to_zero, 0, s);
@@ -3929,6 +3974,11 @@ bfloat16 int16_to_bfloat16_scalbn(int16_t a, int scale, float_status *status)
return int64_to_bfloat16_scalbn(a, scale, status);
}
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int scale, float_status *status)
+{
+ return int64_to_bfloat16_scalbn(a, scale, status);
+}
+
bfloat16 int64_to_bfloat16(int64_t a, float_status *status)
{
return int64_to_bfloat16_scalbn(a, 0, status);
@@ -3944,6 +3994,11 @@ bfloat16 int16_to_bfloat16(int16_t a, float_status *status)
return int64_to_bfloat16_scalbn(a, 0, status);
}
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status)
+{
+ return int64_to_bfloat16_scalbn(a, 0, status);
+}
+
float128 int128_to_float128(Int128 a, float_status *status)
{
FloatParts128 p = { };
@@ -4139,6 +4194,11 @@ bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int scale, float_status *status)
return uint64_to_bfloat16_scalbn(a, scale, status);
}
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int scale, float_status *status)
+{
+ return uint64_to_bfloat16_scalbn(a, scale, status);
+}
+
bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status)
{
return uint64_to_bfloat16_scalbn(a, 0, status);
@@ -4154,6 +4214,11 @@ bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status)
return uint64_to_bfloat16_scalbn(a, 0, status);
}
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status)
+{
+ return uint64_to_bfloat16_scalbn(a, 0, status);
+}
+
float128 uint64_to_float128(uint64_t a, float_status *status)
{
FloatParts128 p;
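
The new [u]int8 conversions above delegate to the existing int64/uint64 scalbn paths on the way in and clamp to the int8/uint8 range via parts_float_to_{sint,uint} on the way out. A hedged usage sketch, assuming QEMU's softfloat environment rather than a standalone program:

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

/* Every int8_t value fits exactly in bfloat16's 8-bit significand, so this
 * round trip is lossless; out-of-range inputs to bfloat16_to_int8() would
 * saturate to INT8_MIN/INT8_MAX. */
static int8_t bf16_round_trip(int8_t x, float_status *st)
{
    bfloat16 f = int8_to_bfloat16(x, st);
    return bfloat16_to_int8(f, st);
}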
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index fe8c3b3..fe67153 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -11,6 +11,7 @@
#define CPUINFO_LSE2 (1u << 2)
#define CPUINFO_AES (1u << 3)
#define CPUINFO_PMULL (1u << 4)
+#define CPUINFO_BTI (1u << 5)
/* Initialized with a constructor. */
extern unsigned cpuinfo;
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index fb4c8d4..3502878 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -100,12 +100,12 @@
typedef struct CPUTLBEntryFull {
/*
* @xlat_section contains:
- * - in the lower TARGET_PAGE_BITS, a physical section number
- * - with the lower TARGET_PAGE_BITS masked off, an offset which
- * must be added to the virtual address to obtain:
- * + the ram_addr_t of the target RAM (if the physical section
- * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
- * + the offset within the target MemoryRegion (otherwise)
+ * - For ram, an offset which must be added to the virtual address
+ * to obtain the ram_addr_t of the target RAM
+ * - For other memory regions,
+ * + in the lower TARGET_PAGE_BITS, the physical section number
+ * + with the TARGET_PAGE_BITS masked off, the offset within
+ * the target MemoryRegion
*/
hwaddr xlat_section;
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
index 6eedef4..2ebfecf 100644
--- a/include/exec/user/thunk.h
+++ b/include/exec/user/thunk.h
@@ -111,8 +111,7 @@ static inline int thunk_type_size(const argtype *type_ptr, int is_host)
if (is_host) {
#if defined(HOST_X86_64)
return 8;
-#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
- defined(HOST_PARISC) || defined(HOST_SPARC64)
+#elif defined(HOST_MIPS) || defined(HOST_SPARC64)
return 4;
#elif defined(HOST_PPC)
return sizeof(void *);
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index cd13056..eb64075 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -366,6 +366,8 @@ float32 bfloat16_to_float32(bfloat16, float_status *status);
bfloat16 float64_to_bfloat16(float64 a, float_status *status);
float64 bfloat16_to_float64(bfloat16 a, float_status *status);
+int8_t bfloat16_to_int8_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
int16_t bfloat16_to_int16_scalbn(bfloat16, FloatRoundMode,
int, float_status *status);
int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
@@ -373,14 +375,18 @@ int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
int64_t bfloat16_to_int64_scalbn(bfloat16, FloatRoundMode,
int, float_status *status);
+int8_t bfloat16_to_int8(bfloat16, float_status *status);
int16_t bfloat16_to_int16(bfloat16, float_status *status);
int32_t bfloat16_to_int32(bfloat16, float_status *status);
int64_t bfloat16_to_int64(bfloat16, float_status *status);
+int8_t bfloat16_to_int8_round_to_zero(bfloat16, float_status *status);
int16_t bfloat16_to_int16_round_to_zero(bfloat16, float_status *status);
int32_t bfloat16_to_int32_round_to_zero(bfloat16, float_status *status);
int64_t bfloat16_to_int64_round_to_zero(bfloat16, float_status *status);
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode,
int, float_status *status);
uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
@@ -388,24 +394,30 @@ uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
uint64_t bfloat16_to_uint64_scalbn(bfloat16 a, FloatRoundMode,
int, float_status *status);
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *status);
uint16_t bfloat16_to_uint16(bfloat16 a, float_status *status);
uint32_t bfloat16_to_uint32(bfloat16 a, float_status *status);
uint64_t bfloat16_to_uint64(bfloat16 a, float_status *status);
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *status);
uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *status);
uint32_t bfloat16_to_uint32_round_to_zero(bfloat16 a, float_status *status);
uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *status);
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int, float_status *status);
bfloat16 int16_to_bfloat16_scalbn(int16_t a, int, float_status *status);
bfloat16 int32_to_bfloat16_scalbn(int32_t a, int, float_status *status);
bfloat16 int64_to_bfloat16_scalbn(int64_t a, int, float_status *status);
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int, float_status *status);
bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int, float_status *status);
bfloat16 uint32_to_bfloat16_scalbn(uint32_t a, int, float_status *status);
bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int, float_status *status);
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status);
bfloat16 int16_to_bfloat16(int16_t a, float_status *status);
bfloat16 int32_to_bfloat16(int32_t a, float_status *status);
bfloat16 int64_to_bfloat16(int64_t a, float_status *status);
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status);
bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status);
bfloat16 uint32_to_bfloat16(uint32_t a, float_status *status);
bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status);
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 92a4234..648b5b3 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -227,17 +227,6 @@ struct CPUWatchpoint {
QTAILQ_ENTRY(CPUWatchpoint) entry;
};
-#ifdef CONFIG_PLUGIN
-/*
- * For plugins we sometime need to save the resolved iotlb data before
- * the memory regions get moved around by io_writex.
- */
-typedef struct SavedIOTLB {
- MemoryRegionSection *section;
- hwaddr mr_offset;
-} SavedIOTLB;
-#endif
-
struct KVMState;
struct kvm_run;
@@ -409,8 +398,6 @@ struct CPUState {
#ifdef CONFIG_PLUGIN
GArray *plugin_mem_cbs;
- /* saved iotlb data from io_writex */
- SavedIOTLB saved_iotlb;
#endif
/* TODO Move common fields from CPUArchState here. */
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
index 43165f2..71c1123 100644
--- a/include/qemu/plugin-memory.h
+++ b/include/qemu/plugin-memory.h
@@ -15,15 +15,8 @@
struct qemu_plugin_hwaddr {
bool is_io;
bool is_store;
- union {
- struct {
- MemoryRegionSection *section;
- hwaddr offset;
- } io;
- struct {
- void *hostaddr;
- } ram;
- } v;
+ hwaddr phys_addr;
+ MemoryRegion *mr;
};
/**
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 834b0e4..5abdbc3 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -129,7 +129,6 @@ typedef struct QString QString;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
typedef struct ReservedRegion ReservedRegion;
-typedef struct SavedIOTLB SavedIOTLB;
typedef struct SHPCDevice SHPCDevice;
typedef struct SSIBus SSIBus;
typedef struct TCGHelperInfo TCGHelperInfo;
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
index e2683d4..4db8a58 100644
--- a/include/tcg/tcg-op-gvec-common.h
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -374,6 +374,12 @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, int64_t c,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz);
/*
* Perform vector bit select: d = (b & a) | (c & ~a).
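
The two new entry points declared above differ only in how the scalar operand is supplied: tcg_gen_gvec_cmps reads it from a TCGv_i64, tcg_gen_gvec_cmpi takes an immediate. A hypothetical front-end call (dofs/aofs are illustrative vector offsets), mirroring the target/arm GEN_CMP0 rewrite further down:

/* Per-element "== 0" over a 16-byte vector of 32-bit lanes:
 * each destination element becomes -1 if equal, 0 otherwise. */
tcg_gen_gvec_cmpi(TCG_COND_EQ, MO_32, dofs, aofs, 0, 16, 16);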
diff --git a/plugins/api.c b/plugins/api.c
index 2078b16..5521b0a 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -316,22 +316,7 @@ uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
if (haddr) {
- if (!haddr->is_io) {
- RAMBlock *block;
- ram_addr_t offset;
- void *hostaddr = haddr->v.ram.hostaddr;
-
- block = qemu_ram_block_from_host(hostaddr, false, &offset);
- if (!block) {
- error_report("Bad host ram pointer %p", haddr->v.ram.hostaddr);
- abort();
- }
-
- return block->offset + offset + block->mr->addr;
- } else {
- MemoryRegionSection *mrs = haddr->v.io.section;
- return mrs->offset_within_address_space + haddr->v.io.offset;
- }
+ return haddr->phys_addr;
}
#endif
return 0;
@@ -341,13 +326,13 @@ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
{
#ifdef CONFIG_SOFTMMU
if (h && h->is_io) {
- MemoryRegionSection *mrs = h->v.io.section;
- if (!mrs->mr->name) {
- unsigned long maddr = 0xffffffff & (uintptr_t) mrs->mr;
- g_autofree char *temp = g_strdup_printf("anon%08lx", maddr);
+ MemoryRegion *mr = h->mr;
+ if (!mr->name) {
+ unsigned maddr = (uintptr_t)mr;
+ g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
return g_intern_string(temp);
} else {
- return g_intern_string(mrs->mr->name);
+ return g_intern_string(mr->name);
}
} else {
return g_intern_static_string("RAM");
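
With the simplified struct qemu_plugin_hwaddr, both accessors above become direct field reads (hw->phys_addr and hw->mr). A hedged sketch of how a TCG plugin memory callback consumes them; the callback name and its registration are illustrative and omitted, while the qemu_plugin_* calls themselves are existing plugin API:

#include <qemu-plugin.h>

static void vcpu_mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                        uint64_t vaddr, void *udata)
{
    struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);
    if (hw) {
        /* Physical address and device name now come straight from the
         * resolved TLB entry, with no SavedIOTLB fallback needed. */
        uint64_t pa = qemu_plugin_hwaddr_phys_addr(hw);
        qemu_plugin_outs(qemu_plugin_hwaddr_device_name(hw));
        (void)pa;
    }
}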
diff --git a/softmmu/async-teardown.c b/softmmu/async-teardown.c
index 62cdeb0..396963c 100644
--- a/softmmu/async-teardown.c
+++ b/softmmu/async-teardown.c
@@ -121,10 +121,7 @@ static void *new_stack_for_clone(void)
/* Allocate a new stack and get a pointer to its top. */
stack_ptr = qemu_alloc_stack(&stack_size);
-#if !defined(HOST_HPPA)
- /* The top is at the end of the area, except on HPPA. */
stack_ptr += stack_size;
-#endif
return stack_ptr;
}
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 976b704..d83a0e7 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -2943,54 +2943,16 @@ void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
}
-#define GEN_CMP0(NAME, COND) \
- static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
- { \
- tcg_gen_negsetcond_i32(COND, d, a, tcg_constant_i32(0)); \
- } \
- static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
- { \
- tcg_gen_negsetcond_i64(COND, d, a, tcg_constant_i64(0)); \
- } \
- static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
- { \
- TCGv_vec zero = tcg_constant_vec_matching(d, vece, 0); \
- tcg_gen_cmp_vec(COND, vece, d, a, zero); \
- } \
- void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
- uint32_t opr_sz, uint32_t max_sz) \
- { \
- const GVecGen2 op[4] = { \
- { .fno = gen_helper_gvec_##NAME##0_b, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .vece = MO_8 }, \
- { .fno = gen_helper_gvec_##NAME##0_h, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .vece = MO_16 }, \
- { .fni4 = gen_##NAME##0_i32, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .vece = MO_32 }, \
- { .fni8 = gen_##NAME##0_i64, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
- .vece = MO_64 }, \
- }; \
- tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]); \
- }
-
-static const TCGOpcode vecop_list_cmp[] = {
- INDEX_op_cmp_vec, 0
-};
-
-GEN_CMP0(ceq, TCG_COND_EQ)
-GEN_CMP0(cle, TCG_COND_LE)
-GEN_CMP0(cge, TCG_COND_GE)
-GEN_CMP0(clt, TCG_COND_LT)
-GEN_CMP0(cgt, TCG_COND_GT)
+#define GEN_CMP0(NAME, COND) \
+ void NAME(unsigned vece, uint32_t d, uint32_t m, \
+ uint32_t opr_sz, uint32_t max_sz) \
+ { tcg_gen_gvec_cmpi(COND, vece, d, m, 0, opr_sz, max_sz); }
+
+GEN_CMP0(gen_gvec_ceq0, TCG_COND_EQ)
+GEN_CMP0(gen_gvec_cle0, TCG_COND_LE)
+GEN_CMP0(gen_gvec_cge0, TCG_COND_GE)
+GEN_CMP0(gen_gvec_clt0, TCG_COND_LT)
+GEN_CMP0(gen_gvec_cgt0, TCG_COND_GT)
#undef GEN_CMP0
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 0931a69..06ea3c7 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -272,7 +272,7 @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
}
}
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -602,6 +602,10 @@ typedef enum {
DMB_ISH = 0xd50338bf,
DMB_LD = 0x00000100,
DMB_ST = 0x00000200,
+
+ BTI_C = 0xd503245f,
+ BTI_J = 0xd503249f,
+ BTI_JC = 0xd50324df,
} AArch64Insn;
static inline uint32_t tcg_in32(TCGContext *s)
@@ -843,6 +847,17 @@ static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
| rn << 5 | (rd & 0x1f));
}
+static void tcg_out_bti(TCGContext *s, AArch64Insn insn)
+{
+ /*
+ * While BTI insns are nops on hosts without FEAT_BTI,
+ * there is no point in emitting them in that case either.
+ */
+ if (cpuinfo & CPUINFO_BTI) {
+ tcg_out32(s, insn);
+ }
+}
+
/* Register to register move using ORR (shifted register with no shift). */
static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
{
@@ -1351,18 +1366,6 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
tcg_out_insn(s, 3206, B, offset);
}
-static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
-{
- ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
- if (offset == sextract64(offset, 0, 26)) {
- tcg_out_insn(s, 3206, B, offset);
- } else {
- /* Choose X9 as a call-clobbered non-LR temporary. */
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
- tcg_out_insn(s, 3207, BR, TCG_REG_X9);
- }
-}
-
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target)
{
ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
@@ -1947,12 +1950,28 @@ static const tcg_insn_unit *tb_ret_addr;
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
+ const tcg_insn_unit *target;
+ ptrdiff_t offset;
+
/* Reuse the zeroing that exists for goto_ptr. */
if (a0 == 0) {
- tcg_out_goto_long(s, tcg_code_gen_epilogue);
+ target = tcg_code_gen_epilogue;
} else {
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
- tcg_out_goto_long(s, tb_ret_addr);
+ target = tb_ret_addr;
+ }
+
+ offset = tcg_pcrel_diff(s, target) >> 2;
+ if (offset == sextract64(offset, 0, 26)) {
+ tcg_out_insn(s, 3206, B, offset);
+ } else {
+ /*
+ * Only x16/x17 generate BTI type Jump (2),
+ * other registers generate BTI type Jump|Call (3).
+ */
+ QEMU_BUILD_BUG_ON(TCG_REG_TMP0 != TCG_REG_X16);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
}
}
@@ -1970,6 +1989,7 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
tcg_out32(s, I3206_B);
tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
set_jmp_reset_offset(s, which);
+ tcg_out_bti(s, BTI_J);
}
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
@@ -3074,6 +3094,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
{
TCGReg r;
+ tcg_out_bti(s, BTI_C);
+
/* Push (FP, LR) and allocate space for all saved registers. */
tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
TCG_REG_SP, -PUSH_SIZE, 1, 1);
@@ -3114,10 +3136,12 @@ static void tcg_target_qemu_prologue(TCGContext *s)
* and fall through to the rest of the epilogue.
*/
tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_bti(s, BTI_J);
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
/* TB epilogue */
tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_bti(s, BTI_J);
/* Remove TCG locals stack space. */
tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
@@ -3135,6 +3159,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_insn(s, 3207, RET, TCG_REG_LR);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ tcg_out_bti(s, BTI_J);
+}
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
int i;
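
The BTI landing pads above (BTI_C/BTI_J/BTI_JC) live in the HINT space, so they execute as NOPs on hosts without FEAT_BTI; tcg_out_bti() therefore only emits them when CPUINFO_BTI is reported, and the companion "tcg: Map code_gen_buffer with PROT_BTI" commit makes the executable mapping guarded so the hardware actually checks them. The in-tree probe lives in util/cpuinfo-aarch64.c (see the diffstat); the following is only an illustrative, aarch64-Linux-only sketch of such a probe, not the QEMU code:

#include <sys/auxv.h>
#include <asm/hwcap.h>          /* HWCAP2_BTI on aarch64 Linux */

#ifndef HWCAP2_BTI
#define HWCAP2_BTI (1 << 17)    /* fallback: bit value from the Linux UAPI headers */
#endif

static int host_has_bti(void)
{
    return (getauxval(AT_HWCAP2) & HWCAP2_BTI) != 0;
}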
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index acb5f23..b1d5636 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -509,7 +509,7 @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
* mov operand2: values represented with x << (2 * y), x < 0x100
* add, sub, eor...: ditto
*/
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -2962,6 +2962,11 @@ static void tcg_out_epilogue(TCGContext *s)
(1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
typedef struct {
DebugFrameHeader h;
uint8_t fde_def_cfa[4];
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 0c3d1e4..4e47151 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -198,7 +198,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -4191,6 +4191,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_opc(s, OPC_RET, 0, 0, 0);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
memset(p, 0x90, count);
diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
index b5bb0c5..ee3b483 100644
--- a/tcg/loongarch64/tcg-insn-defs.c.inc
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
@@ -4,7 +4,7 @@
*
* This file is auto-generated by genqemutcgdefs from
* https://github.com/loongson-community/loongarch-opcodes,
- * from commit 25ca7effe9d88101c1cf96c4005423643386d81f.
+ * from commit 8027da9a8157a8b47fc48ff1def292e09c5668bd.
* DO NOT EDIT.
*/
@@ -74,6 +74,60 @@ typedef enum {
OPC_ANDI = 0x03400000,
OPC_ORI = 0x03800000,
OPC_XORI = 0x03c00000,
+ OPC_VFMADD_S = 0x09100000,
+ OPC_VFMADD_D = 0x09200000,
+ OPC_VFMSUB_S = 0x09500000,
+ OPC_VFMSUB_D = 0x09600000,
+ OPC_VFNMADD_S = 0x09900000,
+ OPC_VFNMADD_D = 0x09a00000,
+ OPC_VFNMSUB_S = 0x09d00000,
+ OPC_VFNMSUB_D = 0x09e00000,
+ OPC_VFCMP_CAF_S = 0x0c500000,
+ OPC_VFCMP_SAF_S = 0x0c508000,
+ OPC_VFCMP_CLT_S = 0x0c510000,
+ OPC_VFCMP_SLT_S = 0x0c518000,
+ OPC_VFCMP_CEQ_S = 0x0c520000,
+ OPC_VFCMP_SEQ_S = 0x0c528000,
+ OPC_VFCMP_CLE_S = 0x0c530000,
+ OPC_VFCMP_SLE_S = 0x0c538000,
+ OPC_VFCMP_CUN_S = 0x0c540000,
+ OPC_VFCMP_SUN_S = 0x0c548000,
+ OPC_VFCMP_CULT_S = 0x0c550000,
+ OPC_VFCMP_SULT_S = 0x0c558000,
+ OPC_VFCMP_CUEQ_S = 0x0c560000,
+ OPC_VFCMP_SUEQ_S = 0x0c568000,
+ OPC_VFCMP_CULE_S = 0x0c570000,
+ OPC_VFCMP_SULE_S = 0x0c578000,
+ OPC_VFCMP_CNE_S = 0x0c580000,
+ OPC_VFCMP_SNE_S = 0x0c588000,
+ OPC_VFCMP_COR_S = 0x0c5a0000,
+ OPC_VFCMP_SOR_S = 0x0c5a8000,
+ OPC_VFCMP_CUNE_S = 0x0c5c0000,
+ OPC_VFCMP_SUNE_S = 0x0c5c8000,
+ OPC_VFCMP_CAF_D = 0x0c600000,
+ OPC_VFCMP_SAF_D = 0x0c608000,
+ OPC_VFCMP_CLT_D = 0x0c610000,
+ OPC_VFCMP_SLT_D = 0x0c618000,
+ OPC_VFCMP_CEQ_D = 0x0c620000,
+ OPC_VFCMP_SEQ_D = 0x0c628000,
+ OPC_VFCMP_CLE_D = 0x0c630000,
+ OPC_VFCMP_SLE_D = 0x0c638000,
+ OPC_VFCMP_CUN_D = 0x0c640000,
+ OPC_VFCMP_SUN_D = 0x0c648000,
+ OPC_VFCMP_CULT_D = 0x0c650000,
+ OPC_VFCMP_SULT_D = 0x0c658000,
+ OPC_VFCMP_CUEQ_D = 0x0c660000,
+ OPC_VFCMP_SUEQ_D = 0x0c668000,
+ OPC_VFCMP_CULE_D = 0x0c670000,
+ OPC_VFCMP_SULE_D = 0x0c678000,
+ OPC_VFCMP_CNE_D = 0x0c680000,
+ OPC_VFCMP_SNE_D = 0x0c688000,
+ OPC_VFCMP_COR_D = 0x0c6a0000,
+ OPC_VFCMP_SOR_D = 0x0c6a8000,
+ OPC_VFCMP_CUNE_D = 0x0c6c0000,
+ OPC_VFCMP_SUNE_D = 0x0c6c8000,
+ OPC_VBITSEL_V = 0x0d100000,
+ OPC_VSHUF_B = 0x0d500000,
OPC_ADDU16I_D = 0x10000000,
OPC_LU12I_W = 0x14000000,
OPC_CU32I_D = 0x16000000,
@@ -92,6 +146,16 @@ typedef enum {
OPC_LD_BU = 0x2a000000,
OPC_LD_HU = 0x2a400000,
OPC_LD_WU = 0x2a800000,
+ OPC_VLD = 0x2c000000,
+ OPC_VST = 0x2c400000,
+ OPC_VLDREPL_D = 0x30100000,
+ OPC_VLDREPL_W = 0x30200000,
+ OPC_VLDREPL_H = 0x30400000,
+ OPC_VLDREPL_B = 0x30800000,
+ OPC_VSTELM_D = 0x31100000,
+ OPC_VSTELM_W = 0x31200000,
+ OPC_VSTELM_H = 0x31400000,
+ OPC_VSTELM_B = 0x31800000,
OPC_LDX_B = 0x38000000,
OPC_LDX_H = 0x38040000,
OPC_LDX_W = 0x38080000,
@@ -103,6 +167,8 @@ typedef enum {
OPC_LDX_BU = 0x38200000,
OPC_LDX_HU = 0x38240000,
OPC_LDX_WU = 0x38280000,
+ OPC_VLDX = 0x38400000,
+ OPC_VSTX = 0x38440000,
OPC_DBAR = 0x38720000,
OPC_JIRL = 0x4c000000,
OPC_B = 0x50000000,
@@ -113,6 +179,652 @@ typedef enum {
OPC_BLE = 0x64000000,
OPC_BGTU = 0x68000000,
OPC_BLEU = 0x6c000000,
+ OPC_VSEQ_B = 0x70000000,
+ OPC_VSEQ_H = 0x70008000,
+ OPC_VSEQ_W = 0x70010000,
+ OPC_VSEQ_D = 0x70018000,
+ OPC_VSLE_B = 0x70020000,
+ OPC_VSLE_H = 0x70028000,
+ OPC_VSLE_W = 0x70030000,
+ OPC_VSLE_D = 0x70038000,
+ OPC_VSLE_BU = 0x70040000,
+ OPC_VSLE_HU = 0x70048000,
+ OPC_VSLE_WU = 0x70050000,
+ OPC_VSLE_DU = 0x70058000,
+ OPC_VSLT_B = 0x70060000,
+ OPC_VSLT_H = 0x70068000,
+ OPC_VSLT_W = 0x70070000,
+ OPC_VSLT_D = 0x70078000,
+ OPC_VSLT_BU = 0x70080000,
+ OPC_VSLT_HU = 0x70088000,
+ OPC_VSLT_WU = 0x70090000,
+ OPC_VSLT_DU = 0x70098000,
+ OPC_VADD_B = 0x700a0000,
+ OPC_VADD_H = 0x700a8000,
+ OPC_VADD_W = 0x700b0000,
+ OPC_VADD_D = 0x700b8000,
+ OPC_VSUB_B = 0x700c0000,
+ OPC_VSUB_H = 0x700c8000,
+ OPC_VSUB_W = 0x700d0000,
+ OPC_VSUB_D = 0x700d8000,
+ OPC_VADDWEV_H_B = 0x701e0000,
+ OPC_VADDWEV_W_H = 0x701e8000,
+ OPC_VADDWEV_D_W = 0x701f0000,
+ OPC_VADDWEV_Q_D = 0x701f8000,
+ OPC_VSUBWEV_H_B = 0x70200000,
+ OPC_VSUBWEV_W_H = 0x70208000,
+ OPC_VSUBWEV_D_W = 0x70210000,
+ OPC_VSUBWEV_Q_D = 0x70218000,
+ OPC_VADDWOD_H_B = 0x70220000,
+ OPC_VADDWOD_W_H = 0x70228000,
+ OPC_VADDWOD_D_W = 0x70230000,
+ OPC_VADDWOD_Q_D = 0x70238000,
+ OPC_VSUBWOD_H_B = 0x70240000,
+ OPC_VSUBWOD_W_H = 0x70248000,
+ OPC_VSUBWOD_D_W = 0x70250000,
+ OPC_VSUBWOD_Q_D = 0x70258000,
+ OPC_VADDWEV_H_BU = 0x702e0000,
+ OPC_VADDWEV_W_HU = 0x702e8000,
+ OPC_VADDWEV_D_WU = 0x702f0000,
+ OPC_VADDWEV_Q_DU = 0x702f8000,
+ OPC_VSUBWEV_H_BU = 0x70300000,
+ OPC_VSUBWEV_W_HU = 0x70308000,
+ OPC_VSUBWEV_D_WU = 0x70310000,
+ OPC_VSUBWEV_Q_DU = 0x70318000,
+ OPC_VADDWOD_H_BU = 0x70320000,
+ OPC_VADDWOD_W_HU = 0x70328000,
+ OPC_VADDWOD_D_WU = 0x70330000,
+ OPC_VADDWOD_Q_DU = 0x70338000,
+ OPC_VSUBWOD_H_BU = 0x70340000,
+ OPC_VSUBWOD_W_HU = 0x70348000,
+ OPC_VSUBWOD_D_WU = 0x70350000,
+ OPC_VSUBWOD_Q_DU = 0x70358000,
+ OPC_VADDWEV_H_BU_B = 0x703e0000,
+ OPC_VADDWEV_W_HU_H = 0x703e8000,
+ OPC_VADDWEV_D_WU_W = 0x703f0000,
+ OPC_VADDWEV_Q_DU_D = 0x703f8000,
+ OPC_VADDWOD_H_BU_B = 0x70400000,
+ OPC_VADDWOD_W_HU_H = 0x70408000,
+ OPC_VADDWOD_D_WU_W = 0x70410000,
+ OPC_VADDWOD_Q_DU_D = 0x70418000,
+ OPC_VSADD_B = 0x70460000,
+ OPC_VSADD_H = 0x70468000,
+ OPC_VSADD_W = 0x70470000,
+ OPC_VSADD_D = 0x70478000,
+ OPC_VSSUB_B = 0x70480000,
+ OPC_VSSUB_H = 0x70488000,
+ OPC_VSSUB_W = 0x70490000,
+ OPC_VSSUB_D = 0x70498000,
+ OPC_VSADD_BU = 0x704a0000,
+ OPC_VSADD_HU = 0x704a8000,
+ OPC_VSADD_WU = 0x704b0000,
+ OPC_VSADD_DU = 0x704b8000,
+ OPC_VSSUB_BU = 0x704c0000,
+ OPC_VSSUB_HU = 0x704c8000,
+ OPC_VSSUB_WU = 0x704d0000,
+ OPC_VSSUB_DU = 0x704d8000,
+ OPC_VHADDW_H_B = 0x70540000,
+ OPC_VHADDW_W_H = 0x70548000,
+ OPC_VHADDW_D_W = 0x70550000,
+ OPC_VHADDW_Q_D = 0x70558000,
+ OPC_VHSUBW_H_B = 0x70560000,
+ OPC_VHSUBW_W_H = 0x70568000,
+ OPC_VHSUBW_D_W = 0x70570000,
+ OPC_VHSUBW_Q_D = 0x70578000,
+ OPC_VHADDW_HU_BU = 0x70580000,
+ OPC_VHADDW_WU_HU = 0x70588000,
+ OPC_VHADDW_DU_WU = 0x70590000,
+ OPC_VHADDW_QU_DU = 0x70598000,
+ OPC_VHSUBW_HU_BU = 0x705a0000,
+ OPC_VHSUBW_WU_HU = 0x705a8000,
+ OPC_VHSUBW_DU_WU = 0x705b0000,
+ OPC_VHSUBW_QU_DU = 0x705b8000,
+ OPC_VADDA_B = 0x705c0000,
+ OPC_VADDA_H = 0x705c8000,
+ OPC_VADDA_W = 0x705d0000,
+ OPC_VADDA_D = 0x705d8000,
+ OPC_VABSD_B = 0x70600000,
+ OPC_VABSD_H = 0x70608000,
+ OPC_VABSD_W = 0x70610000,
+ OPC_VABSD_D = 0x70618000,
+ OPC_VABSD_BU = 0x70620000,
+ OPC_VABSD_HU = 0x70628000,
+ OPC_VABSD_WU = 0x70630000,
+ OPC_VABSD_DU = 0x70638000,
+ OPC_VAVG_B = 0x70640000,
+ OPC_VAVG_H = 0x70648000,
+ OPC_VAVG_W = 0x70650000,
+ OPC_VAVG_D = 0x70658000,
+ OPC_VAVG_BU = 0x70660000,
+ OPC_VAVG_HU = 0x70668000,
+ OPC_VAVG_WU = 0x70670000,
+ OPC_VAVG_DU = 0x70678000,
+ OPC_VAVGR_B = 0x70680000,
+ OPC_VAVGR_H = 0x70688000,
+ OPC_VAVGR_W = 0x70690000,
+ OPC_VAVGR_D = 0x70698000,
+ OPC_VAVGR_BU = 0x706a0000,
+ OPC_VAVGR_HU = 0x706a8000,
+ OPC_VAVGR_WU = 0x706b0000,
+ OPC_VAVGR_DU = 0x706b8000,
+ OPC_VMAX_B = 0x70700000,
+ OPC_VMAX_H = 0x70708000,
+ OPC_VMAX_W = 0x70710000,
+ OPC_VMAX_D = 0x70718000,
+ OPC_VMIN_B = 0x70720000,
+ OPC_VMIN_H = 0x70728000,
+ OPC_VMIN_W = 0x70730000,
+ OPC_VMIN_D = 0x70738000,
+ OPC_VMAX_BU = 0x70740000,
+ OPC_VMAX_HU = 0x70748000,
+ OPC_VMAX_WU = 0x70750000,
+ OPC_VMAX_DU = 0x70758000,
+ OPC_VMIN_BU = 0x70760000,
+ OPC_VMIN_HU = 0x70768000,
+ OPC_VMIN_WU = 0x70770000,
+ OPC_VMIN_DU = 0x70778000,
+ OPC_VMUL_B = 0x70840000,
+ OPC_VMUL_H = 0x70848000,
+ OPC_VMUL_W = 0x70850000,
+ OPC_VMUL_D = 0x70858000,
+ OPC_VMUH_B = 0x70860000,
+ OPC_VMUH_H = 0x70868000,
+ OPC_VMUH_W = 0x70870000,
+ OPC_VMUH_D = 0x70878000,
+ OPC_VMUH_BU = 0x70880000,
+ OPC_VMUH_HU = 0x70888000,
+ OPC_VMUH_WU = 0x70890000,
+ OPC_VMUH_DU = 0x70898000,
+ OPC_VMULWEV_H_B = 0x70900000,
+ OPC_VMULWEV_W_H = 0x70908000,
+ OPC_VMULWEV_D_W = 0x70910000,
+ OPC_VMULWEV_Q_D = 0x70918000,
+ OPC_VMULWOD_H_B = 0x70920000,
+ OPC_VMULWOD_W_H = 0x70928000,
+ OPC_VMULWOD_D_W = 0x70930000,
+ OPC_VMULWOD_Q_D = 0x70938000,
+ OPC_VMULWEV_H_BU = 0x70980000,
+ OPC_VMULWEV_W_HU = 0x70988000,
+ OPC_VMULWEV_D_WU = 0x70990000,
+ OPC_VMULWEV_Q_DU = 0x70998000,
+ OPC_VMULWOD_H_BU = 0x709a0000,
+ OPC_VMULWOD_W_HU = 0x709a8000,
+ OPC_VMULWOD_D_WU = 0x709b0000,
+ OPC_VMULWOD_Q_DU = 0x709b8000,
+ OPC_VMULWEV_H_BU_B = 0x70a00000,
+ OPC_VMULWEV_W_HU_H = 0x70a08000,
+ OPC_VMULWEV_D_WU_W = 0x70a10000,
+ OPC_VMULWEV_Q_DU_D = 0x70a18000,
+ OPC_VMULWOD_H_BU_B = 0x70a20000,
+ OPC_VMULWOD_W_HU_H = 0x70a28000,
+ OPC_VMULWOD_D_WU_W = 0x70a30000,
+ OPC_VMULWOD_Q_DU_D = 0x70a38000,
+ OPC_VMADD_B = 0x70a80000,
+ OPC_VMADD_H = 0x70a88000,
+ OPC_VMADD_W = 0x70a90000,
+ OPC_VMADD_D = 0x70a98000,
+ OPC_VMSUB_B = 0x70aa0000,
+ OPC_VMSUB_H = 0x70aa8000,
+ OPC_VMSUB_W = 0x70ab0000,
+ OPC_VMSUB_D = 0x70ab8000,
+ OPC_VMADDWEV_H_B = 0x70ac0000,
+ OPC_VMADDWEV_W_H = 0x70ac8000,
+ OPC_VMADDWEV_D_W = 0x70ad0000,
+ OPC_VMADDWEV_Q_D = 0x70ad8000,
+ OPC_VMADDWOD_H_B = 0x70ae0000,
+ OPC_VMADDWOD_W_H = 0x70ae8000,
+ OPC_VMADDWOD_D_W = 0x70af0000,
+ OPC_VMADDWOD_Q_D = 0x70af8000,
+ OPC_VMADDWEV_H_BU = 0x70b40000,
+ OPC_VMADDWEV_W_HU = 0x70b48000,
+ OPC_VMADDWEV_D_WU = 0x70b50000,
+ OPC_VMADDWEV_Q_DU = 0x70b58000,
+ OPC_VMADDWOD_H_BU = 0x70b60000,
+ OPC_VMADDWOD_W_HU = 0x70b68000,
+ OPC_VMADDWOD_D_WU = 0x70b70000,
+ OPC_VMADDWOD_Q_DU = 0x70b78000,
+ OPC_VMADDWEV_H_BU_B = 0x70bc0000,
+ OPC_VMADDWEV_W_HU_H = 0x70bc8000,
+ OPC_VMADDWEV_D_WU_W = 0x70bd0000,
+ OPC_VMADDWEV_Q_DU_D = 0x70bd8000,
+ OPC_VMADDWOD_H_BU_B = 0x70be0000,
+ OPC_VMADDWOD_W_HU_H = 0x70be8000,
+ OPC_VMADDWOD_D_WU_W = 0x70bf0000,
+ OPC_VMADDWOD_Q_DU_D = 0x70bf8000,
+ OPC_VDIV_B = 0x70e00000,
+ OPC_VDIV_H = 0x70e08000,
+ OPC_VDIV_W = 0x70e10000,
+ OPC_VDIV_D = 0x70e18000,
+ OPC_VMOD_B = 0x70e20000,
+ OPC_VMOD_H = 0x70e28000,
+ OPC_VMOD_W = 0x70e30000,
+ OPC_VMOD_D = 0x70e38000,
+ OPC_VDIV_BU = 0x70e40000,
+ OPC_VDIV_HU = 0x70e48000,
+ OPC_VDIV_WU = 0x70e50000,
+ OPC_VDIV_DU = 0x70e58000,
+ OPC_VMOD_BU = 0x70e60000,
+ OPC_VMOD_HU = 0x70e68000,
+ OPC_VMOD_WU = 0x70e70000,
+ OPC_VMOD_DU = 0x70e78000,
+ OPC_VSLL_B = 0x70e80000,
+ OPC_VSLL_H = 0x70e88000,
+ OPC_VSLL_W = 0x70e90000,
+ OPC_VSLL_D = 0x70e98000,
+ OPC_VSRL_B = 0x70ea0000,
+ OPC_VSRL_H = 0x70ea8000,
+ OPC_VSRL_W = 0x70eb0000,
+ OPC_VSRL_D = 0x70eb8000,
+ OPC_VSRA_B = 0x70ec0000,
+ OPC_VSRA_H = 0x70ec8000,
+ OPC_VSRA_W = 0x70ed0000,
+ OPC_VSRA_D = 0x70ed8000,
+ OPC_VROTR_B = 0x70ee0000,
+ OPC_VROTR_H = 0x70ee8000,
+ OPC_VROTR_W = 0x70ef0000,
+ OPC_VROTR_D = 0x70ef8000,
+ OPC_VSRLR_B = 0x70f00000,
+ OPC_VSRLR_H = 0x70f08000,
+ OPC_VSRLR_W = 0x70f10000,
+ OPC_VSRLR_D = 0x70f18000,
+ OPC_VSRAR_B = 0x70f20000,
+ OPC_VSRAR_H = 0x70f28000,
+ OPC_VSRAR_W = 0x70f30000,
+ OPC_VSRAR_D = 0x70f38000,
+ OPC_VSRLN_B_H = 0x70f48000,
+ OPC_VSRLN_H_W = 0x70f50000,
+ OPC_VSRLN_W_D = 0x70f58000,
+ OPC_VSRAN_B_H = 0x70f68000,
+ OPC_VSRAN_H_W = 0x70f70000,
+ OPC_VSRAN_W_D = 0x70f78000,
+ OPC_VSRLRN_B_H = 0x70f88000,
+ OPC_VSRLRN_H_W = 0x70f90000,
+ OPC_VSRLRN_W_D = 0x70f98000,
+ OPC_VSRARN_B_H = 0x70fa8000,
+ OPC_VSRARN_H_W = 0x70fb0000,
+ OPC_VSRARN_W_D = 0x70fb8000,
+ OPC_VSSRLN_B_H = 0x70fc8000,
+ OPC_VSSRLN_H_W = 0x70fd0000,
+ OPC_VSSRLN_W_D = 0x70fd8000,
+ OPC_VSSRAN_B_H = 0x70fe8000,
+ OPC_VSSRAN_H_W = 0x70ff0000,
+ OPC_VSSRAN_W_D = 0x70ff8000,
+ OPC_VSSRLRN_B_H = 0x71008000,
+ OPC_VSSRLRN_H_W = 0x71010000,
+ OPC_VSSRLRN_W_D = 0x71018000,
+ OPC_VSSRARN_B_H = 0x71028000,
+ OPC_VSSRARN_H_W = 0x71030000,
+ OPC_VSSRARN_W_D = 0x71038000,
+ OPC_VSSRLN_BU_H = 0x71048000,
+ OPC_VSSRLN_HU_W = 0x71050000,
+ OPC_VSSRLN_WU_D = 0x71058000,
+ OPC_VSSRAN_BU_H = 0x71068000,
+ OPC_VSSRAN_HU_W = 0x71070000,
+ OPC_VSSRAN_WU_D = 0x71078000,
+ OPC_VSSRLRN_BU_H = 0x71088000,
+ OPC_VSSRLRN_HU_W = 0x71090000,
+ OPC_VSSRLRN_WU_D = 0x71098000,
+ OPC_VSSRARN_BU_H = 0x710a8000,
+ OPC_VSSRARN_HU_W = 0x710b0000,
+ OPC_VSSRARN_WU_D = 0x710b8000,
+ OPC_VBITCLR_B = 0x710c0000,
+ OPC_VBITCLR_H = 0x710c8000,
+ OPC_VBITCLR_W = 0x710d0000,
+ OPC_VBITCLR_D = 0x710d8000,
+ OPC_VBITSET_B = 0x710e0000,
+ OPC_VBITSET_H = 0x710e8000,
+ OPC_VBITSET_W = 0x710f0000,
+ OPC_VBITSET_D = 0x710f8000,
+ OPC_VBITREV_B = 0x71100000,
+ OPC_VBITREV_H = 0x71108000,
+ OPC_VBITREV_W = 0x71110000,
+ OPC_VBITREV_D = 0x71118000,
+ OPC_VPACKEV_B = 0x71160000,
+ OPC_VPACKEV_H = 0x71168000,
+ OPC_VPACKEV_W = 0x71170000,
+ OPC_VPACKEV_D = 0x71178000,
+ OPC_VPACKOD_B = 0x71180000,
+ OPC_VPACKOD_H = 0x71188000,
+ OPC_VPACKOD_W = 0x71190000,
+ OPC_VPACKOD_D = 0x71198000,
+ OPC_VILVL_B = 0x711a0000,
+ OPC_VILVL_H = 0x711a8000,
+ OPC_VILVL_W = 0x711b0000,
+ OPC_VILVL_D = 0x711b8000,
+ OPC_VILVH_B = 0x711c0000,
+ OPC_VILVH_H = 0x711c8000,
+ OPC_VILVH_W = 0x711d0000,
+ OPC_VILVH_D = 0x711d8000,
+ OPC_VPICKEV_B = 0x711e0000,
+ OPC_VPICKEV_H = 0x711e8000,
+ OPC_VPICKEV_W = 0x711f0000,
+ OPC_VPICKEV_D = 0x711f8000,
+ OPC_VPICKOD_B = 0x71200000,
+ OPC_VPICKOD_H = 0x71208000,
+ OPC_VPICKOD_W = 0x71210000,
+ OPC_VPICKOD_D = 0x71218000,
+ OPC_VREPLVE_B = 0x71220000,
+ OPC_VREPLVE_H = 0x71228000,
+ OPC_VREPLVE_W = 0x71230000,
+ OPC_VREPLVE_D = 0x71238000,
+ OPC_VAND_V = 0x71260000,
+ OPC_VOR_V = 0x71268000,
+ OPC_VXOR_V = 0x71270000,
+ OPC_VNOR_V = 0x71278000,
+ OPC_VANDN_V = 0x71280000,
+ OPC_VORN_V = 0x71288000,
+ OPC_VFRSTP_B = 0x712b0000,
+ OPC_VFRSTP_H = 0x712b8000,
+ OPC_VADD_Q = 0x712d0000,
+ OPC_VSUB_Q = 0x712d8000,
+ OPC_VSIGNCOV_B = 0x712e0000,
+ OPC_VSIGNCOV_H = 0x712e8000,
+ OPC_VSIGNCOV_W = 0x712f0000,
+ OPC_VSIGNCOV_D = 0x712f8000,
+ OPC_VFADD_S = 0x71308000,
+ OPC_VFADD_D = 0x71310000,
+ OPC_VFSUB_S = 0x71328000,
+ OPC_VFSUB_D = 0x71330000,
+ OPC_VFMUL_S = 0x71388000,
+ OPC_VFMUL_D = 0x71390000,
+ OPC_VFDIV_S = 0x713a8000,
+ OPC_VFDIV_D = 0x713b0000,
+ OPC_VFMAX_S = 0x713c8000,
+ OPC_VFMAX_D = 0x713d0000,
+ OPC_VFMIN_S = 0x713e8000,
+ OPC_VFMIN_D = 0x713f0000,
+ OPC_VFMAXA_S = 0x71408000,
+ OPC_VFMAXA_D = 0x71410000,
+ OPC_VFMINA_S = 0x71428000,
+ OPC_VFMINA_D = 0x71430000,
+ OPC_VFCVT_H_S = 0x71460000,
+ OPC_VFCVT_S_D = 0x71468000,
+ OPC_VFFINT_S_L = 0x71480000,
+ OPC_VFTINT_W_D = 0x71498000,
+ OPC_VFTINTRM_W_D = 0x714a0000,
+ OPC_VFTINTRP_W_D = 0x714a8000,
+ OPC_VFTINTRZ_W_D = 0x714b0000,
+ OPC_VFTINTRNE_W_D = 0x714b8000,
+ OPC_VSHUF_H = 0x717a8000,
+ OPC_VSHUF_W = 0x717b0000,
+ OPC_VSHUF_D = 0x717b8000,
+ OPC_VSEQI_B = 0x72800000,
+ OPC_VSEQI_H = 0x72808000,
+ OPC_VSEQI_W = 0x72810000,
+ OPC_VSEQI_D = 0x72818000,
+ OPC_VSLEI_B = 0x72820000,
+ OPC_VSLEI_H = 0x72828000,
+ OPC_VSLEI_W = 0x72830000,
+ OPC_VSLEI_D = 0x72838000,
+ OPC_VSLEI_BU = 0x72840000,
+ OPC_VSLEI_HU = 0x72848000,
+ OPC_VSLEI_WU = 0x72850000,
+ OPC_VSLEI_DU = 0x72858000,
+ OPC_VSLTI_B = 0x72860000,
+ OPC_VSLTI_H = 0x72868000,
+ OPC_VSLTI_W = 0x72870000,
+ OPC_VSLTI_D = 0x72878000,
+ OPC_VSLTI_BU = 0x72880000,
+ OPC_VSLTI_HU = 0x72888000,
+ OPC_VSLTI_WU = 0x72890000,
+ OPC_VSLTI_DU = 0x72898000,
+ OPC_VADDI_BU = 0x728a0000,
+ OPC_VADDI_HU = 0x728a8000,
+ OPC_VADDI_WU = 0x728b0000,
+ OPC_VADDI_DU = 0x728b8000,
+ OPC_VSUBI_BU = 0x728c0000,
+ OPC_VSUBI_HU = 0x728c8000,
+ OPC_VSUBI_WU = 0x728d0000,
+ OPC_VSUBI_DU = 0x728d8000,
+ OPC_VBSLL_V = 0x728e0000,
+ OPC_VBSRL_V = 0x728e8000,
+ OPC_VMAXI_B = 0x72900000,
+ OPC_VMAXI_H = 0x72908000,
+ OPC_VMAXI_W = 0x72910000,
+ OPC_VMAXI_D = 0x72918000,
+ OPC_VMINI_B = 0x72920000,
+ OPC_VMINI_H = 0x72928000,
+ OPC_VMINI_W = 0x72930000,
+ OPC_VMINI_D = 0x72938000,
+ OPC_VMAXI_BU = 0x72940000,
+ OPC_VMAXI_HU = 0x72948000,
+ OPC_VMAXI_WU = 0x72950000,
+ OPC_VMAXI_DU = 0x72958000,
+ OPC_VMINI_BU = 0x72960000,
+ OPC_VMINI_HU = 0x72968000,
+ OPC_VMINI_WU = 0x72970000,
+ OPC_VMINI_DU = 0x72978000,
+ OPC_VFRSTPI_B = 0x729a0000,
+ OPC_VFRSTPI_H = 0x729a8000,
+ OPC_VCLO_B = 0x729c0000,
+ OPC_VCLO_H = 0x729c0400,
+ OPC_VCLO_W = 0x729c0800,
+ OPC_VCLO_D = 0x729c0c00,
+ OPC_VCLZ_B = 0x729c1000,
+ OPC_VCLZ_H = 0x729c1400,
+ OPC_VCLZ_W = 0x729c1800,
+ OPC_VCLZ_D = 0x729c1c00,
+ OPC_VPCNT_B = 0x729c2000,
+ OPC_VPCNT_H = 0x729c2400,
+ OPC_VPCNT_W = 0x729c2800,
+ OPC_VPCNT_D = 0x729c2c00,
+ OPC_VNEG_B = 0x729c3000,
+ OPC_VNEG_H = 0x729c3400,
+ OPC_VNEG_W = 0x729c3800,
+ OPC_VNEG_D = 0x729c3c00,
+ OPC_VMSKLTZ_B = 0x729c4000,
+ OPC_VMSKLTZ_H = 0x729c4400,
+ OPC_VMSKLTZ_W = 0x729c4800,
+ OPC_VMSKLTZ_D = 0x729c4c00,
+ OPC_VMSKGEZ_B = 0x729c5000,
+ OPC_VMSKNZ_B = 0x729c6000,
+ OPC_VSETEQZ_V = 0x729c9800,
+ OPC_VSETNEZ_V = 0x729c9c00,
+ OPC_VSETANYEQZ_B = 0x729ca000,
+ OPC_VSETANYEQZ_H = 0x729ca400,
+ OPC_VSETANYEQZ_W = 0x729ca800,
+ OPC_VSETANYEQZ_D = 0x729cac00,
+ OPC_VSETALLNEZ_B = 0x729cb000,
+ OPC_VSETALLNEZ_H = 0x729cb400,
+ OPC_VSETALLNEZ_W = 0x729cb800,
+ OPC_VSETALLNEZ_D = 0x729cbc00,
+ OPC_VFLOGB_S = 0x729cc400,
+ OPC_VFLOGB_D = 0x729cc800,
+ OPC_VFCLASS_S = 0x729cd400,
+ OPC_VFCLASS_D = 0x729cd800,
+ OPC_VFSQRT_S = 0x729ce400,
+ OPC_VFSQRT_D = 0x729ce800,
+ OPC_VFRECIP_S = 0x729cf400,
+ OPC_VFRECIP_D = 0x729cf800,
+ OPC_VFRSQRT_S = 0x729d0400,
+ OPC_VFRSQRT_D = 0x729d0800,
+ OPC_VFRINT_S = 0x729d3400,
+ OPC_VFRINT_D = 0x729d3800,
+ OPC_VFRINTRM_S = 0x729d4400,
+ OPC_VFRINTRM_D = 0x729d4800,
+ OPC_VFRINTRP_S = 0x729d5400,
+ OPC_VFRINTRP_D = 0x729d5800,
+ OPC_VFRINTRZ_S = 0x729d6400,
+ OPC_VFRINTRZ_D = 0x729d6800,
+ OPC_VFRINTRNE_S = 0x729d7400,
+ OPC_VFRINTRNE_D = 0x729d7800,
+ OPC_VFCVTL_S_H = 0x729de800,
+ OPC_VFCVTH_S_H = 0x729dec00,
+ OPC_VFCVTL_D_S = 0x729df000,
+ OPC_VFCVTH_D_S = 0x729df400,
+ OPC_VFFINT_S_W = 0x729e0000,
+ OPC_VFFINT_S_WU = 0x729e0400,
+ OPC_VFFINT_D_L = 0x729e0800,
+ OPC_VFFINT_D_LU = 0x729e0c00,
+ OPC_VFFINTL_D_W = 0x729e1000,
+ OPC_VFFINTH_D_W = 0x729e1400,
+ OPC_VFTINT_W_S = 0x729e3000,
+ OPC_VFTINT_L_D = 0x729e3400,
+ OPC_VFTINTRM_W_S = 0x729e3800,
+ OPC_VFTINTRM_L_D = 0x729e3c00,
+ OPC_VFTINTRP_W_S = 0x729e4000,
+ OPC_VFTINTRP_L_D = 0x729e4400,
+ OPC_VFTINTRZ_W_S = 0x729e4800,
+ OPC_VFTINTRZ_L_D = 0x729e4c00,
+ OPC_VFTINTRNE_W_S = 0x729e5000,
+ OPC_VFTINTRNE_L_D = 0x729e5400,
+ OPC_VFTINT_WU_S = 0x729e5800,
+ OPC_VFTINT_LU_D = 0x729e5c00,
+ OPC_VFTINTRZ_WU_S = 0x729e7000,
+ OPC_VFTINTRZ_LU_D = 0x729e7400,
+ OPC_VFTINTL_L_S = 0x729e8000,
+ OPC_VFTINTH_L_S = 0x729e8400,
+ OPC_VFTINTRML_L_S = 0x729e8800,
+ OPC_VFTINTRMH_L_S = 0x729e8c00,
+ OPC_VFTINTRPL_L_S = 0x729e9000,
+ OPC_VFTINTRPH_L_S = 0x729e9400,
+ OPC_VFTINTRZL_L_S = 0x729e9800,
+ OPC_VFTINTRZH_L_S = 0x729e9c00,
+ OPC_VFTINTRNEL_L_S = 0x729ea000,
+ OPC_VFTINTRNEH_L_S = 0x729ea400,
+ OPC_VEXTH_H_B = 0x729ee000,
+ OPC_VEXTH_W_H = 0x729ee400,
+ OPC_VEXTH_D_W = 0x729ee800,
+ OPC_VEXTH_Q_D = 0x729eec00,
+ OPC_VEXTH_HU_BU = 0x729ef000,
+ OPC_VEXTH_WU_HU = 0x729ef400,
+ OPC_VEXTH_DU_WU = 0x729ef800,
+ OPC_VEXTH_QU_DU = 0x729efc00,
+ OPC_VREPLGR2VR_B = 0x729f0000,
+ OPC_VREPLGR2VR_H = 0x729f0400,
+ OPC_VREPLGR2VR_W = 0x729f0800,
+ OPC_VREPLGR2VR_D = 0x729f0c00,
+ OPC_VROTRI_B = 0x72a02000,
+ OPC_VROTRI_H = 0x72a04000,
+ OPC_VROTRI_W = 0x72a08000,
+ OPC_VROTRI_D = 0x72a10000,
+ OPC_VSRLRI_B = 0x72a42000,
+ OPC_VSRLRI_H = 0x72a44000,
+ OPC_VSRLRI_W = 0x72a48000,
+ OPC_VSRLRI_D = 0x72a50000,
+ OPC_VSRARI_B = 0x72a82000,
+ OPC_VSRARI_H = 0x72a84000,
+ OPC_VSRARI_W = 0x72a88000,
+ OPC_VSRARI_D = 0x72a90000,
+ OPC_VINSGR2VR_B = 0x72eb8000,
+ OPC_VINSGR2VR_H = 0x72ebc000,
+ OPC_VINSGR2VR_W = 0x72ebe000,
+ OPC_VINSGR2VR_D = 0x72ebf000,
+ OPC_VPICKVE2GR_B = 0x72ef8000,
+ OPC_VPICKVE2GR_H = 0x72efc000,
+ OPC_VPICKVE2GR_W = 0x72efe000,
+ OPC_VPICKVE2GR_D = 0x72eff000,
+ OPC_VPICKVE2GR_BU = 0x72f38000,
+ OPC_VPICKVE2GR_HU = 0x72f3c000,
+ OPC_VPICKVE2GR_WU = 0x72f3e000,
+ OPC_VPICKVE2GR_DU = 0x72f3f000,
+ OPC_VREPLVEI_B = 0x72f78000,
+ OPC_VREPLVEI_H = 0x72f7c000,
+ OPC_VREPLVEI_W = 0x72f7e000,
+ OPC_VREPLVEI_D = 0x72f7f000,
+ OPC_VSLLWIL_H_B = 0x73082000,
+ OPC_VSLLWIL_W_H = 0x73084000,
+ OPC_VSLLWIL_D_W = 0x73088000,
+ OPC_VEXTL_Q_D = 0x73090000,
+ OPC_VSLLWIL_HU_BU = 0x730c2000,
+ OPC_VSLLWIL_WU_HU = 0x730c4000,
+ OPC_VSLLWIL_DU_WU = 0x730c8000,
+ OPC_VEXTL_QU_DU = 0x730d0000,
+ OPC_VBITCLRI_B = 0x73102000,
+ OPC_VBITCLRI_H = 0x73104000,
+ OPC_VBITCLRI_W = 0x73108000,
+ OPC_VBITCLRI_D = 0x73110000,
+ OPC_VBITSETI_B = 0x73142000,
+ OPC_VBITSETI_H = 0x73144000,
+ OPC_VBITSETI_W = 0x73148000,
+ OPC_VBITSETI_D = 0x73150000,
+ OPC_VBITREVI_B = 0x73182000,
+ OPC_VBITREVI_H = 0x73184000,
+ OPC_VBITREVI_W = 0x73188000,
+ OPC_VBITREVI_D = 0x73190000,
+ OPC_VSAT_B = 0x73242000,
+ OPC_VSAT_H = 0x73244000,
+ OPC_VSAT_W = 0x73248000,
+ OPC_VSAT_D = 0x73250000,
+ OPC_VSAT_BU = 0x73282000,
+ OPC_VSAT_HU = 0x73284000,
+ OPC_VSAT_WU = 0x73288000,
+ OPC_VSAT_DU = 0x73290000,
+ OPC_VSLLI_B = 0x732c2000,
+ OPC_VSLLI_H = 0x732c4000,
+ OPC_VSLLI_W = 0x732c8000,
+ OPC_VSLLI_D = 0x732d0000,
+ OPC_VSRLI_B = 0x73302000,
+ OPC_VSRLI_H = 0x73304000,
+ OPC_VSRLI_W = 0x73308000,
+ OPC_VSRLI_D = 0x73310000,
+ OPC_VSRAI_B = 0x73342000,
+ OPC_VSRAI_H = 0x73344000,
+ OPC_VSRAI_W = 0x73348000,
+ OPC_VSRAI_D = 0x73350000,
+ OPC_VSRLNI_B_H = 0x73404000,
+ OPC_VSRLNI_H_W = 0x73408000,
+ OPC_VSRLNI_W_D = 0x73410000,
+ OPC_VSRLNI_D_Q = 0x73420000,
+ OPC_VSRLRNI_B_H = 0x73444000,
+ OPC_VSRLRNI_H_W = 0x73448000,
+ OPC_VSRLRNI_W_D = 0x73450000,
+ OPC_VSRLRNI_D_Q = 0x73460000,
+ OPC_VSSRLNI_B_H = 0x73484000,
+ OPC_VSSRLNI_H_W = 0x73488000,
+ OPC_VSSRLNI_W_D = 0x73490000,
+ OPC_VSSRLNI_D_Q = 0x734a0000,
+ OPC_VSSRLNI_BU_H = 0x734c4000,
+ OPC_VSSRLNI_HU_W = 0x734c8000,
+ OPC_VSSRLNI_WU_D = 0x734d0000,
+ OPC_VSSRLNI_DU_Q = 0x734e0000,
+ OPC_VSSRLRNI_B_H = 0x73504000,
+ OPC_VSSRLRNI_H_W = 0x73508000,
+ OPC_VSSRLRNI_W_D = 0x73510000,
+ OPC_VSSRLRNI_D_Q = 0x73520000,
+ OPC_VSSRLRNI_BU_H = 0x73544000,
+ OPC_VSSRLRNI_HU_W = 0x73548000,
+ OPC_VSSRLRNI_WU_D = 0x73550000,
+ OPC_VSSRLRNI_DU_Q = 0x73560000,
+ OPC_VSRANI_B_H = 0x73584000,
+ OPC_VSRANI_H_W = 0x73588000,
+ OPC_VSRANI_W_D = 0x73590000,
+ OPC_VSRANI_D_Q = 0x735a0000,
+ OPC_VSRARNI_B_H = 0x735c4000,
+ OPC_VSRARNI_H_W = 0x735c8000,
+ OPC_VSRARNI_W_D = 0x735d0000,
+ OPC_VSRARNI_D_Q = 0x735e0000,
+ OPC_VSSRANI_B_H = 0x73604000,
+ OPC_VSSRANI_H_W = 0x73608000,
+ OPC_VSSRANI_W_D = 0x73610000,
+ OPC_VSSRANI_D_Q = 0x73620000,
+ OPC_VSSRANI_BU_H = 0x73644000,
+ OPC_VSSRANI_HU_W = 0x73648000,
+ OPC_VSSRANI_WU_D = 0x73650000,
+ OPC_VSSRANI_DU_Q = 0x73660000,
+ OPC_VSSRARNI_B_H = 0x73684000,
+ OPC_VSSRARNI_H_W = 0x73688000,
+ OPC_VSSRARNI_W_D = 0x73690000,
+ OPC_VSSRARNI_D_Q = 0x736a0000,
+ OPC_VSSRARNI_BU_H = 0x736c4000,
+ OPC_VSSRARNI_HU_W = 0x736c8000,
+ OPC_VSSRARNI_WU_D = 0x736d0000,
+ OPC_VSSRARNI_DU_Q = 0x736e0000,
+ OPC_VEXTRINS_D = 0x73800000,
+ OPC_VEXTRINS_W = 0x73840000,
+ OPC_VEXTRINS_H = 0x73880000,
+ OPC_VEXTRINS_B = 0x738c0000,
+ OPC_VSHUF4I_B = 0x73900000,
+ OPC_VSHUF4I_H = 0x73940000,
+ OPC_VSHUF4I_W = 0x73980000,
+ OPC_VSHUF4I_D = 0x739c0000,
+ OPC_VBITSELI_B = 0x73c40000,
+ OPC_VANDI_B = 0x73d00000,
+ OPC_VORI_B = 0x73d40000,
+ OPC_VXORI_B = 0x73d80000,
+ OPC_VNORI_B = 0x73dc0000,
+ OPC_VLDI = 0x73e00000,
+ OPC_VPERMI_W = 0x73e40000,
} LoongArchInsn;

static int32_t __attribute__((unused))
@@ -134,6 +846,13 @@ encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k)
}

static int32_t __attribute__((unused))
+encode_djka_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+ uint32_t a)
+{
+ return opc | d | j << 5 | k << 10 | a << 15;
+}
+
+static int32_t __attribute__((unused))
encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
uint32_t m)
{
@@ -141,12 +860,27 @@ encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
}

static int32_t __attribute__((unused))
+encode_djkn_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+ uint32_t n)
+{
+ return opc | d | j << 5 | k << 10 | n << 18;
+}
+
+static int32_t __attribute__((unused))
encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
{
return opc | d | k << 10;
}

static int32_t __attribute__((unused))
+encode_cdvj_insn(LoongArchInsn opc, TCGReg cd, TCGReg vj)
+{
+ tcg_debug_assert(cd >= 0 && cd <= 0x7);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ return encode_dj_slots(opc, cd, vj & 0x1f);
+}
+
+static int32_t __attribute__((unused))
encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j)
{
tcg_debug_assert(d >= 0 && d <= 0x1f);
@@ -239,6 +973,42 @@ encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
}

static int32_t __attribute__((unused))
+encode_dvjuk1_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk1 <= 0x1);
+ return encode_djk_slots(opc, d, vj & 0x1f, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk2_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk2 <= 0x3);
+ return encode_djk_slots(opc, d, vj & 0x1f, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk3_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk3 <= 0x7);
+ return encode_djk_slots(opc, d, vj & 0x1f, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk4_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk4 <= 0xf);
+ return encode_djk_slots(opc, d, vj & 0x1f, uk4);
+}
+
+static int32_t __attribute__((unused))
encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
{
tcg_debug_assert(sd10k16 >= -0x2000000 && sd10k16 <= 0x1ffffff);
@@ -252,6 +1022,265 @@ encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
return encode_d_slot(opc, ud15);
}

+static int32_t __attribute__((unused))
+encode_vdj_insn(LoongArchInsn opc, TCGReg vd, TCGReg j)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ return encode_dj_slots(opc, vd & 0x1f, j);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, TCGReg k)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
+ return encode_djk_slots(opc, vd & 0x1f, j, k);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk10_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk10)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk10 >= -0x200 && sk10 <= 0x1ff);
+ return encode_djk_slots(opc, vd & 0x1f, j, sk10 & 0x3ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk11_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk11)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk11 >= -0x400 && sk11 <= 0x3ff);
+ return encode_djk_slots(opc, vd & 0x1f, j, sk11 & 0x7ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk12_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk12)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
+ return encode_djk_slots(opc, vd & 0x1f, j, sk12 & 0xfff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un1)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+ tcg_debug_assert(un1 <= 0x1);
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un2)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+ tcg_debug_assert(un2 <= 0x3);
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un3)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+ tcg_debug_assert(un3 <= 0x7);
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un4)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+ tcg_debug_assert(un4 <= 0xf);
+ return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk9_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk9)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk9 >= -0x100 && sk9 <= 0xff);
+ return encode_djk_slots(opc, vd & 0x1f, j, sk9 & 0x1ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk1)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk1 <= 0x1);
+ return encode_djk_slots(opc, vd & 0x1f, j, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk2)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk2 <= 0x3);
+ return encode_djk_slots(opc, vd & 0x1f, j, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk3)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk3 <= 0x7);
+ return encode_djk_slots(opc, vd & 0x1f, j, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk4)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk4 <= 0xf);
+ return encode_djk_slots(opc, vd & 0x1f, j, uk4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdsj13_insn(LoongArchInsn opc, TCGReg vd, int32_t sj13)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(sj13 >= -0x1000 && sj13 <= 0xfff);
+ return encode_dj_slots(opc, vd & 0x1f, sj13 & 0x1fff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvj_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ return encode_dj_slots(opc, vd & 0x1f, vj & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg k)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, k);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjsk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(sk5 >= -0x10 && sk5 <= 0xf);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, sk5 & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk1)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk1 <= 0x1);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk2)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk2 <= 0x3);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk3 <= 0x7);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk4 <= 0xf);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk5 <= 0x1f);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk5);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk6_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk6 <= 0x3f);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk6);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk7_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk7 <= 0x7f);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk7);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk8_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(uk8 <= 0xff);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk8);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjvk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
+ return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjvkva_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk,
+ TCGReg va)
+{
+ tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+ tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+ tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
+ tcg_debug_assert(va >= 0x20 && va <= 0x3f);
+ return encode_djka_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f, va & 0x1f);
+}
+
/* Emits the `clz.w d, j` instruction. */
static void __attribute__((unused))
tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
@@ -711,6 +1740,384 @@ tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
}

+/* Emits the `vfmadd.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfmadd.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfmsub.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfmsub.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmadd.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmadd.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmsub.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmsub.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfcmp.caf.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_caf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.saf.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_saf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.clt.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_clt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.slt.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_slt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.ceq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_ceq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.seq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_seq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cle.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sle.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cun.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sun.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cult.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sult.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cueq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sueq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cule.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sule.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cne.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sne.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cor.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sor.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cune.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sune.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.caf.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_caf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.saf.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_saf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.clt.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_clt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.slt.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_slt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.ceq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_ceq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.seq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_seq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cle.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sle.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cun.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sun.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cult.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sult.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cueq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sueq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cule.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sule.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cne.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sne.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cor.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sor.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cune.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sune.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_D, vd, vj, vk));
+}
+
+/* Emits the `vbitsel.v vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitsel_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VBITSEL_V, vd, vj, vk, va));
+}
+
+/* Emits the `vshuf.b vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VSHUF_B, vd, vj, vk, va));
+}
+
/* Emits the `addu16i.d d, j, sk16` instruction. */
static void __attribute__((unused))
tcg_out_opc_addu16i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
@@ -837,6 +2244,80 @@ tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
}

+/* Emits the `vld vd, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vld(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VLD, vd, j, sk12));
+}
+
+/* Emits the `vst vd, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vst(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VST, vd, j, sk12));
+}
+
+/* Emits the `vldrepl.d vd, j, sk9` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk9)
+{
+ tcg_out32(s, encode_vdjsk9_insn(OPC_VLDREPL_D, vd, j, sk9));
+}
+
+/* Emits the `vldrepl.w vd, j, sk10` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk10)
+{
+ tcg_out32(s, encode_vdjsk10_insn(OPC_VLDREPL_W, vd, j, sk10));
+}
+
+/* Emits the `vldrepl.h vd, j, sk11` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk11)
+{
+ tcg_out32(s, encode_vdjsk11_insn(OPC_VLDREPL_H, vd, j, sk11));
+}
+
+/* Emits the `vldrepl.b vd, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VLDREPL_B, vd, j, sk12));
+}
+
+/* Emits the `vstelm.d vd, j, sk8, un1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un1)
+{
+ tcg_out32(s, encode_vdjsk8un1_insn(OPC_VSTELM_D, vd, j, sk8, un1));
+}
+
+/* Emits the `vstelm.w vd, j, sk8, un2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un2)
+{
+ tcg_out32(s, encode_vdjsk8un2_insn(OPC_VSTELM_W, vd, j, sk8, un2));
+}
+
+/* Emits the `vstelm.h vd, j, sk8, un3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un3)
+{
+ tcg_out32(s, encode_vdjsk8un3_insn(OPC_VSTELM_H, vd, j, sk8, un3));
+}
+
+/* Emits the `vstelm.b vd, j, sk8, un4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+ uint32_t un4)
+{
+ tcg_out32(s, encode_vdjsk8un4_insn(OPC_VSTELM_B, vd, j, sk8, un4));
+}
+
/* Emits the `ldx.b d, j, k` instruction. */
static void __attribute__((unused))
tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
@@ -914,6 +2395,20 @@ tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
}

+/* Emits the `vldx vd, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_vdjk_insn(OPC_VLDX, vd, j, k));
+}
+
+/* Emits the `vstx vd, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_vdjk_insn(OPC_VSTX, vd, j, k));
+}
+
/* Emits the `dbar ud15` instruction. */
static void __attribute__((unused))
tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
@@ -984,4 +2479,4526 @@ tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16));
}

+/* Emits the `vseq.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseq_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_B, vd, vj, vk));
+}
+
+/* Emits the `vseq.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseq_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_H, vd, vj, vk));
+}
+
+/* Emits the `vseq.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseq_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_W, vd, vj, vk));
+}
+
+/* Emits the `vseq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vsle.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_B, vd, vj, vk));
+}
+
+/* Emits the `vsle.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_H, vd, vj, vk));
+}
+
+/* Emits the `vsle.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_W, vd, vj, vk));
+}
+
+/* Emits the `vsle.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_D, vd, vj, vk));
+}
+
+/* Emits the `vsle.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_BU, vd, vj, vk));
+}
+
+/* Emits the `vsle.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_HU, vd, vj, vk));
+}
+
+/* Emits the `vsle.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_WU, vd, vj, vk));
+}
+
+/* Emits the `vsle.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsle_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_DU, vd, vj, vk));
+}
+
+/* Emits the `vslt.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_B, vd, vj, vk));
+}
+
+/* Emits the `vslt.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_H, vd, vj, vk));
+}
+
+/* Emits the `vslt.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_W, vd, vj, vk));
+}
+
+/* Emits the `vslt.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_D, vd, vj, vk));
+}
+
+/* Emits the `vslt.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_BU, vd, vj, vk));
+}
+
+/* Emits the `vslt.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_HU, vd, vj, vk));
+}
+
+/* Emits the `vslt.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_WU, vd, vj, vk));
+}
+
+/* Emits the `vslt.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslt_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_DU, vd, vj, vk));
+}
+
+/* Emits the `vadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_B, vd, vj, vk));
+}
+
+/* Emits the `vadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_H, vd, vj, vk));
+}
+
+/* Emits the `vadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_W, vd, vj, vk));
+}
+
+/* Emits the `vadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_D, vd, vj, vk));
+}
+
+/* Emits the `vsub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vsub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vsub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vsub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vsadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_B, vd, vj, vk));
+}
+
+/* Emits the `vsadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_H, vd, vj, vk));
+}
+
+/* Emits the `vsadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_W, vd, vj, vk));
+}
+
+/* Emits the `vsadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_D, vd, vj, vk));
+}
+
+/* Emits the `vssub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vssub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vssub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vssub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vsadd.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_BU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_HU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_WU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_DU, vd, vj, vk));
+}
+
+/* Emits the `vssub.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_BU, vd, vj, vk));
+}
+
+/* Emits the `vssub.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_HU, vd, vj, vk));
+}
+
+/* Emits the `vssub.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_WU, vd, vj, vk));
+}
+
+/* Emits the `vssub.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_DU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_H_B, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_W_H, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_D_W, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_H_B, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_W_H, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_D_W, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.hu.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_HU_BU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.wu.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_WU_HU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.du.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_DU_WU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.qu.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_QU_DU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.hu.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_HU_BU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.wu.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_WU_HU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.du.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_DU_WU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.qu.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_QU_DU, vd, vj, vk));
+}
+
+/* Emits the `vadda.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_B, vd, vj, vk));
+}
+
+/* Emits the `vadda.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_H, vd, vj, vk));
+}
+
+/* Emits the `vadda.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_W, vd, vj, vk));
+}
+
+/* Emits the `vadda.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_D, vd, vj, vk));
+}
+
+/* Emits the `vabsd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_B, vd, vj, vk));
+}
+
+/* Emits the `vabsd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_H, vd, vj, vk));
+}
+
+/* Emits the `vabsd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_W, vd, vj, vk));
+}
+
+/* Emits the `vabsd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_D, vd, vj, vk));
+}
+
+/* Emits the `vabsd.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_BU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_HU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_WU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_DU, vd, vj, vk));
+}
+
+/* Emits the `vavg.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_B, vd, vj, vk));
+}
+
+/* Emits the `vavg.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_H, vd, vj, vk));
+}
+
+/* Emits the `vavg.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_W, vd, vj, vk));
+}
+
+/* Emits the `vavg.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_D, vd, vj, vk));
+}
+
+/* Emits the `vavg.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_BU, vd, vj, vk));
+}
+
+/* Emits the `vavg.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_HU, vd, vj, vk));
+}
+
+/* Emits the `vavg.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_WU, vd, vj, vk));
+}
+
+/* Emits the `vavg.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_DU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_B, vd, vj, vk));
+}
+
+/* Emits the `vavgr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_H, vd, vj, vk));
+}
+
+/* Emits the `vavgr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_W, vd, vj, vk));
+}
+
+/* Emits the `vavgr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_D, vd, vj, vk));
+}
+
+/* Emits the `vavgr.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_BU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_HU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_WU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_DU, vd, vj, vk));
+}
+
+/* Emits the `vmax.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_B, vd, vj, vk));
+}
+
+/* Emits the `vmax.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_H, vd, vj, vk));
+}
+
+/* Emits the `vmax.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_W, vd, vj, vk));
+}
+
+/* Emits the `vmax.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_D, vd, vj, vk));
+}
+
+/* Emits the `vmin.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_B, vd, vj, vk));
+}
+
+/* Emits the `vmin.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_H, vd, vj, vk));
+}
+
+/* Emits the `vmin.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_W, vd, vj, vk));
+}
+
+/* Emits the `vmin.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_D, vd, vj, vk));
+}
+
+/* Emits the `vmax.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_BU, vd, vj, vk));
+}
+
+/* Emits the `vmax.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_HU, vd, vj, vk));
+}
+
+/* Emits the `vmax.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_WU, vd, vj, vk));
+}
+
+/* Emits the `vmax.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_DU, vd, vj, vk));
+}
+
+/* Emits the `vmin.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_BU, vd, vj, vk));
+}
+
+/* Emits the `vmin.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_HU, vd, vj, vk));
+}
+
+/* Emits the `vmin.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_WU, vd, vj, vk));
+}
+
+/* Emits the `vmin.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_DU, vd, vj, vk));
+}
+
+/* Emits the `vmul.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_B, vd, vj, vk));
+}
+
+/* Emits the `vmul.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_H, vd, vj, vk));
+}
+
+/* Emits the `vmul.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_W, vd, vj, vk));
+}
+
+/* Emits the `vmul.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_D, vd, vj, vk));
+}
+
+/* Emits the `vmuh.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_B, vd, vj, vk));
+}
+
+/* Emits the `vmuh.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_H, vd, vj, vk));
+}
+
+/* Emits the `vmuh.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_W, vd, vj, vk));
+}
+
+/* Emits the `vmuh.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_D, vd, vj, vk));
+}
+
+/* Emits the `vmuh.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_BU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_HU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_WU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_B, vd, vj, vk));
+}
+
+/* Emits the `vmadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_H, vd, vj, vk));
+}
+
+/* Emits the `vmadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_W, vd, vj, vk));
+}
+
+/* Emits the `vmadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_D, vd, vj, vk));
+}
+
+/* Emits the `vmsub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vmsub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vmsub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vmsub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vdiv.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_B, vd, vj, vk));
+}
+
+/* Emits the `vdiv.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_H, vd, vj, vk));
+}
+
+/* Emits the `vdiv.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_W, vd, vj, vk));
+}
+
+/* Emits the `vdiv.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_D, vd, vj, vk));
+}
+
+/* Emits the `vmod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_B, vd, vj, vk));
+}
+
+/* Emits the `vmod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_H, vd, vj, vk));
+}
+
+/* Emits the `vmod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_W, vd, vj, vk));
+}
+
+/* Emits the `vmod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_D, vd, vj, vk));
+}
+
+/* Emits the `vdiv.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_BU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_HU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_WU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_DU, vd, vj, vk));
+}
+
+/* Emits the `vmod.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_BU, vd, vj, vk));
+}
+
+/* Emits the `vmod.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_HU, vd, vj, vk));
+}
+
+/* Emits the `vmod.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_WU, vd, vj, vk));
+}
+
+/* Emits the `vmod.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_DU, vd, vj, vk));
+}
+
+/* Emits the `vsll.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_B, vd, vj, vk));
+}
+
+/* Emits the `vsll.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_H, vd, vj, vk));
+}
+
+/* Emits the `vsll.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_W, vd, vj, vk));
+}
+
+/* Emits the `vsll.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_D, vd, vj, vk));
+}
+
+/* Emits the `vsrl.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_B, vd, vj, vk));
+}
+
+/* Emits the `vsrl.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_H, vd, vj, vk));
+}
+
+/* Emits the `vsrl.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_W, vd, vj, vk));
+}
+
+/* Emits the `vsrl.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_D, vd, vj, vk));
+}
+
+/* Emits the `vsra.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_B, vd, vj, vk));
+}
+
+/* Emits the `vsra.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_H, vd, vj, vk));
+}
+
+/* Emits the `vsra.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_W, vd, vj, vk));
+}
+
+/* Emits the `vsra.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_D, vd, vj, vk));
+}
+
+/* Emits the `vrotr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_B, vd, vj, vk));
+}
+
+/* Emits the `vrotr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_H, vd, vj, vk));
+}
+
+/* Emits the `vrotr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_W, vd, vj, vk));
+}
+
+/* Emits the `vrotr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_D, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_B, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_H, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_W, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_D, vd, vj, vk));
+}
+
+/* Emits the `vsrar.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_B, vd, vj, vk));
+}
+
+/* Emits the `vsrar.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_H, vd, vj, vk));
+}
+
+/* Emits the `vsrar.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_W, vd, vj, vk));
+}
+
+/* Emits the `vsrar.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_D, vd, vj, vk));
+}
+
+/* Emits the `vsrln.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrln.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrln.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsran.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsran.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsran.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrln.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrln.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrln.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssran.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssran.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssran.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrln.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssrln.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssrln.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vssran.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssran.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssran.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_B, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_H, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_W, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_D, vd, vj, vk));
+}
+
+/* Emits the `vbitset.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_B, vd, vj, vk));
+}
+
+/* Emits the `vbitset.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_H, vd, vj, vk));
+}
+
+/* Emits the `vbitset.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_W, vd, vj, vk));
+}
+
+/* Emits the `vbitset.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_D, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_B, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_H, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_W, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_D, vd, vj, vk));
+}
+
+/* Emits the `vpackev.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_B, vd, vj, vk));
+}
+
+/* Emits the `vpackev.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_H, vd, vj, vk));
+}
+
+/* Emits the `vpackev.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_W, vd, vj, vk));
+}
+
+/* Emits the `vpackev.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_D, vd, vj, vk));
+}
+
+/* Emits the `vpackod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_B, vd, vj, vk));
+}
+
+/* Emits the `vpackod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_H, vd, vj, vk));
+}
+
+/* Emits the `vpackod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_W, vd, vj, vk));
+}
+
+/* Emits the `vpackod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_D, vd, vj, vk));
+}
+
+/* Emits the `vilvl.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_B, vd, vj, vk));
+}
+
+/* Emits the `vilvl.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_H, vd, vj, vk));
+}
+
+/* Emits the `vilvl.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_W, vd, vj, vk));
+}
+
+/* Emits the `vilvl.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_D, vd, vj, vk));
+}
+
+/* Emits the `vilvh.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_B, vd, vj, vk));
+}
+
+/* Emits the `vilvh.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_H, vd, vj, vk));
+}
+
+/* Emits the `vilvh.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_W, vd, vj, vk));
+}
+
+/* Emits the `vilvh.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_D, vd, vj, vk));
+}
+
+/* Emits the `vpickev.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_B, vd, vj, vk));
+}
+
+/* Emits the `vpickev.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_H, vd, vj, vk));
+}
+
+/* Emits the `vpickev.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_W, vd, vj, vk));
+}
+
+/* Emits the `vpickev.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_D, vd, vj, vk));
+}
+
+/* Emits the `vpickod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_B, vd, vj, vk));
+}
+
+/* Emits the `vpickod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_H, vd, vj, vk));
+}
+
+/* Emits the `vpickod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_W, vd, vj, vk));
+}
+
+/* Emits the `vpickod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_D, vd, vj, vk));
+}
+
+/* Emits the `vreplve.b vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_B, vd, vj, k));
+}
+
+/* Emits the `vreplve.h vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_H, vd, vj, k));
+}
+
+/* Emits the `vreplve.w vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_W, vd, vj, k));
+}
+
+/* Emits the `vreplve.d vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_D, vd, vj, k));
+}
+
+/* Emits the `vand.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vand_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAND_V, vd, vj, vk));
+}
+
+/* Emits the `vor.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VOR_V, vd, vj, vk));
+}
+
+/* Emits the `vxor.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vxor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VXOR_V, vd, vj, vk));
+}
+
+/* Emits the `vnor.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vnor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VNOR_V, vd, vj, vk));
+}
+
+/* Emits the `vandn.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vandn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VANDN_V, vd, vj, vk));
+}
+
+/* Emits the `vorn.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vorn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VORN_V, vd, vj, vk));
+}
+
+/* Emits the `vfrstp.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstp_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_B, vd, vj, vk));
+}
+
+/* Emits the `vfrstp.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstp_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_H, vd, vj, vk));
+}
+
+/* Emits the `vadd.q vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadd_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_Q, vd, vj, vk));
+}
+
+/* Emits the `vsub.q vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsub_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_Q, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_B, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_H, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_W, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_D, vd, vj, vk));
+}
+
+/* Emits the `vfadd.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_S, vd, vj, vk));
+}
+
+/* Emits the `vfadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_D, vd, vj, vk));
+}
+
+/* Emits the `vfsub.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_S, vd, vj, vk));
+}
+
+/* Emits the `vfsub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vfmul.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmul_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_S, vd, vj, vk));
+}
+
+/* Emits the `vfmul.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_D, vd, vj, vk));
+}
+
+/* Emits the `vfdiv.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfdiv_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_S, vd, vj, vk));
+}
+
+/* Emits the `vfdiv.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_D, vd, vj, vk));
+}
+
+/* Emits the `vfmax.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmax_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_S, vd, vj, vk));
+}
+
+/* Emits the `vfmax.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_D, vd, vj, vk));
+}
+
+/* Emits the `vfmin.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmin_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_S, vd, vj, vk));
+}
+
+/* Emits the `vfmin.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_D, vd, vj, vk));
+}
+
+/* Emits the `vfmaxa.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmaxa_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_S, vd, vj, vk));
+}
+
+/* Emits the `vfmaxa.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmaxa_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_D, vd, vj, vk));
+}
+
+/* Emits the `vfmina.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmina_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_S, vd, vj, vk));
+}
+
+/* Emits the `vfmina.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmina_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_D, vd, vj, vk));
+}
+
+/* Emits the `vfcvt.h.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvt_h_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_H_S, vd, vj, vk));
+}
+
+/* Emits the `vfcvt.s.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvt_s_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_S_D, vd, vj, vk));
+}
+
+/* Emits the `vffint.s.l vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffint_s_l(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFFINT_S_L, vd, vj, vk));
+}
+
+/* Emits the `vftint.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINT_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrm.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrm_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRM_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrp.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrp_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRP_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrz.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRZ_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrne.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrne_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRNE_W_D, vd, vj, vk));
+}
+
+/* Emits the `vshuf.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_H, vd, vj, vk));
+}
+
+/* Emits the `vshuf.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_W, vd, vj, vk));
+}
+
+/* Emits the `vshuf.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_D, vd, vj, vk));
+}
+
+/* Emits the `vseqi.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_B, vd, vj, sk5));
+}
+
+/* Emits the `vseqi.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_H, vd, vj, sk5));
+}
+
+/* Emits the `vseqi.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_W, vd, vj, sk5));
+}
+
+/* Emits the `vseqi.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_D, vd, vj, sk5));
+}
+
+/* Emits the `vslei.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_B, vd, vj, sk5));
+}
+
+/* Emits the `vslei.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_H, vd, vj, sk5));
+}
+
+/* Emits the `vslei.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_W, vd, vj, sk5));
+}
+
+/* Emits the `vslei.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_D, vd, vj, sk5));
+}
+
+/* Emits the `vslei.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vslei.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vslei.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vslei.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_B, vd, vj, sk5));
+}
+
+/* Emits the `vslti.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_H, vd, vj, sk5));
+}
+
+/* Emits the `vslti.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_W, vd, vj, sk5));
+}
+
+/* Emits the `vslti.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_D, vd, vj, sk5));
+}
+
+/* Emits the `vslti.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vbsll.v vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbsll_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSLL_V, vd, vj, uk5));
+}
+
+/* Emits the `vbsrl.v vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbsrl_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSRL_V, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_B, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_H, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_W, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_D, vd, vj, sk5));
+}
+
+/* Emits the `vmini.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_B, vd, vj, sk5));
+}
+
+/* Emits the `vmini.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_H, vd, vj, sk5));
+}
+
+/* Emits the `vmini.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_W, vd, vj, sk5));
+}
+
+/* Emits the `vmini.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_D, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vfrstpi.b vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstpi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_B, vd, vj, uk5));
+}
+
+/* Emits the `vfrstpi.h vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstpi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_H, vd, vj, uk5));
+}
+
+/* Emits the `vclo.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_B, vd, vj));
+}
+
+/* Emits the `vclo.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_H, vd, vj));
+}
+
+/* Emits the `vclo.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_W, vd, vj));
+}
+
+/* Emits the `vclo.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_D, vd, vj));
+}
+
+/* Emits the `vclz.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_B, vd, vj));
+}
+
+/* Emits the `vclz.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_H, vd, vj));
+}
+
+/* Emits the `vclz.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_W, vd, vj));
+}
+
+/* Emits the `vclz.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_D, vd, vj));
+}
+
+/* Emits the `vpcnt.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_B, vd, vj));
+}
+
+/* Emits the `vpcnt.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_H, vd, vj));
+}
+
+/* Emits the `vpcnt.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_W, vd, vj));
+}
+
+/* Emits the `vpcnt.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_D, vd, vj));
+}
+
+/* Emits the `vneg.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_B, vd, vj));
+}
+
+/* Emits the `vneg.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_H, vd, vj));
+}
+
+/* Emits the `vneg.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_W, vd, vj));
+}
+
+/* Emits the `vneg.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_D, vd, vj));
+}
+
+/* Emits the `vmskltz.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_B, vd, vj));
+}
+
+/* Emits the `vmskltz.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_H, vd, vj));
+}
+
+/* Emits the `vmskltz.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_W, vd, vj));
+}
+
+/* Emits the `vmskltz.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_D, vd, vj));
+}
+
+/* Emits the `vmskgez.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskgez_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKGEZ_B, vd, vj));
+}
+
+/* Emits the `vmsknz.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsknz_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKNZ_B, vd, vj));
+}
+
+/* Emits the `vseteqz.v cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseteqz_v(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETEQZ_V, cd, vj));
+}
+
+/* Emits the `vsetnez.v cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetnez_v(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETNEZ_V, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.b cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_b(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_B, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.h cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_h(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_H, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.w cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_w(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_W, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.d cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_d(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_D, cd, vj));
+}
+
+/* Emits the `vsetallnez.b cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_b(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_B, cd, vj));
+}
+
+/* Emits the `vsetallnez.h cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_h(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_H, cd, vj));
+}
+
+/* Emits the `vsetallnez.w cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_w(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_W, cd, vj));
+}
+
+/* Emits the `vsetallnez.d cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_d(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_D, cd, vj));
+}
+
+/* Emits the `vflogb.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vflogb_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_S, vd, vj));
+}
+
+/* Emits the `vflogb.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vflogb_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_D, vd, vj));
+}
+
+/* Emits the `vfclass.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfclass_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_S, vd, vj));
+}
+
+/* Emits the `vfclass.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfclass_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_D, vd, vj));
+}
+
+/* Emits the `vfsqrt.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_S, vd, vj));
+}
+
+/* Emits the `vfsqrt.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_D, vd, vj));
+}
+
+/* Emits the `vfrecip.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrecip_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_S, vd, vj));
+}
+
+/* Emits the `vfrecip.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrecip_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_D, vd, vj));
+}
+
+/* Emits the `vfrsqrt.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_S, vd, vj));
+}
+
+/* Emits the `vfrsqrt.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_D, vd, vj));
+}
+
+/* Emits the `vfrint.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrint_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_S, vd, vj));
+}
+
+/* Emits the `vfrint.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrint_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_D, vd, vj));
+}
+
+/* Emits the `vfrintrm.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrm_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_S, vd, vj));
+}
+
+/* Emits the `vfrintrm.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrm_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_D, vd, vj));
+}
+
+/* Emits the `vfrintrp.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrp_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_S, vd, vj));
+}
+
+/* Emits the `vfrintrp.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrp_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_D, vd, vj));
+}
+
+/* Emits the `vfrintrz.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrz_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_S, vd, vj));
+}
+
+/* Emits the `vfrintrz.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrz_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_D, vd, vj));
+}
+
+/* Emits the `vfrintrne.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrne_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_S, vd, vj));
+}
+
+/* Emits the `vfrintrne.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrintrne_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_D, vd, vj));
+}
+
+/* Emits the `vfcvtl.s.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvtl_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_S_H, vd, vj));
+}
+
+/* Emits the `vfcvth.s.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvth_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_S_H, vd, vj));
+}
+
+/* Emits the `vfcvtl.d.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvtl_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_D_S, vd, vj));
+}
+
+/* Emits the `vfcvth.d.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvth_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_D_S, vd, vj));
+}
+
+/* Emits the `vffint.s.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffint_s_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_W, vd, vj));
+}
+
+/* Emits the `vffint.s.wu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffint_s_wu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_WU, vd, vj));
+}
+
+/* Emits the `vffint.d.l vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffint_d_l(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_L, vd, vj));
+}
+
+/* Emits the `vffint.d.lu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffint_d_lu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_LU, vd, vj));
+}
+
+/* Emits the `vffintl.d.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffintl_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTL_D_W, vd, vj));
+}
+
+/* Emits the `vffinth.d.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffinth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTH_D_W, vd, vj));
+}
+
+/* Emits the `vftint.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_W_S, vd, vj));
+}
+
+/* Emits the `vftint.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_L_D, vd, vj));
+}
+
+/* Emits the `vftintrm.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrm_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_W_S, vd, vj));
+}
+
+/* Emits the `vftintrm.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrm_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_L_D, vd, vj));
+}
+
+/* Emits the `vftintrp.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrp_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_W_S, vd, vj));
+}
+
+/* Emits the `vftintrp.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrp_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_L_D, vd, vj));
+}
+
+/* Emits the `vftintrz.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_W_S, vd, vj));
+}
+
+/* Emits the `vftintrz.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_L_D, vd, vj));
+}
+
+/* Emits the `vftintrne.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrne_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_W_S, vd, vj));
+}
+
+/* Emits the `vftintrne.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrne_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_L_D, vd, vj));
+}
+
+/* Emits the `vftint.wu.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_WU_S, vd, vj));
+}
+
+/* Emits the `vftint.lu.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_LU_D, vd, vj));
+}
+
+/* Emits the `vftintrz.wu.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_WU_S, vd, vj));
+}
+
+/* Emits the `vftintrz.lu.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_LU_D, vd, vj));
+}
+
+/* Emits the `vftintl.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTL_L_S, vd, vj));
+}
+
+/* Emits the `vftinth.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftinth_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrml.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrml_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRML_L_S, vd, vj));
+}
+
+/* Emits the `vftintrmh.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrmh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRMH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrpl.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrpl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPL_L_S, vd, vj));
+}
+
+/* Emits the `vftintrph.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrph_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrzl.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrzl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZL_L_S, vd, vj));
+}
+
+/* Emits the `vftintrzh.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrzh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrnel.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrnel_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEL_L_S, vd, vj));
+}
+
+/* Emits the `vftintrneh.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrneh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEH_L_S, vd, vj));
+}
+
+/* Emits the `vexth.h.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_h_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_H_B, vd, vj));
+}
+
+/* Emits the `vexth.w.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_w_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_W_H, vd, vj));
+}
+
+/* Emits the `vexth.d.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_D_W, vd, vj));
+}
+
+/* Emits the `vexth.q.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_Q_D, vd, vj));
+}
+
+/* Emits the `vexth.hu.bu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_HU_BU, vd, vj));
+}
+
+/* Emits the `vexth.wu.hu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_WU_HU, vd, vj));
+}
+
+/* Emits the `vexth.du.wu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_du_wu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_DU_WU, vd, vj));
+}
+
+/* Emits the `vexth.qu.du vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_QU_DU, vd, vj));
+}
+
+/* Emits the `vreplgr2vr.b vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j)
+{
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_B, vd, j));
+}
+
+/* Emits the `vreplgr2vr.h vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j)
+{
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_H, vd, j));
+}
+
+/* Emits the `vreplgr2vr.w vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j)
+{
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_W, vd, j));
+}
+
+/* Emits the `vreplgr2vr.d vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j)
+{
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_D, vd, j));
+}
+
+/* Emits the `vrotri.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VROTRI_B, vd, vj, uk3));
+}
+
+/* Emits the `vrotri.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VROTRI_H, vd, vj, uk4));
+}
+
+/* Emits the `vrotri.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VROTRI_W, vd, vj, uk5));
+}
+
+/* Emits the `vrotri.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VROTRI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlri.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLRI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrlri.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrlri.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrlri.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrari.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRARI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrari.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrari.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrari.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARI_D, vd, vj, uk6));
+}
+
+/* Emits the `vinsgr2vr.b vd, j, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdjuk4_insn(OPC_VINSGR2VR_B, vd, j, uk4));
+}
+
+/* Emits the `vinsgr2vr.h vd, j, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdjuk3_insn(OPC_VINSGR2VR_H, vd, j, uk3));
+}
+
+/* Emits the `vinsgr2vr.w vd, j, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk2)
+{
+ tcg_out32(s, encode_vdjuk2_insn(OPC_VINSGR2VR_W, vd, j, uk2));
+}
+
+/* Emits the `vinsgr2vr.d vd, j, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk1)
+{
+ tcg_out32(s, encode_vdjuk1_insn(OPC_VINSGR2VR_D, vd, j, uk1));
+}
+
+/* Emits the `vpickve2gr.b d, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_b(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_B, d, vj, uk4));
+}
+
+/* Emits the `vpickve2gr.h d, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_h(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_H, d, vj, uk3));
+}
+
+/* Emits the `vpickve2gr.w d, vj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_w(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+ tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_W, d, vj, uk2));
+}
+
+/* Emits the `vpickve2gr.d d, vj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_d(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+ tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_D, d, vj, uk1));
+}
+
+/* Emits the `vpickve2gr.bu d, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_bu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_BU, d, vj, uk4));
+}
+
+/* Emits the `vpickve2gr.hu d, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_hu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_HU, d, vj, uk3));
+}
+
+/* Emits the `vpickve2gr.wu d, vj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_wu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+ tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_WU, d, vj, uk2));
+}
+
+/* Emits the `vpickve2gr.du d, vj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_du(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+ tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_DU, d, vj, uk1));
+}
+
+/* Emits the `vreplvei.b vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VREPLVEI_B, vd, vj, uk4));
+}
+
+/* Emits the `vreplvei.h vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VREPLVEI_H, vd, vj, uk3));
+}
+
+/* Emits the `vreplvei.w vd, vj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk2)
+{
+ tcg_out32(s, encode_vdvjuk2_insn(OPC_VREPLVEI_W, vd, vj, uk2));
+}
+
+/* Emits the `vreplvei.d vd, vj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk1)
+{
+ tcg_out32(s, encode_vdvjuk1_insn(OPC_VREPLVEI_D, vd, vj, uk1));
+}
+
+/* Emits the `vsllwil.h.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_h_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_H_B, vd, vj, uk3));
+}
+
+/* Emits the `vsllwil.w.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_w_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_W_H, vd, vj, uk4));
+}
+
+/* Emits the `vsllwil.d.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_d_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_D_W, vd, vj, uk5));
+}
+
+/* Emits the `vextl.q.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextl_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_Q_D, vd, vj));
+}
+
+/* Emits the `vsllwil.hu.bu vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_HU_BU, vd, vj, uk3));
+}
+
+/* Emits the `vsllwil.wu.hu vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_WU_HU, vd, vj, uk4));
+}
+
+/* Emits the `vsllwil.du.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_DU_WU, vd, vj, uk5));
+}
+
+/* Emits the `vextl.qu.du vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextl_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_QU_DU, vd, vj));
+}
+
+/* Emits the `vbitclri.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITCLRI_B, vd, vj, uk3));
+}
+
+/* Emits the `vbitclri.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITCLRI_H, vd, vj, uk4));
+}
+
+/* Emits the `vbitclri.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITCLRI_W, vd, vj, uk5));
+}
+
+/* Emits the `vbitclri.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITCLRI_D, vd, vj, uk6));
+}
+
+/* Emits the `vbitseti.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITSETI_B, vd, vj, uk3));
+}
+
+/* Emits the `vbitseti.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITSETI_H, vd, vj, uk4));
+}
+
+/* Emits the `vbitseti.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITSETI_W, vd, vj, uk5));
+}
+
+/* Emits the `vbitseti.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITSETI_D, vd, vj, uk6));
+}
+
+/* Emits the `vbitrevi.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITREVI_B, vd, vj, uk3));
+}
+
+/* Emits the `vbitrevi.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITREVI_H, vd, vj, uk4));
+}
+
+/* Emits the `vbitrevi.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITREVI_W, vd, vj, uk5));
+}
+
+/* Emits the `vbitrevi.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITREVI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsat.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_B, vd, vj, uk3));
+}
+
+/* Emits the `vsat.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_H, vd, vj, uk4));
+}
+
+/* Emits the `vsat.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_W, vd, vj, uk5));
+}
+
+/* Emits the `vsat.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_D, vd, vj, uk6));
+}
+
+/* Emits the `vsat.bu vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_BU, vd, vj, uk3));
+}
+
+/* Emits the `vsat.hu vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_HU, vd, vj, uk4));
+}
+
+/* Emits the `vsat.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_WU, vd, vj, uk5));
+}
+
+/* Emits the `vsat.du vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_DU, vd, vj, uk6));
+}
+
+/* Emits the `vslli.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLI_B, vd, vj, uk3));
+}
+
+/* Emits the `vslli.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLI_H, vd, vj, uk4));
+}
+
+/* Emits the `vslli.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLI_W, vd, vj, uk5));
+}
+
+/* Emits the `vslli.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSLLI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrli.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrli.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrli.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrli.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrai.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRAI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrai.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRAI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrai.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRAI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrai.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRAI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrlni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrlni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vsrlrni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrlrni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrlrni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlrni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLRNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlni.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlni.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlni.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlni.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlrni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlrni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlrni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlrni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlrni.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlrni.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlrni.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlrni.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vsrani.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRANI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrani.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRANI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrani.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRANI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrani.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRANI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vsrarni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrarni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrarni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrarni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRARNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrani.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrani.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrani.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrani.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrani.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrani.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrani.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrani.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrarni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrarni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrarni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrarni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrarni.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrarni.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrarni.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrarni.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vextrins.d vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_D, vd, vj, uk8));
+}
+
+/* Emits the `vextrins.w vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_W, vd, vj, uk8));
+}
+
+/* Emits the `vextrins.h vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_H, vd, vj, uk8));
+}
+
+/* Emits the `vextrins.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_B, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_B, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.h vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_H, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.w vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_W, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.d vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_D, vd, vj, uk8));
+}
+
+/* Emits the `vbitseli.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VBITSELI_B, vd, vj, uk8));
+}
+
+/* Emits the `vandi.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vandi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VANDI_B, vd, vj, uk8));
+}
+
+/* Emits the `vori.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VORI_B, vd, vj, uk8));
+}
+
+/* Emits the `vxori.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vxori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VXORI_B, vd, vj, uk8));
+}
+
+/* Emits the `vnori.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vnori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VNORI_B, vd, vj, uk8));
+}
+
+/* Emits the `vldi vd, sj13` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldi(TCGContext *s, TCGReg vd, int32_t sj13)
+{
+ tcg_out32(s, encode_vdsj13_insn(OPC_VLDI, vd, sj13));
+}
+
+/* Emits the `vpermi.w vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpermi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VPERMI_W, vd, vj, uk8));
+}
+
/* End of generated code. */
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index c2bde44..77d62e3 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -17,7 +17,11 @@
C_O0_I1(r)
C_O0_I2(rZ, r)
C_O0_I2(rZ, rZ)
+C_O0_I2(w, r)
+C_O0_I3(r, r, r)
C_O1_I1(r, r)
+C_O1_I1(w, r)
+C_O1_I1(w, w)
C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
@@ -29,4 +33,9 @@ C_O1_I2(r, 0, rZ)
C_O1_I2(r, rZ, ri)
C_O1_I2(r, rZ, rJ)
C_O1_I2(r, rZ, rZ)
+C_O1_I2(w, w, w)
+C_O1_I2(w, w, wM)
+C_O1_I2(w, w, wA)
+C_O1_I3(w, w, w, w)
C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O2_I1(r, r, r)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index 6e9ccca..2ba9c13 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -14,6 +14,7 @@
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
+REGS('w', ALL_VECTOR_REGS)
/*
* Define constraint letters for constants:
@@ -25,3 +26,5 @@ CONST('U', TCG_CT_CONST_U12)
CONST('Z', TCG_CT_CONST_ZERO)
CONST('C', TCG_CT_CONST_C12)
CONST('W', TCG_CT_CONST_WSZ)
+CONST('M', TCG_CT_CONST_VCMP)
+CONST('A', TCG_CT_CONST_VADD)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index baf5fc3..b701df5 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -32,6 +32,8 @@
#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>
+bool use_lsx_instructions;
+
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"zero",
@@ -65,7 +67,39 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"s5",
"s6",
"s7",
- "s8"
+ "s8",
+ "vr0",
+ "vr1",
+ "vr2",
+ "vr3",
+ "vr4",
+ "vr5",
+ "vr6",
+ "vr7",
+ "vr8",
+ "vr9",
+ "vr10",
+ "vr11",
+ "vr12",
+ "vr13",
+ "vr14",
+ "vr15",
+ "vr16",
+ "vr17",
+ "vr18",
+ "vr19",
+ "vr20",
+ "vr21",
+ "vr22",
+ "vr23",
+ "vr24",
+ "vr25",
+ "vr26",
+ "vr27",
+ "vr28",
+ "vr29",
+ "vr30",
+ "vr31",
};
#endif
@@ -102,6 +136,15 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_A2,
TCG_REG_A1,
TCG_REG_A0,
+
+ /* Vector registers */
+ TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+ /* V24 - V31 are caller-saved, and skipped. */
};
static const int tcg_target_call_iarg_regs[] = {
@@ -133,8 +176,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_U12 0x800
#define TCG_CT_CONST_C12 0x1000
#define TCG_CT_CONST_WSZ 0x2000
+#define TCG_CT_CONST_VCMP 0x4000
+#define TCG_CT_CONST_VADD 0x8000
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
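+/* The 32 vector registers are TCGReg indexes 32..63, above the GPRs. */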
+#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
@@ -142,7 +188,7 @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return true;
@@ -165,6 +211,13 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
return true;
}
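+ /*
+ * Vector constants are checked per element: VCMP covers the union of
+ * the si5/ui5 ranges taken by vseqi/vslei/vslti, VADD the ui5
+ * magnitude taken by vaddi/vsubi.
+ */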
+ int64_t vec_val = sextract64(val, 0, 8 << vece);
+ if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
+ return true;
+ }
return false;
}
@@ -1028,6 +1081,48 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
}
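+/*
+ * Emit a 128-bit guest load or store; the value is carried in the
+ * data_lo/data_hi register pair.
+ */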
+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
+ TCGReg addr_reg, MemOpIdx oi, bool is_ld)
+{
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
+
+ if (h.aa.atom == MO_128) {
+ /*
+ * Use VLDX/VSTX when 128-bit atomicity is required.
+ * If the address is 16-byte aligned, the 128-bit load/store is atomic.
+ */
+ if (is_ld) {
+ tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
+ tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
+ tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
+ } else {
+ tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
+ tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
+ tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
+ }
+ } else {
+ /* Otherwise use a pair of LD/ST. */
+ tcg_out_opc_add_d(s, TCG_REG_TMP0, h.base, h.index);
+ if (is_ld) {
+ tcg_out_opc_ld_d(s, data_lo, TCG_REG_TMP0, 0);
+ tcg_out_opc_ld_d(s, data_hi, TCG_REG_TMP0, 8);
+ } else {
+ tcg_out_opc_st_d(s, data_lo, TCG_REG_TMP0, 0);
+ tcg_out_opc_st_d(s, data_hi, TCG_REG_TMP0, 8);
+ }
+ }
+
+ if (ldst) {
+ ldst->type = TCG_TYPE_I128;
+ ldst->datalo_reg = data_lo;
+ ldst->datahi_reg = data_hi;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
/*
* Entry-points
*/
@@ -1092,6 +1187,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0 = args[0];
TCGArg a1 = args[1];
TCGArg a2 = args[2];
+ TCGArg a3 = args[3];
int c2 = const_args[2];
switch (opc) {
@@ -1454,6 +1550,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_ld_a64_i64:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
+ break;
case INDEX_op_qemu_st_a32_i32:
case INDEX_op_qemu_st_a64_i32:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
@@ -1462,6 +1562,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_st_a64_i64:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
+ break;
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
@@ -1486,6 +1590,444 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
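+/* Broadcast a general register into every element of a vector register. */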
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg rd, TCGReg rs)
+{
+ switch (vece) {
+ case MO_8:
+ tcg_out_opc_vreplgr2vr_b(s, rd, rs);
+ break;
+ case MO_16:
+ tcg_out_opc_vreplgr2vr_h(s, rd, rs);
+ break;
+ case MO_32:
+ tcg_out_opc_vreplgr2vr_w(s, rd, rs);
+ break;
+ case MO_64:
+ tcg_out_opc_vreplgr2vr_d(s, rd, rs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg r, TCGReg base, intptr_t offset)
+{
+ /* Handle out-of-range or unaligned offsets; vldrepl scales its immediate by the element size (e.g. the vldrepl.d immediate is the offset divided by 8). */
+ if (offset < -0x800 || offset > 0x7ff ||
+ (offset & ((1 << vece) - 1)) != 0) {
+ tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
+ base = TCG_REG_TMP0;
+ offset = 0;
+ }
+ offset >>= vece;
+
+ switch (vece) {
+ case MO_8:
+ tcg_out_opc_vldrepl_b(s, r, base, offset);
+ break;
+ case MO_16:
+ tcg_out_opc_vldrepl_h(s, r, base, offset);
+ break;
+ case MO_32:
+ tcg_out_opc_vldrepl_w(s, r, base, offset);
+ break;
+ case MO_64:
+ tcg_out_opc_vldrepl_d(s, r, base, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg rd, int64_t v64)
+{
+ /* Try vldi if imm can fit */
+ int64_t value = sextract64(v64, 0, 8 << vece);
+ if (-0x200 <= value && value <= 0x1FF) {
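+ /*
+ * vldi imm13 with bit 12 clear: bits 11:10 give the element size,
+ * bits 9:0 the sign-extended value to replicate.
+ */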
+ uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
+ tcg_out_opc_vldi(s, rd, imm);
+ return;
+ }
+
+ /* TODO: handle the extra vldi patterns selected when bit 12 of the immediate is set */
+
+ /* Fallback to vreplgr2vr */
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
+ switch (vece) {
+ case MO_8:
+ tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
+ break;
+ case MO_16:
+ tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
+ break;
+ case MO_32:
+ tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
+ break;
+ case MO_64:
+ tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
+ const TCGArg a1, const TCGArg a2,
+ bool a2_is_const, bool is_add)
+{
+ static const LoongArchInsn add_vec_insn[4] = {
+ OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
+ };
+ static const LoongArchInsn add_vec_imm_insn[4] = {
+ OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
+ };
+ static const LoongArchInsn sub_vec_insn[4] = {
+ OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
+ };
+ static const LoongArchInsn sub_vec_imm_insn[4] = {
+ OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
+ };
+
+ if (a2_is_const) {
+ int64_t value = sextract64(a2, 0, 8 << vece);
+ if (!is_add) {
+ value = -value;
+ }
+
+ /* Try vaddi/vsubi */
+ if (0 <= value && value <= 0x1f) {
+ tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
+ a1, value));
+ return;
+ } else if (-0x1f <= value && value < 0) {
+ tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
+ a1, -value));
+ return;
+ }
+
+ /* The TCG_CT_CONST_VADD constraint guarantees the immediate fits, so this is unreachable. */
+ g_assert_not_reached();
+ }
+
+ if (is_add) {
+ tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
+ } else {
+ tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
+ }
+}
+
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+ unsigned vecl, unsigned vece,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
+{
+ TCGType type = vecl + TCG_TYPE_V64;
+ TCGArg a0, a1, a2, a3;
+ TCGReg temp = TCG_REG_TMP0;
+ TCGReg temp_vec = TCG_VEC_TMP0;
+
+ static const LoongArchInsn cmp_vec_insn[16][4] = {
+ [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
+ [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
+ [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
+ [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
+ [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
+ };
+ static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
+ [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
+ [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
+ [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
+ [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
+ [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
+ };
+ LoongArchInsn insn;
+ static const LoongArchInsn neg_vec_insn[4] = {
+ OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
+ };
+ static const LoongArchInsn mul_vec_insn[4] = {
+ OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
+ };
+ static const LoongArchInsn smin_vec_insn[4] = {
+ OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
+ };
+ static const LoongArchInsn umin_vec_insn[4] = {
+ OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
+ };
+ static const LoongArchInsn smax_vec_insn[4] = {
+ OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
+ };
+ static const LoongArchInsn umax_vec_insn[4] = {
+ OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
+ };
+ static const LoongArchInsn ssadd_vec_insn[4] = {
+ OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
+ };
+ static const LoongArchInsn usadd_vec_insn[4] = {
+ OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
+ };
+ static const LoongArchInsn sssub_vec_insn[4] = {
+ OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
+ };
+ static const LoongArchInsn ussub_vec_insn[4] = {
+ OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
+ };
+ static const LoongArchInsn shlv_vec_insn[4] = {
+ OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
+ };
+ static const LoongArchInsn shrv_vec_insn[4] = {
+ OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
+ };
+ static const LoongArchInsn sarv_vec_insn[4] = {
+ OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
+ };
+ static const LoongArchInsn shli_vec_insn[4] = {
+ OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
+ };
+ static const LoongArchInsn shri_vec_insn[4] = {
+ OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
+ };
+ static const LoongArchInsn sari_vec_insn[4] = {
+ OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
+ };
+ static const LoongArchInsn rotrv_vec_insn[4] = {
+ OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
+ };
+
+ a0 = args[0];
+ a1 = args[1];
+ a2 = args[2];
+ a3 = args[3];
+
+ /* Currently only supports V128 */
+ tcg_debug_assert(type == TCG_TYPE_V128);
+
+ switch (opc) {
+ case INDEX_op_st_vec:
+ /* Try to fit vst imm */
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
+ tcg_out_opc_vst(s, a0, a1, a2);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
+ tcg_out_opc_vstx(s, a0, a1, temp);
+ }
+ break;
+ case INDEX_op_ld_vec:
+ /* Try to fit vld imm */
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
+ tcg_out_opc_vld(s, a0, a1, a2);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
+ tcg_out_opc_vldx(s, a0, a1, temp);
+ }
+ break;
+ case INDEX_op_and_vec:
+ tcg_out_opc_vand_v(s, a0, a1, a2);
+ break;
+ case INDEX_op_andc_vec:
+ /*
+ * vandn vd, vj, vk: vd = vk & ~vj
+ * andc_vec vd, vj, vk: vd = vj & ~vk
+ * vj and vk are swapped
+ */
+ tcg_out_opc_vandn_v(s, a0, a2, a1);
+ break;
+ case INDEX_op_or_vec:
+ tcg_out_opc_vor_v(s, a0, a1, a2);
+ break;
+ case INDEX_op_orc_vec:
+ tcg_out_opc_vorn_v(s, a0, a1, a2);
+ break;
+ case INDEX_op_xor_vec:
+ tcg_out_opc_vxor_v(s, a0, a1, a2);
+ break;
+ case INDEX_op_nor_vec:
+ tcg_out_opc_vnor_v(s, a0, a1, a2);
+ break;
+ case INDEX_op_not_vec:
+ tcg_out_opc_vnor_v(s, a0, a1, a1);
+ break;
+ case INDEX_op_cmp_vec:
+ {
+ TCGCond cond = args[3];
+ if (const_args[2]) {
+ /*
+ * cmp_vec dest, src, value
+ * Try vseqi/vslei/vslti
+ */
+ int64_t value = sextract64(a2, 0, 8 << vece);
+ if ((cond == TCG_COND_EQ || cond == TCG_COND_LE ||
+ cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
+ tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece],
+ a0, a1, value));
+ break;
+ } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
+ (0x00 <= value && value <= 0x1f)) {
+ tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece],
+ a0, a1, value));
+ break;
+ }
+
+ /*
+ * Fallback to:
+ * dupi_vec temp, a2
+ * cmp_vec a0, a1, temp, cond
+ */
+ tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+ a2 = temp_vec;
+ }
+
+ insn = cmp_vec_insn[cond][vece];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_vec_insn[cond][vece];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
+ }
+ break;
+ case INDEX_op_add_vec:
+ tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
+ break;
+ case INDEX_op_sub_vec:
+ tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
+ break;
+ case INDEX_op_neg_vec:
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
+ break;
+ case INDEX_op_mul_vec:
+ tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_smin_vec:
+ tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_smax_vec:
+ tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_umin_vec:
+ tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_umax_vec:
+ tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_ssadd_vec:
+ tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_usadd_vec:
+ tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_sssub_vec:
+ tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_ussub_vec:
+ tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_shlv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_shrv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_sarv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_shli_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_sari_vec:
+ {
+ /* The shift-immediate field is 3 + vece bits wide. */
+ const LoongArchInsn *tab = opc == INDEX_op_shli_vec ? shli_vec_insn :
+ opc == INDEX_op_shri_vec ? shri_vec_insn : sari_vec_insn;
+ switch (vece) {
+ case MO_8: tcg_out32(s, encode_vdvjuk3_insn(tab[vece], a0, a1, a2)); break;
+ case MO_16: tcg_out32(s, encode_vdvjuk4_insn(tab[vece], a0, a1, a2)); break;
+ case MO_32: tcg_out32(s, encode_vdvjuk5_insn(tab[vece], a0, a1, a2)); break;
+ case MO_64: tcg_out32(s, encode_vdvjuk6_insn(tab[vece], a0, a1, a2)); break;
+ default: g_assert_not_reached();
+ }
+ }
+ break;
+ case INDEX_op_rotrv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_rotlv_vec:
+ /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
+ tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
+ temp_vec));
+ break;
+ case INDEX_op_rotli_vec:
+ /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
+ a2 = extract32(-a2, 0, 3 + vece);
+ switch (vece) {
+ case MO_8:
+ tcg_out_opc_vrotri_b(s, a0, a1, a2);
+ break;
+ case MO_16:
+ tcg_out_opc_vrotri_h(s, a0, a1, a2);
+ break;
+ case MO_32:
+ tcg_out_opc_vrotri_w(s, a0, a1, a2);
+ break;
+ case MO_64:
+ tcg_out_opc_vrotri_d(s, a0, a1, a2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case INDEX_op_bitsel_vec:
+ /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
+ tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
+ break;
+ case INDEX_op_dupm_vec:
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
+{
+ switch (opc) {
+ case INDEX_op_ld_vec:
+ case INDEX_op_st_vec:
+ case INDEX_op_dup_vec:
+ case INDEX_op_dupm_vec:
+ case INDEX_op_cmp_vec:
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
+ case INDEX_op_and_vec:
+ case INDEX_op_andc_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_orc_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_nor_vec:
+ case INDEX_op_not_vec:
+ case INDEX_op_neg_vec:
+ case INDEX_op_mul_vec:
+ case INDEX_op_smin_vec:
+ case INDEX_op_smax_vec:
+ case INDEX_op_umin_vec:
+ case INDEX_op_umax_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_ussub_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_bitsel_vec:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
+ TCGArg a0, ...)
+{
+ g_assert_not_reached();
+}
+
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
switch (op) {
@@ -1505,6 +2047,14 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_qemu_st_a64_i64:
return C_O0_I2(rZ, r);
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ return C_O2_I1(r, r, r);
+
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ return C_O0_I3(r, r, r);
+
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return C_O0_I2(rZ, rZ);
@@ -1627,6 +2177,54 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_movcond_i64:
return C_O1_I4(r, rZ, rJ, rZ, rZ);
+ case INDEX_op_ld_vec:
+ case INDEX_op_dupm_vec:
+ case INDEX_op_dup_vec:
+ return C_O1_I1(w, r);
+
+ case INDEX_op_st_vec:
+ return C_O0_I2(w, r);
+
+ case INDEX_op_cmp_vec:
+ return C_O1_I2(w, w, wM);
+
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
+ return C_O1_I2(w, w, wA);
+
+ case INDEX_op_and_vec:
+ case INDEX_op_andc_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_orc_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_nor_vec:
+ case INDEX_op_mul_vec:
+ case INDEX_op_smin_vec:
+ case INDEX_op_smax_vec:
+ case INDEX_op_umin_vec:
+ case INDEX_op_umax_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_ussub_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_rotrv_vec:
+ case INDEX_op_rotlv_vec:
+ return C_O1_I2(w, w, w);
+
+ case INDEX_op_not_vec:
+ case INDEX_op_neg_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_sari_vec:
+ case INDEX_op_rotli_vec:
+ return C_O1_I1(w, w);
+
+ case INDEX_op_bitsel_vec:
+ return C_O1_I3(w, w, w, w);
+
default:
g_assert_not_reached();
}
@@ -1698,6 +2296,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static void tcg_target_init(TCGContext *s)
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
@@ -1708,6 +2311,10 @@ static void tcg_target_init(TCGContext *s)
exit(EXIT_FAILURE);
}
+ if (hwcap & HWCAP_LOONGARCH_LSX) {
+ use_lsx_instructions = 1;
+ }
+
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
@@ -1723,6 +2330,18 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
+ if (use_lsx_instructions) {
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
+ }
+
s->reserved_regs = 0;
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
@@ -1731,6 +2350,7 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
+ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
}
typedef struct {
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index 559be67..0301767 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -30,7 +30,7 @@
#define LOONGARCH_TCG_TARGET_H
#define TCG_TARGET_INSN_UNIT_SIZE 4
-#define TCG_TARGET_NB_REGS 32
+#define TCG_TARGET_NB_REGS 64
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
@@ -68,13 +68,25 @@ typedef enum {
TCG_REG_S7,
TCG_REG_S8,
+ TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+ TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
+ TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
+
/* aliases */
TCG_AREG0 = TCG_REG_S0,
TCG_REG_TMP0 = TCG_REG_T8,
TCG_REG_TMP1 = TCG_REG_T7,
TCG_REG_TMP2 = TCG_REG_T6,
+ TCG_VEC_TMP0 = TCG_REG_V23,
} TCGReg;
+extern bool use_lsx_instructions;
+
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN 16
@@ -159,7 +171,31 @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 use_lsx_instructions
+
+#define TCG_TARGET_HAS_v64 0
+#define TCG_TARGET_HAS_v128 use_lsx_instructions
+#define TCG_TARGET_HAS_v256 0
+
+#define TCG_TARGET_HAS_not_vec 1
+#define TCG_TARGET_HAS_neg_vec 1
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec 1
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 1
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_shi_vec 1
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
+#define TCG_TARGET_HAS_roti_vec 1
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec 1
+#define TCG_TARGET_HAS_cmpsel_vec 0
#define TCG_TARGET_DEFAULT_MO (0)
diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target.opc.h
new file mode 100644
index 0000000..fd1a40b
--- /dev/null
+++ b/tcg/loongarch64/tcg-target.opc.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2023 Jiajie Chen
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 9faa8bd..f52bda4 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -190,7 +190,7 @@ static bool is_p2m1(tcg_target_long val)
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -2628,6 +2628,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static void tcg_target_init(TCGContext *s)
{
tcg_target_detect_isa();
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 090f11e..90d76c2 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -261,7 +261,7 @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -2527,6 +2527,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out32(s, BCLR | BO_ALWAYS);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
diff --git a/tcg/region.c b/tcg/region.c
index 2b28ed3..a078899 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -33,8 +33,19 @@
#include "tcg/tcg.h"
#include "exec/translation-block.h"
#include "tcg-internal.h"
+#include "host/cpuinfo.h"
+/*
+ * Local source-level compatibility with Unix.
+ * Used by tcg_region_init below.
+ */
+#if defined(_WIN32)
+#define PROT_READ 1
+#define PROT_WRITE 2
+#define PROT_EXEC 4
+#endif
+
struct tcg_region_tree {
QemuMutex lock;
QTree *tree;
@@ -83,6 +94,18 @@ bool in_code_gen_buffer(const void *p)
return (size_t)(p - region.start_aligned) <= region.total_size;
}
+#ifndef CONFIG_TCG_INTERPRETER
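+/*
+ * Protection bits for the read/exec mapping of the code buffer; adds
+ * PROT_BTI on aarch64 hosts with Branch Target Identification enabled.
+ */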
+static int host_prot_read_exec(void)
+{
+#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
+ if (cpuinfo & CPUINFO_BTI) {
+ return PROT_READ | PROT_EXEC | PROT_BTI;
+ }
+#endif
+ return PROT_READ | PROT_EXEC;
+}
+#endif
+
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
@@ -505,14 +528,6 @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
-/*
- * Local source-level compatibility with Unix.
- * Used by tcg_region_init below.
- */
-#define PROT_READ 1
-#define PROT_WRITE 2
-#define PROT_EXEC 4
-
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
void *buf;
@@ -567,7 +582,7 @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
goto fail;
}
- buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+ buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
if (buf_rx == MAP_FAILED) {
goto fail_rx;
}
@@ -642,7 +657,7 @@ static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
return -1;
}
- if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
+ if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
error_setg_errno(errp, errno, "mprotect for jit splitwx");
munmap((void *)buf_rx, size);
munmap((void *)buf_rw, size);
@@ -805,7 +820,7 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
need_prot = PROT_READ | PROT_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
if (tcg_splitwx_diff == 0) {
- need_prot |= PROT_EXEC;
+ need_prot |= host_prot_read_exec();
}
#endif
for (size_t i = 0, n = region.n; i < n; i++) {
@@ -820,7 +835,11 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
} else if (need_prot == (PROT_READ | PROT_WRITE)) {
rc = qemu_mprotect_rw(start, end - start);
} else {
+#ifdef CONFIG_POSIX
+ rc = mprotect(start, end - start, need_prot);
+#else
g_assert_not_reached();
+#endif
}
if (rc) {
error_setg_errno(&error_fatal, errno,
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 9be81c1..c2bcdea 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -145,7 +145,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define sextreg sextract64
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -2099,6 +2099,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static volatile sig_atomic_t got_sigill;
static void sigill_handler(int signo, siginfo_t *si, void *data)
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index ecd8aaf..7552f63 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -540,7 +540,7 @@ static bool risbg_mask(uint64_t c)
}
/* Test if a constant matches the constraint. */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -3483,6 +3483,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
memset(p, 0x07, count * sizeof(tcg_insn_unit));
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 81a08bb..01ac26c 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -322,7 +322,7 @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -962,6 +962,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_movi_s13(s, TCG_REG_O0, 0);
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
int i;
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index e260a07..41b1ae1 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -3846,6 +3846,155 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
}
}
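+/*
+ * Expand a compare of the vector elements at aofs against the scalar c,
+ * tysz bytes per iteration.
+ */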
+static void expand_cmps_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t tysz, TCGType type,
+ TCGCond cond, TCGv_vec c)
+{
+ TCGv_vec t0 = tcg_temp_new_vec(type);
+ TCGv_vec t1 = tcg_temp_new_vec(type);
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += tysz) {
+ tcg_gen_ld_vec(t1, cpu_env, aofs + i);
+ tcg_gen_cmp_vec(cond, vece, t0, t1, c);
+ tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ }
+}
+
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
+ static gen_helper_gvec_2i * const eq_fn[4] = {
+ gen_helper_gvec_eqs8, gen_helper_gvec_eqs16,
+ gen_helper_gvec_eqs32, gen_helper_gvec_eqs64
+ };
+ static gen_helper_gvec_2i * const lt_fn[4] = {
+ gen_helper_gvec_lts8, gen_helper_gvec_lts16,
+ gen_helper_gvec_lts32, gen_helper_gvec_lts64
+ };
+ static gen_helper_gvec_2i * const le_fn[4] = {
+ gen_helper_gvec_les8, gen_helper_gvec_les16,
+ gen_helper_gvec_les32, gen_helper_gvec_les64
+ };
+ static gen_helper_gvec_2i * const ltu_fn[4] = {
+ gen_helper_gvec_ltus8, gen_helper_gvec_ltus16,
+ gen_helper_gvec_ltus32, gen_helper_gvec_ltus64
+ };
+ static gen_helper_gvec_2i * const leu_fn[4] = {
+ gen_helper_gvec_leus8, gen_helper_gvec_leus16,
+ gen_helper_gvec_leus32, gen_helper_gvec_leus64
+ };
+ static gen_helper_gvec_2i * const * const fns[16] = {
+ [TCG_COND_EQ] = eq_fn,
+ [TCG_COND_LT] = lt_fn,
+ [TCG_COND_LE] = le_fn,
+ [TCG_COND_LTU] = ltu_fn,
+ [TCG_COND_LEU] = leu_fn,
+ };
+
+ TCGType type;
+
+ check_size_align(oprsz, maxsz, dofs | aofs);
+ check_overlap_2(dofs, aofs, maxsz);
+
+ if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
+ do_dup(MO_8, dofs, oprsz, maxsz,
+ NULL, NULL, -(cond == TCG_COND_ALWAYS));
+ return;
+ }
+
+ /*
+ * Implement inline with a vector type, if possible.
+ * Prefer integer when 64-bit host and 64-bit comparison.
+ */
+ type = choose_vector_type(cmp_list, vece, oprsz,
+ TCG_TARGET_REG_BITS == 64 && vece == MO_64);
+ if (type != 0) {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(cmp_list);
+ TCGv_vec t_vec = tcg_temp_new_vec(type);
+ uint32_t some;
+
+ tcg_gen_dup_i64_vec(vece, t_vec, c);
+ switch (type) {
+ case TCG_TYPE_V256:
+ some = QEMU_ALIGN_DOWN(oprsz, 32);
+ expand_cmps_vec(vece, dofs, aofs, some, 32,
+ TCG_TYPE_V256, cond, t_vec);
+ aofs += some;
+ dofs += some;
+ oprsz -= some;
+ maxsz -= some;
+ /* fallthru */
+
+ case TCG_TYPE_V128:
+ some = QEMU_ALIGN_DOWN(oprsz, 16);
+ expand_cmps_vec(vece, dofs, aofs, some, 16,
+ TCG_TYPE_V128, cond, t_vec);
+ break;
+
+ case TCG_TYPE_V64:
+ some = QEMU_ALIGN_DOWN(oprsz, 8);
+ expand_cmps_vec(vece, dofs, aofs, some, 8,
+ TCG_TYPE_V64, cond, t_vec);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_vec(t_vec);
+ tcg_swap_vecop_list(hold_list);
+ } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += 8) {
+ tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_negsetcond_i64(cond, t0, t0, c);
+ tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ }
+ tcg_temp_free_i64(t0);
+ } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+ uint32_t i;
+
+ tcg_gen_extrl_i64_i32(t1, c);
+ for (i = 0; i < oprsz; i += 4) {
+ tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_negsetcond_i32(cond, t0, t0, t1);
+ tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else {
+ gen_helper_gvec_2i * const *fn = fns[cond];
+ bool inv = false;
+
+ if (fn == NULL) {
+ cond = tcg_invert_cond(cond);
+ fn = fns[cond];
+ assert(fn != NULL);
+ inv = true;
+ }
+ tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, inv, fn[vece]);
+ return;
+ }
+
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+}
+
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, int64_t c,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ TCGv_i64 tmp = tcg_constant_i64(c);
+ tcg_gen_gvec_cmps(cond, vece, dofs, aofs, tmp, oprsz, maxsz);
+}
+
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
TCGv_i64 t = tcg_temp_ebb_new_i64();
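tcg_gen_gvec_cmpi() is the convenience entry point front ends are expected to call for compare-against-constant (this pull uses it in target/arm for compares against 0). A minimal illustrative use, with a made-up wrapper name, writing -1 into each lane of d where the corresponding lane of m is signed-greater-than zero:

    /* Illustrative only; gen_example_cgt0 is not part of this series. */
    static void gen_example_cgt0(unsigned vece, uint32_t d_ofs, uint32_t m_ofs,
                                 uint32_t opr_sz, uint32_t max_sz)
    {
        tcg_gen_gvec_cmpi(TCG_COND_GT, vece, d_ofs, m_ofs, 0, opr_sz, max_sz);
    }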
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 620dbe0..604fa9b 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -108,6 +108,7 @@ static void tcg_register_jit_int(const void *buf, size_t size,
__attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
+static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
@@ -171,7 +172,7 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
@@ -4689,7 +4690,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
ts = arg_temp(arg);
if (ts->val_type == TEMP_VAL_CONST
- && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
+ && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
/* constant is OK for instruction */
const_args[i] = 1;
new_args[i] = ts->val;
@@ -6014,6 +6015,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
s->gen_insn_data =
tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
+ tcg_out_tb_start(s);
+
num_insns = -1;
QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc;
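tcg_gen_code() now calls the new tcg_out_tb_start() hook before emitting the first opcode, so a backend can place per-TB entry code ahead of the generated instructions. Most backends leave it empty; on hosts with branch target identification the hook is where a landing pad would go, roughly as follows (a sketch only -- the tcg_out_bti() helper and BTI_J constant are assumed from the aarch64 patch in this series, not reproduced from it):

    /* Sketch of a non-empty hook: mark the TB entry as a valid indirect-branch
       target, since the prologue enters TBs via an indirect jump. */
    static void tcg_out_tb_start(TCGContext *s)
    {
        tcg_out_bti(s, BTI_J);
    }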
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index 253f27f..461f4b4 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -913,7 +913,7 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
}
/* Test if a constant matches the constraint. */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
return ct & TCG_CT_CONST;
}
@@ -955,6 +955,11 @@ static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}
+static void tcg_out_tb_start(TCGContext *s)
+{
+ /* nothing to do */
+}
+
bool tcg_target_has_memory_bswap(MemOp memop)
{
return true;
diff --git a/tests/tcg/m68k/Makefile.target b/tests/tcg/m68k/Makefile.target
index 1163c7e..6ff214e 100644
--- a/tests/tcg/m68k/Makefile.target
+++ b/tests/tcg/m68k/Makefile.target
@@ -4,7 +4,7 @@
#
VPATH += $(SRC_PATH)/tests/tcg/m68k
-TESTS += trap
+TESTS += trap denormal
# On m68k, Linux supports 4k and 8k pages (but 8k is currently broken)
EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192
diff --git a/tests/tcg/m68k/denormal.c b/tests/tcg/m68k/denormal.c
new file mode 100644
index 0000000..20bd8c7
--- /dev/null
+++ b/tests/tcg/m68k/denormal.c
@@ -0,0 +1,53 @@
+/*
+ * Test m68k extended double denormals.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define TEST(X, Y) { X, Y, X * Y }
+
+static volatile long double test[][3] = {
+ TEST(0x1p+16383l, 0x1p-16446l),
+ TEST(0x1.1p-8223l, 0x1.1p-8224l),
+ TEST(1.0l, 0x1p-16383l),
+};
+
+#undef TEST
+
+static void dump_ld(const char *label, long double ld)
+{
+ union {
+ long double d;
+ struct {
+ uint32_t exp:16;
+ uint32_t space:16;
+ uint32_t h;
+ uint32_t l;
+ };
+ } u;
+
+ u.d = ld;
+ printf("%12s: % -27La 0x%04x 0x%08x 0x%08x\n", label, u.d, u.exp, u.h, u.l);
+}
+
+int main(void)
+{
+ int i, n = sizeof(test) / sizeof(test[0]), err = 0;
+
+ for (i = 0; i < n; ++i) {
+ long double x = test[i][0];
+ long double y = test[i][1];
+ long double build_mul = test[i][2];
+ long double runtime_mul = x * y;
+
+ if (runtime_mul != build_mul) {
+ dump_ld("x", x);
+ dump_ld("y", y);
+ dump_ld("build_mul", build_mul);
+ dump_ld("runtime_mul", runtime_mul);
+ err = 1;
+ }
+ }
+ return err;
+}
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index e0e1fe6..4c8a005 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -13,6 +13,9 @@
# include <asm/hwcap.h>
# include "elf.h"
# endif
+# ifndef HWCAP2_BTI
+# define HWCAP2_BTI 0 /* added in glibc 2.32 */
+# endif
#endif
#ifdef CONFIG_DARWIN
# include <sys/sysctl.h>
@@ -58,12 +61,16 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
info |= (hwcap & HWCAP_AES ? CPUINFO_AES : 0);
info |= (hwcap & HWCAP_PMULL ? CPUINFO_PMULL : 0);
+
+ unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
+ info |= (hwcap2 & HWCAP2_BTI ? CPUINFO_BTI : 0);
#endif
#ifdef CONFIG_DARWIN
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
info |= sysctl_for_bool("hw.optional.arm.FEAT_PMULL") * CPUINFO_PMULL;
+ info |= sysctl_for_bool("hw.optional.arm.FEAT_BTI") * CPUINFO_BTI;
#endif
cpuinfo = info;
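With CPUINFO_BTI available, the "tcg: Map code_gen_buffer with PROT_BTI" patch in this pull can request guarded pages for the translation buffer only when the host actually implements FEAT_BTI. A simplified sketch of that idea (not the exact tcg/region.c hunk; the helper name is invented and PROT_BTI is the Linux aarch64 value):

    #include "host/cpuinfo.h"

    #ifndef PROT_BTI
    #define PROT_BTI 0x10   /* aarch64 Linux definition, if the headers lack it */
    #endif

    static int example_code_buf_prot(void)
    {
        int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
        if (cpuinfo & CPUINFO_BTI) {
            prot |= PROT_BTI;   /* executable pages become guarded pages */
        }
        return prot;
    }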
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index 4d583da..e86fd64 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -585,7 +585,7 @@ char *qemu_get_pid_name(pid_t pid)
void *qemu_alloc_stack(size_t *sz)
{
- void *ptr, *guardpage;
+ void *ptr;
int flags;
#ifdef CONFIG_DEBUG_STACK_USAGE
void *ptr2;
@@ -618,17 +618,8 @@ void *qemu_alloc_stack(size_t *sz)
abort();
}
-#if defined(HOST_IA64)
- /* separate register stack */
- guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
-#elif defined(HOST_HPPA)
- /* stack grows up */
- guardpage = ptr + *sz - pagesz;
-#else
- /* stack grows down */
- guardpage = ptr;
-#endif
- if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
+ /* Stack grows down -- guard page at the bottom. */
+ if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
perror("failed to set up stack guard page");
abort();
}